diff --git a/cmd/certsuite/check/check.go b/cmd/certsuite/check/check.go index 32ebb6197..c2ce89949 100644 --- a/cmd/certsuite/check/check.go +++ b/cmd/certsuite/check/check.go @@ -13,6 +13,13 @@ var ( } ) +// NewCommand Creates a check command that aggregates image certification and result verification actions +// +// This function builds a new Cobra command for the tool’s check +// functionality. It registers two child commands: one to verify image +// certificates and another to validate test results against expected outputs or +// logs. The resulting command is returned for inclusion in the main CLI +// hierarchy. func NewCommand() *cobra.Command { checkCmd.AddCommand(imagecert.NewCommand()) checkCmd.AddCommand(results.NewCommand()) diff --git a/cmd/certsuite/check/image_cert_status/image_cert_status.go b/cmd/certsuite/check/image_cert_status/image_cert_status.go index 7b8a51e08..db4544dfc 100644 --- a/cmd/certsuite/check/image_cert_status/image_cert_status.go +++ b/cmd/certsuite/check/image_cert_status/image_cert_status.go @@ -30,6 +30,14 @@ var checkImageCertStatusCmd = &cobra.Command{ RunE: checkImageCertStatus, } +// checkImageCertStatus checks whether a container image is certified +// +// The function reads command-line flags for an image name, registry, tag, or +// digest, then uses a validator from the certdb package to determine +// certification status. It prints formatted information about the selected +// image and outputs a colored result indicating success or failure. Errors are +// returned if required parameters are missing or if the validator cannot be +// obtained. 
func checkImageCertStatus(cmd *cobra.Command, _ []string) error { imageName, _ := cmd.Flags().GetString("name") imageRegistry, _ := cmd.Flags().GetString("registry") @@ -64,6 +72,13 @@ func checkImageCertStatus(cmd *cobra.Command, _ []string) error { return nil } +// NewCommand configures and returns the image certificate status command +// +// This function sets up persistent flags for specifying an image name, +// registry, tag, digest, and an optional offline database path. It enforces +// that a name and registry must be provided together while ensuring the name +// and digest cannot both be set at once. Finally, it returns the fully +// configured command object. func NewCommand() *cobra.Command { checkImageCertStatusCmd.PersistentFlags().String("name", "", "name of the image to verify") checkImageCertStatusCmd.PersistentFlags().String("registry", "", "registry where the image is stored") diff --git a/cmd/certsuite/check/results/results.go b/cmd/certsuite/check/results/results.go index b8092653a..fe7bf96ad 100644 --- a/cmd/certsuite/check/results/results.go +++ b/cmd/certsuite/check/results/results.go @@ -26,12 +26,25 @@ const ( resultMiss = "MISSING" ) +// TestCaseList Stores the names of test cases categorized by outcome +// +// This structure keeps three slices, each holding strings that represent test +// case identifiers. The Pass slice lists all tests that succeeded, Fail +// contains those that failed, and Skip holds tests that were not executed. It +// is used to report results in a concise format. type TestCaseList struct { Pass []string `yaml:"pass"` Fail []string `yaml:"fail"` Skip []string `yaml:"skip"` } +// TestResults Holds a collection of test case results +// +// This structure contains a slice of individual test case outcomes, allowing +// the program to group related results together. 
The embedded field +// automatically inherits all fields and methods from the underlying test case +// list type, enabling direct access to the collection’s elements. It serves +// as a container for serializing or reporting aggregated test data. type TestResults struct { TestCaseList `yaml:"testCases"` } @@ -42,6 +55,14 @@ var checkResultsCmd = &cobra.Command{ RunE: checkResults, } +// checkResults compares recorded test outcomes against a reference template +// +// The function reads actual test results from a log file, optionally generates +// a YAML template of those results, or loads expected results from an existing +// template. It then checks each test case for mismatches between actual and +// expected values, reporting any discrepancies in a formatted table and +// terminating the program if differences are found. If all results match, it +// prints a success message. func checkResults(cmd *cobra.Command, _ []string) error { templateFileName, _ := cmd.Flags().GetString("template") generateTemplate, _ := cmd.Flags().GetBool("generate-template") @@ -90,6 +111,13 @@ func checkResults(cmd *cobra.Command, _ []string) error { return nil } +// getTestResultsDB Parses a log file to build a test result map +// +// The function opens the specified log file, reads it line by line, and +// extracts test case names and their recorded results using a regular +// expression. Each matched pair is stored in a map where the key is the test +// case name and the value is its result string. It returns this map along with +// an error if any step fails. 
func getTestResultsDB(logFileName string) (map[string]string, error) { resultsDB := make(map[string]string) @@ -124,6 +152,13 @@ func getTestResultsDB(logFileName string) (map[string]string, error) { return resultsDB, nil } +// getExpectedTestResults loads expected test outcomes from a YAML template +// +// The function reads a specified file, decodes its YAML content into a +// structured list of test cases classified as pass, skip, or fail, then builds +// a map associating each case with the corresponding result string. It returns +// this map along with any error that occurs during file reading or +// unmarshalling. func getExpectedTestResults(templateFileName string) (map[string]string, error) { templateFile, err := os.ReadFile(templateFileName) if err != nil { @@ -150,6 +185,14 @@ func getExpectedTestResults(templateFileName string) (map[string]string, error) return expectedTestResults, nil } +// printTestResultsMismatch Displays a formatted table of test cases that did not match the expected results +// +// The function receives a list of mismatched test case identifiers along with +// maps of actual and expected outcomes. It prints a header, then iterates over +// each mismatched case, retrieving the corresponding expected and actual +// values—using a placeholder when either is missing—and outputs them in +// aligned columns. Finally, it draws separators to delineate each row for +// readability. func printTestResultsMismatch(mismatchedTestCases []string, actualResults, expectedResults map[string]string) { fmt.Printf("\n") fmt.Println(strings.Repeat("-", 96)) //nolint:mnd // table line @@ -169,6 +212,14 @@ func printTestResultsMismatch(mismatchedTestCases []string, actualResults, expec } } +// generateTemplateFile Creates a YAML template file summarizing test case outcomes +// +// This function takes a map of test cases to result strings and builds a +// structured template containing lists for passed, skipped, and failed tests. 
+// It encodes the structure into YAML with two-space indentation and writes it +// to a predefined file path with specific permissions. If an unknown result +// value is encountered or any I/O operation fails, it returns an error +// detailing the issue. func generateTemplateFile(resultsDB map[string]string) error { var resultsTemplate TestResults for testCase, result := range resultsDB { @@ -201,6 +252,12 @@ func generateTemplateFile(resultsDB map[string]string) error { return nil } +// NewCommand Creates a command for checking test results against expected templates +// +// It defines persistent flags for specifying the template file, log file, and +// an option to generate a new template from logs. The flags are mutually +// exclusive to avoid conflicting inputs. Finally, it returns the configured +// command instance. func NewCommand() *cobra.Command { checkResultsCmd.PersistentFlags().String("template", "expected_results.yaml", "reference YAML template with the expected results") checkResultsCmd.PersistentFlags().String("log-file", "certsuite.log", "log file of the Certsuite execution") diff --git a/cmd/certsuite/claim/claim.go b/cmd/certsuite/claim/claim.go index 5f940d3b7..892f6ea01 100644 --- a/cmd/certsuite/claim/claim.go +++ b/cmd/certsuite/claim/claim.go @@ -13,6 +13,12 @@ var ( } ) +// NewCommand Creates a subcommand for claim operations +// +// It initializes the claim command by attaching its compare and show +// subcommands, each of which provides functionality for comparing claim files +// or displaying claim information. The function returns the configured +// cobra.Command ready to be added to the main application root command. 
func NewCommand() *cobra.Command { claimCommand.AddCommand(compare.NewCommand()) claimCommand.AddCommand(show.NewCommand()) diff --git a/cmd/certsuite/claim/compare/compare.go b/cmd/certsuite/claim/compare/compare.go index 8539beb51..2e25f2d4d 100644 --- a/cmd/certsuite/claim/compare/compare.go +++ b/cmd/certsuite/claim/compare/compare.go @@ -86,6 +86,12 @@ var ( } ) +// NewCommand Creates a command for comparing two claim files +// +// It defines flags for the paths of two existing claim files, marks both as +// required, and handles errors by logging them before returning nil if marking +// fails. The function then returns the configured command object for use in the +// CLI. func NewCommand() *cobra.Command { claimCompareFiles.Flags().StringVarP( &Claim1FilePathFlag, "claim1", "1", "", @@ -109,6 +115,13 @@ func NewCommand() *cobra.Command { return claimCompareFiles } +// claimCompare compares two claim files for differences +// +// This function reads the paths provided by global flags, loads each file, +// unmarshals them into claim structures, and then generates diff reports for +// versions, test cases, configurations, and nodes. The resulting diffs are +// printed to standard output. If any step fails, it logs a fatal error and +// exits. func claimCompare(_ *cobra.Command, _ []string) error { err := claimCompareFilesfunc(Claim1FilePathFlag, Claim2FilePathFlag) if err != nil { @@ -117,6 +130,13 @@ func claimCompare(_ *cobra.Command, _ []string) error { return nil } +// claimCompareFilesfunc Reads two claim files, unmarshals them, and outputs structured comparison reports +// +// The function loads the contents of two specified claim files and parses each +// JSON document into a claim schema structure. It then generates separate diff +// reports for the claim versions, test case results, configuration differences, +// and node details, printing each report to standard output. 
Errors during file +// reading or unmarshalling are wrapped with context and returned for handling. func claimCompareFilesfunc(claim1, claim2 string) error { // readfiles claimdata1, err := os.ReadFile(claim1) @@ -161,6 +181,12 @@ func claimCompareFilesfunc(claim1, claim2 string) error { return nil } +// unmarshalClaimFile Parses raw claim data into a structured schema +// +// This function receives raw JSON bytes representing a claim file, attempts to +// unmarshal them into the claim.Schema type, and returns either the populated +// struct or an error if parsing fails. It uses standard library JSON decoding +// and propagates any unmarshaling errors back to the caller. func unmarshalClaimFile(claimdata []byte) (claim.Schema, error) { var claimDataResult claim.Schema err := json.Unmarshal(claimdata, &claimDataResult) diff --git a/cmd/certsuite/claim/compare/configurations/configurations.go b/cmd/certsuite/claim/compare/configurations/configurations.go index a7da0276c..5123f57bd 100644 --- a/cmd/certsuite/claim/compare/configurations/configurations.go +++ b/cmd/certsuite/claim/compare/configurations/configurations.go @@ -7,11 +7,24 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim" ) +// AbnormalEventsCount Displays counts of abnormal events for two claims +// +// This struct holds integer counts of abnormal events for two distinct claims, +// named Claim1 and Claim2. The String method formats these values into a +// readable table with headers, producing a string that summarizes the event +// counts for comparison purposes. type AbnormalEventsCount struct { Claim1 int `json:"claim1"` Claim2 int `json:"claim2"` } +// AbnormalEventsCount.String Formats abnormal event counts for two claims +// +// This method builds a multi-line string that displays the number of abnormal +// events detected in two separate claims. 
It starts with a header line, then +// adds a formatted table row showing the claim identifiers and their +// corresponding counts using printf-style formatting. The resulting string is +// returned for display or logging. func (c *AbnormalEventsCount) String() string { const ( rowHeaderFmt = "%-12s%-s\n" @@ -25,11 +38,25 @@ func (c *AbnormalEventsCount) String() string { return str } +// DiffReport captures configuration differences and abnormal event counts +// +// This structure contains a diff of Cert Suite configuration objects and a +// count of abnormal events for two claims. The Config field holds the result +// from a diff comparison, while AbnormalEvents stores how many abnormal events +// each claim reported. It is used to report and display discrepancies between +// claims. type DiffReport struct { Config *diff.Diffs `json:"CertSuiteConfig"` AbnormalEvents AbnormalEventsCount `json:"abnormalEventsCount"` } +// DiffReport.String Formats the diff report into a readable string +// +// This method builds a formatted representation of a configuration comparison, +// beginning with header lines and then appending the configuration details +// followed by any abnormal events. It concatenates strings from the embedded +// Config and AbnormalEvents fields and returns the final result as a single +// string. func (d *DiffReport) String() string { str := "CONFIGURATIONS\n" str += "--------------\n\n" @@ -42,6 +69,13 @@ func (d *DiffReport) String() string { return str } +// GetDiffReport Creates a report of configuration differences +// +// The function compares two configuration objects from claim files, generating +// a DiffReport that includes field-by-field differences in the main +// configuration map and counts of abnormal events present in each file. It uses +// an external diff utility to compute the detailed comparison and returns the +// assembled report for further processing or display. 
func GetDiffReport(claim1Configurations, claim2Configurations *claim.Configurations) *DiffReport { return &DiffReport{ Config: diff.Compare("Cert Suite Configuration", claim1Configurations.Config, claim2Configurations.Config, nil), diff --git a/cmd/certsuite/claim/compare/diff/diff.go b/cmd/certsuite/claim/compare/diff/diff.go index fb85993c5..4ae8a8689 100644 --- a/cmd/certsuite/claim/compare/diff/diff.go +++ b/cmd/certsuite/claim/compare/diff/diff.go @@ -8,8 +8,12 @@ import ( "strings" ) -// Diffs holds the differences between two interface{} objects that have -// been obtained by unmarshalling JSON strings. +// Diffs Captures differences between two JSON objects +// +// This structure records fields that differ, as well as those present only in +// one of the compared claims. It stores the object name for contextual output +// and provides a method to format the differences into a readable table. The +// fields are populated by comparing flattened representations of each claim. type Diffs struct { // Name of the json object whose diffs are stored here. // It will be used when serializing the data in table format. @@ -21,32 +25,25 @@ type Diffs struct { FieldsInClaim2Only []string } -// FieldDIff holds the field path and the values from both claim files -// that have been found to be different. +// FieldDiff Represents a mismatch between two claim files +// +// This structure records the location of a differing field along with its value +// from each claim file. It is used during comparison to track which fields +// differ, enabling further processing or reporting. The field path indicates +// where in the document the discrepancy occurs. type FieldDiff struct { FieldPath string `json:"field"` Claim1Value interface{} `json:"claim1Value"` Claim2Value interface{} `json:"claim2Value"` } -// Stringer method. 
The output string is a table like this: -// : Differences -// FIELD CLAIM 1 CLAIM 2 -// /jsonpath/to/field1 value1 value2 -// /jsonpath/to/another/field2 value3 value4 -// ... -// -// : Only in CLAIM 1 -// /jsonpath/to/field/in/claim1/only -// ... -// -// : Only in CLAIM 2 -// /jsonpath/to/field/in/claim2/only -// ... +// Diffs.String Formats a readable report of claim differences // -// Where is replaced by the value of d.Name. -// The columns "FIELD" and "CLAIM 1" have a dynamic width that depends -// on the longest field path and longest value. +// The method builds a string that lists fields with differing values between +// two claims, as well as fields present only in one claim or the other. It +// calculates column widths based on longest field paths and values to align the +// table neatly. If no differences exist it displays a placeholder indicating +// none were found. func (d *Diffs) String() string { const ( noDiffs = "" @@ -110,13 +107,13 @@ func (d *Diffs) String() string { return str } -// Compares to interface{} objects obtained through json.Unmarshal() and returns -// a pointer to a Diffs object. -// A simple filtering of json subtrees can be achieved using the filters slice parameter. -// This might be helpful with big json trees that could have too many potential differences, -// but we want to get just the differences for some custom nodes/subtrees. -// E.g.: filters = []string{"labels"} : only the nodes/subtrees under all the -// labels nodes/branches will be traversed and compared. +// Compare Compares two JSON structures for differences +// +// This function takes two interface values that were previously unmarshaled +// from JSON, walks each tree to collect paths and values, then compares +// corresponding entries. It records mismatched values, fields present only in +// the first object, and fields present only in the second. Optional filters +// allow limiting comparison to specified subtrees. 
func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs { objectsDiffs := Diffs{Name: objectName} @@ -163,13 +160,24 @@ func Compare(objectName string, claim1Object, claim2Object interface{}, filters return &objectsDiffs } +// field represents a node in the traversal result +// +// This structure holds the full path to a value and the value itself as +// encountered during tree walking. The Path string records the hierarchical +// location using delimiters, while Value captures any type of data found at +// that point. It is used by the traversal routine to aggregate matching fields +// for comparison. type field struct { Path string Value interface{} } -// Helper function that traverses recursively a node to return a list -// of each field (leaf) path and its value. +// traverse recursively collects leaf paths and values from a nested data structure +// +// The function walks through maps, slices, or simple values, building a path +// string for each leaf node separated by slashes. It optionally filters the +// collected fields based on provided substrings in the path. The result is a +// slice of field structs containing the full path and the corresponding value. func traverse(node interface{}, path string, filters []string) []field { if node == nil { return nil diff --git a/cmd/certsuite/claim/compare/nodes/nodes.go b/cmd/certsuite/claim/compare/nodes/nodes.go index 959ff6a99..11198aa18 100644 --- a/cmd/certsuite/claim/compare/nodes/nodes.go +++ b/cmd/certsuite/claim/compare/nodes/nodes.go @@ -5,9 +5,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim" ) -// Structure that holds a summary of nodes roles and a slice of NodeDiffReports, -// one per node found in both claim files. In case one node only exists in one -// claim file, it will be marked as "not found in claim[1|2]". 
+// DiffReport Summarizes differences between two node claims +// +// It aggregates comparison results for nodes, CNI networks, CSI drivers, and +// hardware information into separate diff objects. Each field holds a report of +// changes or missing entries between the two provided claim files. The struct +// provides a consolidated view that can be rendered as a human‑readable +// string. type DiffReport struct { Nodes *diff.Diffs `json:"nodes"` CNI *diff.Diffs `json:"CNI"` @@ -15,9 +19,12 @@ type DiffReport struct { Hardware *diff.Diffs `json:"hardware"` } -// Stringer method to show in a table the the differences found on each node -// appearing on both claim files. If a node only appears in one claim file, it -// will be flagged as "not found in claim[1|2]" +// DiffReport.String Formats node differences into a readable table +// +// It builds a string starting with a header and separator, then appends the +// string representations of any non‑nil subreports for Nodes, CNI, CSI, and +// Hardware, each followed by a newline. The resulting text lists discrepancies +// found in cluster nodes across two claim files. func (d DiffReport) String() string { str := "CLUSTER NODES DIFFERENCES\n" str += "-------------------------\n\n" @@ -41,8 +48,13 @@ func (d DiffReport) String() string { return str } -// Generates a DiffReport from two pointers to claim.Nodes. The report consists -// of a diff.Diffs object per node's section (CNIs, CSIs & Hardware). +// GetDiffReport Creates a report of differences between two node claim sets +// +// This function takes pointers to two node claim structures and returns a +// DiffReport containing four diff objects: Nodes, CNIs, CSIs, and Hardware. +// Each field is produced by calling the Compare helper with appropriate data +// slices and optional filters for labels and annotations. The resulting report +// aggregates all differences for downstream display or analysis. 
func GetDiffReport(claim1Nodes, claim2Nodes *claim.Nodes) *DiffReport { return &DiffReport{ Nodes: diff.Compare("Nodes", claim1Nodes.NodesSummary, claim2Nodes.NodesSummary, []string{"labels", "annotations"}), diff --git a/cmd/certsuite/claim/compare/testcases/testcases.go b/cmd/certsuite/claim/compare/testcases/testcases.go index 8b30686f7..0f38281ea 100644 --- a/cmd/certsuite/claim/compare/testcases/testcases.go +++ b/cmd/certsuite/claim/compare/testcases/testcases.go @@ -7,20 +7,37 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim" ) +// TcResultsSummary provides a count of test case outcomes +// +// This structure holds three integer counters: how many tests passed, were +// skipped, and failed. It is populated by iterating over result strings and +// incrementing the corresponding field. The counts can be used to report +// overall test performance. type TcResultsSummary struct { Passed int Skipped int Failed int } +// TcResultDifference Represents a discrepancy between two claim results +// +// This structure holds the name of a test case along with the outcomes from two +// different claims. By comparing Claim1Result and Claim2Result, users can +// identify mismatches or confirm consistency across claim evaluations. type TcResultDifference struct { Name string Claim1Result string Claim2Result string } -// Holds the results summary and the list of test cases whose result -// is different. +// DiffReport Summarizes test result differences between two claim files +// +// This structure holds a summary of passed, skipped, and failed tests for each +// claim file, along with a list of individual test cases whose outcomes differ. +// It tracks the total number of differing test cases and provides a string +// representation that lists both the overall status counts and the specific +// differences. The data is used to report and compare results between two sets +// of claim executions. 
type DiffReport struct { Claim1ResultsSummary TcResultsSummary `json:"claimFile1ResultsSummary"` Claim2ResultsSummary TcResultsSummary `json:"claimFile2ResultsSummary"` @@ -29,8 +46,12 @@ type DiffReport struct { DifferentTestCasesResults int `json:"differentTestCasesResults"` } -// Helper function that iterates over resultsByTestSuite, which maps a test suite name to a list -// of test case results, to create a map with test case results. +// getTestCasesResultsMap Creates a map from test case identifiers to their execution state +// +// This helper traverses the provided test suite results, extracting each test +// case's unique ID and its current state. It builds a string-to-string mapping where +// keys are the IDs and values are the states. The resulting map is used by +// other functions to compare outcomes between different claim results. func getTestCasesResultsMap(testSuiteResults claim.TestSuiteResults) map[string]string { testCaseResults := map[string]string{} @@ -41,10 +62,12 @@ func getTestCasesResultsMap(testSuiteResults claim.TestSuiteResults) map[string] return testCaseResults } -// Given two results helper maps whose keys are test case names, returns a slice of -// all the test cases (sorted) names found in both maps, without repetitions. -// If one test case appears in both map, it will only appear once in the -// output slice. +// getMergedTestCasesNames Collects all unique test case names from two result maps +// +// The function iterates over each input map, adding every key to a temporary +// set to eliminate duplicates. After gathering the keys, it converts the set +// into a slice and sorts the entries alphabetically. The sorted list of test +// case names is returned for further processing. 
func getMergedTestCasesNames(results1, results2 map[string]string) []string { testCasesNamesMap := map[string]struct{}{} @@ -66,7 +89,12 @@ func getMergedTestCasesNames(results1, results2 map[string]string) []string { return names } -// Helper function to fill a TcResultsSummary struct from a results map (tc name -> result). +// getTestCasesResultsSummary Aggregates test case results into a summary count +// +// The function iterates over a mapping of test case names to result strings and +// tallies the number of passed, skipped, and failed cases. It increments +// counters in a TcResultsSummary structure based on predefined result +// constants. The populated summary is then returned for use elsewhere. func getTestCasesResultsSummary(results map[string]string) TcResultsSummary { summary := TcResultsSummary{} @@ -84,9 +112,13 @@ func getTestCasesResultsSummary(results map[string]string) TcResultsSummary { return summary } -// Process the results from different claim files and return the DiffReport. -// In case one tc name does not exist in the other claim file, the result will -// be marked as "not found" in the table. +// GetDiffReport Creates a report of differences between two sets of test results +// +// The function compares test case outcomes from two claim files, marking any +// missing cases as "not found". It builds a list of differing results, counts +// the number of discrepancies, and summarizes each claim’s passed, skipped, +// and failed totals. The returned DiffReport contains this information for +// further analysis. func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport { const tcResultNotFound = "not found" @@ -128,20 +160,12 @@ func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffRep return &report } -// Stringer method for the DiffReport. 
Will return a string with two tables: -// Test cases summary table: -// STATUS # in CLAIM-1 # in CLAIM-2 -// passed 22 21 -// skipped 62 62 -// failed 3 4 +// DiffReport.String Formats a detailed report of test case comparisons // -// Test cases with different results table: -// TEST CASE NAME CLAIM-1 CLAIM-2 -// access-control-net-admin-capability-check failed passed -// access-control-pod-automount-service-account-token passed failed -// access-control-pod-role-bindings passed failed -// access-control-pod-service-account passed failed -// ... +// The method builds a human‑readable string containing two tables: one +// summarizing the count of passed, skipped and failed cases for each claim, and +// another listing individual test cases that differ between the claims. It uses +// formatted printing to align columns and returns the combined text. func (r *DiffReport) String() string { const ( tcDiffRowFmt = "%-60s%-10s%-s\n" diff --git a/cmd/certsuite/claim/compare/versions/versions.go b/cmd/certsuite/claim/compare/versions/versions.go index 184c55139..82ef893b4 100644 --- a/cmd/certsuite/claim/compare/versions/versions.go +++ b/cmd/certsuite/claim/compare/versions/versions.go @@ -8,10 +8,23 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff" ) +// DiffReport Represents the differences between two claim versions +// +// This struct holds a pointer to a diff.Diffs object that captures all detected +// changes when comparing two sets of claim versions. The String method formats +// those differences into a human-readable string, or returns an empty +// representation if no differences exist. type DiffReport struct { Diffs *diff.Diffs `json:"differences"` } +// DiffReport.String Returns a formatted string representation of the diff report +// +// When called on a DiffReport instance, this method checks if its internal +// Diffs field is nil. 
If it is, it creates an empty Diffs object and returns +// its string form; otherwise, it delegates to the existing Diffs object's +// String method. The resulting string summarizes the differences captured by +// the report. func (d *DiffReport) String() string { if d.Diffs == nil { return (&diff.Diffs{}).String() @@ -20,6 +33,13 @@ func (d *DiffReport) String() string { return d.Diffs.String() } +// Compare compares two claim version structures +// +// The function serializes each versions object to JSON, then unmarshals them +// into generic interface values so they can be compared by the diff package. It +// returns a report containing differences between the two sets of versions. +// Errors during marshaling or unmarshaling cause the program to log a fatal +// message. func Compare(claim1Versions, claim2Versions *officialClaimScheme.Versions) *DiffReport { // Convert the versions struct type to agnostic map[string]interface{} objects so // it can be compared using the diff.Compare func. diff --git a/cmd/certsuite/claim/show/csv/csv.go b/cmd/certsuite/claim/show/csv/csv.go index 1f9d40d26..49390e9b9 100644 --- a/cmd/certsuite/claim/show/csv/csv.go +++ b/cmd/certsuite/claim/show/csv/csv.go @@ -44,6 +44,13 @@ with column header: } ) +// NewCommand Creates a command for exporting claim data to CSV +// +// This function configures a command with required flags for the claim file +// path, CNF name, and CNF type mapping file, as well as an optional flag to +// include a header row. It marks each required flag, handling any errors by +// logging a fatal message. The configured command is then returned for use in +// the CLI. 
func NewCommand() *cobra.Command { CSVDumpCommand.Flags().StringVarP(&claimFilePathFlag, "claim-file", "c", "", "Required: path to claim file.", @@ -82,6 +89,13 @@ func NewCommand() *cobra.Command { return CSVDumpCommand } +// dumpCsv Exports claim results to CSV format +// +// This function parses a claim file, validates its version, loads CNF type +// mappings, builds a catalog map, and then constructs CSV records for each test +// result. It writes the assembled data to standard output using a CSV writer, +// handling any errors that occur during parsing or writing. The function +// returns nil on success or an error describing what failed. func dumpCsv(_ *cobra.Command, _ []string) error { // set log output to stderr log.SetOutput(os.Stderr) @@ -133,8 +147,14 @@ func dumpCsv(_ *cobra.Command, _ []string) error { return nil } -// dumps claim file in CSV format. -// adds remediation, mandatory/optional, CNFType to the claim data +// buildCSV Creates CSV rows from claim data with remediation, CNF type, and optional header +// +// It iterates over each test result in the claim schema, building a record that +// includes operator versions, test identifiers, suite names, descriptions, +// states, timestamps, skip reasons, check details, captured output, remediation +// actions, CNF type, and mandatory/optional status. If a header flag is set, a +// header row is added first. The function returns a slice of string slices +// ready for CSV writing. 
func buildCSV(claimScheme *claim.Schema, cnfType string, catalogMap map[string]claimschema.TestCaseDescription) (resultsCSVRecords [][]string) { if cnfType == "" { cnfType = identifiers.NonTelco @@ -187,7 +207,13 @@ func buildCSV(claimScheme *claim.Schema, cnfType string, catalogMap map[string]c return resultsCSVRecords } -// loads records from a CSV +// loadCNFTypeMap Loads a mapping of CNF names to their types +// +// This routine opens the specified file, reads its contents, and unmarshals the +// data into a string-to-string map that associates each CNF name with its +// corresponding type. If any step fails—opening, reading, or decoding—the +// function returns an error describing the issue; otherwise it supplies the +// populated map. func loadCNFTypeMap(path string) (CNFTypeMap map[string]string, err error) { //nolint:gocritic // CNF is a valid acronym // Open the CSV file file, err := os.Open(path) @@ -213,7 +239,12 @@ func loadCNFTypeMap(path string) (CNFTypeMap map[string]string, err error) { //n return CNFTypeMap, nil } -// builds a catalog map indexed by test case ID +// buildCatalogByID Creates a map of test case descriptions keyed by ID +// +// It initializes an empty mapping, then iterates over the global catalog +// collection, inserting each entry into the map using its identifier as the +// key. The resulting map is returned for quick lookup of test cases by their +// unique IDs. 
func buildCatalogByID() (catalogMap map[string]claimschema.TestCaseDescription) { catalogMap = make(map[string]claimschema.TestCaseDescription) diff --git a/cmd/certsuite/claim/show/failures/failures.go b/cmd/certsuite/claim/show/failures/failures.go index cf8defa17..411385a7f 100644 --- a/cmd/certsuite/claim/show/failures/failures.go +++ b/cmd/certsuite/claim/show/failures/failures.go @@ -104,6 +104,13 @@ var availableOutputFormats = []string{ outputFormatText, outputFormatJSON, } +// NewCommand Creates a command to display claim failures +// +// The function builds a Cobra command that requires a path to an existing claim +// file and optionally accepts a comma‑separated list of test suites to filter +// the output. It also allows specifying the output format, defaulting to plain +// text but supporting JSON. Errors during flag configuration are logged +// fatally, after which the command is returned for registration. func NewCommand() *cobra.Command { showFailuresCommand.Flags().StringVarP(&claimFilePathFlag, "claim", "c", "", "Required: Existing claim file path.", @@ -130,8 +137,13 @@ func NewCommand() *cobra.Command { return showFailuresCommand } -// Parses the comma separated list to create a helper map, whose -// keys are the test suite names. +// parseTargetTestSuitesFlag Creates a map of test suite names from the flag input +// +// This function checks if the global test suites flag is empty; if so, it +// returns nil. Otherwise, it splits the comma-separated string into individual +// suite names, trims whitespace from each, and stores them as keys in a boolean +// map set to true. The resulting map is used elsewhere to quickly determine +// whether a given test suite should be processed. func parseTargetTestSuitesFlag() map[string]bool { if testSuitesFlag == "" { return nil @@ -145,8 +157,12 @@ func parseTargetTestSuitesFlag() map[string]bool { return targetTestSuites } -// Parses the output format flag. 
Returns error if the format -// does not appear in the list "availableOutputFormats". +// parseOutputFormatFlag Validates the output format flag +// +// It checks whether the user-specified format matches one of the supported +// formats listed in "availableOutputFormats". If a match is found, it returns +// that format string with no error; otherwise it returns an empty string and an +// error explaining the invalid value and listing the allowed options. func parseOutputFormatFlag() (string, error) { for _, outputFormat := range availableOutputFormats { if outputFormat == outputFormatFlag { @@ -157,8 +173,14 @@ func parseOutputFormatFlag() (string, error) { return "", fmt.Errorf("invalid output format flag %q - available formats: %v", outputFormatFlag, availableOutputFormats) } -// Parses the claim's test case's checkDetails field and creates a list -// of NonCompliantObject's. +// getNonCompliantObjectsFromFailureReason parses a test case failure payload into non‑compliant objects +// +// The function receives the JSON string that represents a test case’s check +// details, decodes it to extract compliant and non‑compliant report objects, +// and then builds a slice of NonCompliantObject structures. It returns the +// constructed list along with an error if the payload cannot be decoded. The +// output includes each object's type, reason, and any additional specification +// fields. func getNonCompliantObjectsFromFailureReason(checkDetails string) ([]NonCompliantObject, error) { objects := struct { Compliant []testhelper.ReportObject `json:"CompliantObjectsOut"` @@ -184,7 +206,13 @@ func getNonCompliantObjectsFromFailureReason(checkDetails string) ([]NonComplian return nonCompliantObjects, nil } -// Prints the failures in plain text. +// printFailuresText Prints a plain text summary of failed test suites and cases +// +// The function iterates over each test suite, outputting its name and then +// details for every failing test case. 
For each case it shows the name, +// description, and either a single failure reason or a list of non‑compliant +// objects with type, reason, and spec fields. The information is formatted +// using printf statements to produce a readable report. func printFailuresText(testSuites []FailedTestSuite) { for _, ts := range testSuites { fmt.Printf("Test Suite: %s\n", ts.TestSuiteName) @@ -216,7 +244,13 @@ } } -// Prints the failures in json format. +// printFailuresJSON Outputs failures as indented JSON +// +// The function receives a slice of failure objects, wraps them in a struct with +// a field named "testSuites", marshals this structure to pretty‑printed JSON, +// and prints the result. If marshalling fails it logs a fatal error and exits. +// The pretty‑printed JSON is written to standard output with a trailing +// newline. func printFailuresJSON(testSuites []FailedTestSuite) { type ClaimFailures struct { Failures []FailedTestSuite `json:"testSuites"` @@ -231,10 +265,13 @@ fmt.Printf("%s\n", string(bytes)) } -// Creates a list of FailingTestSuite from the results parsed from a claim file. The parsed -// results in claimResultsByTestSuite var maps a test suite name to a list of TestCaseResult, -// which are processed to create the list of FailingTestSuite, filtering out those test suites -// that don't exist in the targetTestSuites map. +// getFailedTestCasesByTestSuite generates a list of failing test suites from parsed claim data +// +// The function iterates over test suite results, filtering by the target suites +// if specified. For each failed test case it extracts details, attempts to +// parse non‑compliant objects, and records either the parsed objects or the +// raw failure reason on error. It returns a slice of structures that represent +// only those test suites containing at least one failing test case. 
func getFailedTestCasesByTestSuite(claimResultsByTestSuite map[string][]*claim.TestCaseResult, targetTestSuites map[string]bool) []FailedTestSuite { testSuites := []FailedTestSuite{} for testSuite := range claimResultsByTestSuite { @@ -277,7 +314,12 @@ func getFailedTestCasesByTestSuite(claimResultsByTestSuite map[string][]*claim.T return testSuites } -// Main function for the `show failures` subcommand. +// showFailures Displays failed test cases from a claim file +// +// The function reads the claim file, validates its format version, groups +// results by test suite, filters for failures, and outputs them either in JSON +// or plain text based on a flag. It returns an error if parsing or validation +// fails. func showFailures(_ *cobra.Command, _ []string) error { outputFormat, err := parseOutputFormatFlag() if err != nil { diff --git a/cmd/certsuite/claim/show/failures/types.go b/cmd/certsuite/claim/show/failures/types.go index fbb76445a..b4738a008 100644 --- a/cmd/certsuite/claim/show/failures/types.go +++ b/cmd/certsuite/claim/show/failures/types.go @@ -2,18 +2,36 @@ package failures import "fmt" -// Custom object type needed to provide a different JSON serialization than -// the one in claim's test cases' skipReason field. +// NonCompliantObject represents a non‑compliant object extracted from failure data +// +// This type holds information about objects that failed compliance checks, +// including the object's kind, the reason for failure, and its specification +// details. The Spec field aggregates key/value pairs representing the object's +// configuration at the time of the check. Instances are created by parsing JSON +// output from a compliance test and converting it into a more convenient +// structure for reporting. 
type NonCompliantObject struct { Type string `json:"type"` Reason string `json:"reason"` Spec ObjectSpec `json:"spec"` } +// ObjectSpec Represents a collection of key/value pairs for JSON output +// +// This structure holds an ordered list of fields where each field has a string +// key and value. It provides methods to add new fields and to marshal the +// collection into a valid JSON object. If no fields are present, marshaling +// returns an empty JSON object. type ObjectSpec struct { Fields []struct{ Key, Value string } } +// ObjectSpec.AddField Adds a key/value pair to the object's specification +// +// This method appends a new field containing the provided key and value strings +// to the spec's internal slice of fields. It does not return any value or +// perform validation, simply extending the slice. The updated spec can then be +// used elsewhere to represent object metadata. func (spec *ObjectSpec) AddField(key, value string) { spec.Fields = append(spec.Fields, struct { Key string @@ -21,6 +39,12 @@ func (spec *ObjectSpec) AddField(key, value string) { }{key, value}) } +// ObjectSpec.MarshalJSON Converts the ObjectSpec into JSON bytes +// +// The method checks if there are any fields; if none, it returns an empty JSON +// object. Otherwise, it builds a JSON string by iterating over each field and +// formatting key/value pairs as quoted strings separated by commas. The +// resulting byte slice is returned with no error. func (spec *ObjectSpec) MarshalJSON() ([]byte, error) { if len(spec.Fields) == 0 { return []byte("{}"), nil @@ -39,6 +63,12 @@ func (spec *ObjectSpec) MarshalJSON() ([]byte, error) { return []byte(specStr), nil } +// FailedTestCase Represents a test case that did not pass +// +// It holds the name and description of the test case, optional details about +// the check, and any objects that failed to meet compliance criteria. The +// structure is used to aggregate failure information for reporting or logging +// purposes. 
type FailedTestCase struct { TestCaseName string `json:"name"` TestCaseDescription string `json:"description"` @@ -46,6 +76,11 @@ type FailedTestCase struct { NonCompliantObjects []NonCompliantObject `json:"nonCompliantObjects,omitempty"` } +// FailedTestSuite represents a test suite with failures +// +// This struct holds the name of a test suite and a list of its failing test +// cases. It is used when reporting or displaying results, allowing consumers to +// see which specific tests failed within each suite. type FailedTestSuite struct { TestSuiteName string `json:"name"` FailingTestCases []FailedTestCase `json:"failures"` diff --git a/cmd/certsuite/claim/show/show.go b/cmd/certsuite/claim/show/show.go index 07bb39301..25718527d 100644 --- a/cmd/certsuite/claim/show/show.go +++ b/cmd/certsuite/claim/show/show.go @@ -13,6 +13,13 @@ var ( } ) +// NewCommand Creates the show command with its subcommands +// +// This function constructs a Cobra command responsible for displaying claim +// information. It registers two child commands—one that shows failures and +// another that outputs CSV dumps—by adding them to the parent command before +// returning it. The returned command can then be integrated into the larger CLI +// hierarchy. func NewCommand() *cobra.Command { showCommand.AddCommand(failures.NewCommand()) showCommand.AddCommand(csv.NewCommand()) diff --git a/cmd/certsuite/generate/catalog/catalog.go b/cmd/certsuite/generate/catalog/catalog.go index 95702ff91..f407d56cc 100644 --- a/cmd/certsuite/generate/catalog/catalog.go +++ b/cmd/certsuite/generate/catalog/catalog.go @@ -59,11 +59,25 @@ var ( } ) +// Entry represents a test entry with its name and identifier +// +// This struct holds the display name of a test and an associated identifier +// that includes the test's URL and version information. It is used as the value +// type in catalogs generated from lists of identifiers, grouping entries by +// their suite names. 
Both fields are unexported, keeping the internal +// representation hidden; external packages can reference the Entry type but +// cannot construct populated instances directly. type Entry struct { testName string identifier claim.Identifier // {url and version} } +// catalogSummary Collects test suite statistics for catalog generation +// +// This structure aggregates counts of total tests, total suites, and +// per‑suite test numbers while also tracking optional versus mandatory tests +// for each scenario category. The fields are populated during catalog creation +// and used to format markdown summaries. type catalogSummary struct { totalSuites int totalTests int
-// Decompose claim.Identifier urls like http://redhat-best-practices-for-k8s.com/testcases/SuiteName/TestName -// to get SuiteNames and TestNames and build a "more printable" catalogue in the way of: +// CreatePrintableCatalogFromIdentifiers organizes identifiers into a suite‑based map // -// { -// suiteNameA: [ -// {testName, identifier{url, version}}, -// {testName2, identifier{url, version}} -// ] -// suiteNameB: [ -// {testName3, identifier{url, version}}, -// {testName4, identifier{url, version}} -// ] -// } +// The function receives a slice of identifier objects, extracts each +// identifier’s suite name, and groups the identifiers by that suite. For +// every entry it creates an Entry containing the test name and the original +// identifier, appending it to the corresponding slice in the result map. The +// returned map maps suite names to lists of these entries, ready for further +// processing or display. func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry { catalog := make(map[string][]Entry) // we need the list of suite's names @@ -108,6 +119,12 @@ func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][ return catalog } +// GetSuitesFromIdentifiers Creates a list of unique test suite names from identifiers +// +// This function iterates over a slice of identifier objects, collecting each +// identifier's Suite field into a temporary slice. It then removes duplicate +// suite names by calling a helper that returns only unique values. The +// resulting slice of distinct suite names is returned. 
func GetSuitesFromIdentifiers(keys []claim.Identifier) []string { var suites []string for _, i := range keys { @@ -116,6 +133,12 @@ func GetSuitesFromIdentifiers(keys []claim.Identifier) []string { return arrayhelper.Unique(suites) } +// scenarioIDToText Converts scenario identifiers to readable text +// +// The function maps a string identifier to a human‑friendly scenario name +// using predefined constants. If the identifier does not match any known case, +// it returns "Unknown Scenario". The returned value is used throughout catalog +// generation for display and labeling. func scenarioIDToText(id string) (text string) { switch id { case identifiers.FarEdge: @@ -132,6 +155,14 @@ func scenarioIDToText(id string) (text string) { return text } +// addPreflightTestsToCatalog Adds preflight test entries to the catalog +// +// The function retrieves operator and container preflight tests via the +// preflight library, collects their metadata, and inserts each as a catalog +// entry with default remediation and classification values. It logs errors if +// artifact creation or list retrieval fails but continues processing remaining +// tests. Each test is added under the common preflight suite key, ensuring they +// appear in the generated test case documentation. func addPreflightTestsToCatalog() { const dummy = "dummy" // Create artifacts handler @@ -183,7 +214,11 @@ func addPreflightTestsToCatalog() { } } -// outputTestCases outputs the Markdown representation for test cases from the catalog to stdout. +// outputTestCases generates Markdown for test case catalog +// +// It compiles all test cases, sorts them by ID and suite, builds a table of +// properties and impact statements, and returns the formatted string along with +// statistics about tests per scenario and suite. 
func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen // Adds Preflight tests to catalog addPreflightTestsToCatalog() @@ -284,6 +319,13 @@ func outputTestCases() (outString string, summary catalogSummary) { //nolint:fun return outString, summary } +// summaryToMD Generates a markdown formatted test case summary +// +// The function accepts a catalogSummary structure containing totals and +// per-suite/per-scenario counts. It builds a string with headings, total +// numbers, tables of suites, and separate sections for each scenario’s +// mandatory and optional tests, using sorted keys to ensure consistent +// ordering. func summaryToMD(aSummary catalogSummary) (out string) { const tableHeader = "|---|---|---|\n" out += "## Test cases summary\n\n" @@ -321,6 +363,11 @@ func summaryToMD(aSummary catalogSummary) (out string) { return out } +// outputJS Prints the classification data as formatted JSON +// +// The function marshals a global classification structure into pretty-printed +// JSON. If marshalling fails, it logs an error and exits early. Otherwise, it +// writes the resulting string to standard output. func outputJS() { out, err := json.MarshalIndent(identifiers.Classification, "", " ") if err != nil { @@ -329,6 +376,14 @@ func outputJS() { } fmt.Printf("classification= %s ", out) } + +// generateJS Outputs classification data as formatted JSON +// +// This routine triggers the generation of JavaScript-friendly output by +// invoking a helper that marshals classification identifiers into indented +// JSON. The helper logs any marshalling error itself and prints the resulting +// string to standard output. This function performs no error handling of its +// own and always returns nil. 
func generateJS(_ *cobra.Command, _ []string) error { // process the test cases outputJS() @@ -336,6 +391,15 @@ func generateJS(_ *cobra.Command, _ []string) error { return nil } +// outputIntro Generates introductory markdown for the catalog +// +// This function builds a Markdown header that introduces the Red Hat Best +// Practices Test Suite for Kubernetes catalog, including a title and +// descriptive paragraph. It concatenates static strings containing HTML +// comments to disable specific linting rules, the main heading, and a paragraph +// explaining the test areas, mandatory tests, and workload scenarios. The +// resulting string is returned for inclusion at the top of generated +// documentation. func outputIntro() (out string) { headerStr := "\n" + @@ -350,6 +414,13 @@ func outputIntro() (out string) { return headerStr + introStr } +// outputSccCategories Provides a Markdown section describing security context categories +// +// The function builds a string containing a header, an introductory note, and +// four subsections that explain different SCC scenarios for Kubernetes +// workloads. Each subsection lists the expected capabilities or restrictions +// associated with that category. The resulting text is returned as a single +// string. func outputSccCategories() (sccCategories string) { sccCategories = "\n## Security Context Categories\n" @@ -393,7 +464,12 @@ func outputSccCategories() (sccCategories string) { return sccCategories + intro + firstCat + secondCat + thirdCat + fourthCat } -// runGenerateMarkdownCmd generates a markdown test catalog. +// runGenerateMarkdownCmd Produces a markdown catalog of test cases +// +// It gathers introductory text, formats each test case with metadata and impact +// statements, builds a summary table, appends security context categories, then +// writes the combined output to standard output. The function returns no error +// unless writing fails. 
func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error { // prints intro intro := outputIntro() @@ -408,7 +484,12 @@ func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error { return nil } -// Execute executes the "catalog" CLI. +// NewCommand Creates a catalog generation command +// +// This function builds a new command for the generate tool, adding +// sub‑commands that produce markdown documentation and classification files. +// It returns the fully constructed command ready to be attached to the main CLI +// tree. func NewCommand() *cobra.Command { generateCmd.AddCommand(markdownGenerateCmd) diff --git a/cmd/certsuite/generate/config/config.go b/cmd/certsuite/generate/config/config.go index 974faa938..3e4a66b4f 100644 --- a/cmd/certsuite/generate/config/config.go +++ b/cmd/certsuite/generate/config/config.go @@ -15,11 +15,23 @@ import ( "gopkg.in/yaml.v3" ) +// configOption Represents a configuration setting with its description +// +// This structure holds two text fields: one that specifies the name of an +// option, and another that provides explanatory help for that option. It is +// used internally to map command-line flags or configuration keys to +// user-facing descriptions. type configOption struct { Option string Help string } +// NewCommand Creates the configuration subcommand +// +// This function returns a preconfigured cobra.Command that provides options for +// generating or managing configuration files within the application. It does +// not take any arguments and simply returns the command instance that has been +// set up elsewhere in the package. func NewCommand() *cobra.Command { return generateConfigCmd } @@ -46,6 +58,12 @@ var templates = &promptui.SelectTemplates{ {{ .Help }}`, } +// generateConfig Launches an interactive menu for managing configuration +// +// When invoked, this routine displays a prompt with options to create, view, +// save, or exit the configuration workflow. 
It loops until the user selects +// quit, calling helper functions to handle each action. Errors during prompt +// execution are logged and cause an early return. func generateConfig() { mainMenu := []configOption{ {Option: create, Help: createConfigHelp}, @@ -82,6 +100,13 @@ func generateConfig() { } } +// createConfiguration Starts the interactive configuration menu +// +// The function presents a list of configuration categories such as resources, +// exceptions, settings, and an option to return to the previous menu. It uses a +// prompt loop that displays the choices and handles user selection by invoking +// dedicated sub‑configuration functions. Errors during the prompt are logged +// and cause an early exit from the routine. func createConfiguration() { createMenu := []configOption{ {Option: certSuiteResources, Help: certSuiteResourcesHelp}, @@ -121,6 +146,12 @@ func createConfiguration() { } } +// showConfiguration Displays the current configuration in YAML format +// +// The function serializes a TestConfiguration object into YAML and prints it to +// standard output, surrounded by header and footer lines for readability. If +// marshaling fails, it logs an error message and exits without printing +// anything. func showConfiguration(config *configuration.TestConfiguration) { configYaml, err := yaml.Marshal(config) if err != nil { @@ -132,6 +163,13 @@ func showConfiguration(config *configuration.TestConfiguration) { fmt.Println("=====================================================") } +// saveConfiguration Saves the current configuration to a YAML file +// +// The function converts a TestConfiguration struct into YAML, prompts the user +// for a filename with a default suggestion, writes the data to that file with +// appropriate permissions, and prints a success message. If any step +// fails—marshalling, prompting, or writing—it logs an error and aborts +// without returning a value. 
func saveConfiguration(config *configuration.TestConfiguration) { configYaml, err := yaml.Marshal(config) if err != nil { @@ -159,6 +197,13 @@ func saveConfiguration(config *configuration.TestConfiguration) { fmt.Println(color.GreenString("Configuration saved")) } +// createCertSuiteResourcesConfiguration Presents an interactive menu to configure resource selections +// +// The function displays a list of configuration options such as namespaces, pod +// labels, operator labels, CRD filters, and managed deployments. Users can +// select each option to provide input via prompts, which is then parsed and +// stored in the global configuration. Selecting "previousMenu" exits back to +// the higher‑level menu. func createCertSuiteResourcesConfiguration() { certSuiteResourcesOptions := []configOption{ {Option: namespaces, Help: namespacesHelp}, @@ -210,6 +255,14 @@ func createCertSuiteResourcesConfiguration() { } } +// createExceptionsConfiguration Presents an interactive menu to configure exception lists +// +// The routine builds a selection list of exception categories such as kernel +// taints, Helm charts, protocol names, services, and non‑scalable +// deployments. It uses promptui to allow the user to search and choose an +// option; upon selection it calls helper functions that read comma‑separated +// input from the terminal and populate global configuration slices. The process +// repeats until the user chooses to return to the previous menu. func createExceptionsConfiguration() { exceptionsOptions := []configOption{ {Option: kernelTaints, Help: kernelTaintsHelp}, @@ -261,6 +314,15 @@ func createExceptionsConfiguration() { } } +// createCollectorConfiguration prompts the user to select a collector configuration option +// +// The function presents an interactive menu of configuration options such as +// endpoint, executor identity, partner name, password, and an exit choice. 
It +// uses a searcher that filters options by matching input text ignoring case and +// spaces. When the user selects an item, the corresponding action is handled in +// a switch; currently only the exit option terminates the loop while other +// cases are placeholders for future implementation. +// //nolint:unused func createCollectorConfiguration() { collectorOptions := []configOption{ @@ -307,6 +369,13 @@ func createCollectorConfiguration() { } } +// createSettingsConfiguration Prompts user to configure Probe DaemonSet namespace +// +// The function presents a menu with an option to set the Probe DaemonSet +// namespace or return to the previous menu. When selected, it asks the user for +// a comma‑separated list of namespaces, parses the input, and assigns the +// first value to the global configuration. The loop continues until the user +// chooses to exit. func createSettingsConfiguration() { settingsOptions := []configOption{ {Option: probeDaemonSet, Help: probeDaemonSetHelp}, @@ -335,6 +404,13 @@ func createSettingsConfiguration() { } } +// getAnswer Collects a comma‑separated list of items from the user +// +// The function displays a prompt with syntax and example guidance, then reads a +// single line of text from standard input. It splits the entered string on +// commas, trims surrounding whitespace from each element, and returns the +// resulting slice of strings. If reading fails, it logs an error and returns +// nil. 
func getAnswer(prompt, syntax, example string) []string { fullPrompt := color.HiCyanString("%s\n", prompt) + color.CyanString("Syntax: ") + color.WhiteString("%s\n", syntax) + @@ -358,6 +434,12 @@ func getAnswer(prompt, syntax, example string) []string { return fields } +// loadNamespaces Stores selected namespaces in the configuration +// +// This routine receives a slice of namespace names, clears any previously +// stored target namespaces, and then appends each provided name as a Namespace +// struct to the global configuration list. It modifies the config in place +// without returning a value. func loadNamespaces(namespaces []string) { certsuiteConfig.TargetNameSpaces = nil for _, namespace := range namespaces { @@ -366,16 +448,36 @@ func loadNamespaces(namespaces []string) { } } +// loadPodLabels Stores user-specified pod labels for later configuration +// +// This routine clears any existing pod label settings and then assigns the +// supplied slice of strings to the global configuration structure. It is +// invoked after the user selects pod labels from an interactive prompt, +// ensuring that only the chosen labels are retained. No value is returned; the +// effect is visible through the updated configuration state. func loadPodLabels(podLabels []string) { certsuiteConfig.PodsUnderTestLabels = nil certsuiteConfig.PodsUnderTestLabels = podLabels } +// loadOperatorLabels Updates the configuration with new operator labels +// +// This function replaces any previously stored operator labels in the global +// configuration with a fresh list provided as input. It first resets the +// current label collection to an empty state and then assigns the supplied +// slice, ensuring that subsequent operations use only the latest set of labels. 
func loadOperatorLabels(operatorLabels []string) { certsuiteConfig.OperatorsUnderTestLabels = nil certsuiteConfig.OperatorsUnderTestLabels = operatorLabels } +// loadCRDfilters parses CRD filter strings into configuration objects +// +// The function clears the existing list of CRD filters, then iterates over each +// supplied string. Each string is split on a slash to extract a name suffix and +// a boolean flag indicating scalability; it converts the second part to a bool, +// logs an error if conversion fails, and appends a new filter structure to the +// global configuration. func loadCRDfilters(crdFilters []string) { certsuiteConfig.CrdFilters = nil for _, crdFilterStr := range crdFilters { @@ -391,6 +493,14 @@ func loadCRDfilters(crdFilters []string) { } } +// loadManagedDeployments Populates the list of deployments to be managed +// +// The function receives a slice of deployment names, clears any previously +// stored deployments in the global configuration, and then iterates over each +// name. For every entry it creates a new ManagedDeploymentsStatefulsets object +// with the name field set, appending this object to the configuration’s +// ManagedDeployments list. This prepares the configuration for subsequent +// resource generation. func loadManagedDeployments(deployments []string) { certsuiteConfig.ManagedDeployments = nil for _, deployment := range deployments { @@ -399,6 +509,13 @@ func loadManagedDeployments(deployments []string) { } } +// loadManagedStatefulSets Stores user-selected StatefulSet names for later configuration +// +// This routine clears any previously stored StatefulSet entries in the global +// configuration, then iterates over each supplied name. For every name it +// creates a lightweight structure containing that name and appends it to the +// list of managed StatefulSets maintained by the application. The function has +// no return value but updates shared state used by subsequent setup steps. 
func loadManagedStatefulSets(statefulSets []string) { certsuiteConfig.ManagedStatefulsets = nil for _, statefulSet := range statefulSets { @@ -407,6 +524,12 @@ func loadManagedStatefulSets(statefulSets []string) { } } +// loadAcceptedKernelTaints stores a list of accepted kernel taints in the configuration +// +// The function clears any previously stored taint entries, then iterates over +// the supplied slice. For each taint string it creates a new struct containing +// the module name and appends it to the global configuration slice. The +// resulting list is used by the tool when evaluating cluster readiness. func loadAcceptedKernelTaints(taints []string) { certsuiteConfig.AcceptedKernelTaints = nil for _, taint := range taints { @@ -415,6 +538,12 @@ func loadAcceptedKernelTaints(taints []string) { } } +// loadHelmCharts Stores specified Helm chart names to skip during configuration +// +// The function receives a slice of chart identifiers and resets the global skip +// list before adding each entry as a new configuration object. Each name is +// wrapped in a struct that represents an item to be excluded from processing. +// The resulting list is used elsewhere to avoid handling those Helm charts. func loadHelmCharts(helmCharts []string) { certsuiteConfig.SkipHelmChartList = nil for _, chart := range helmCharts { @@ -423,16 +552,36 @@ func loadHelmCharts(helmCharts []string) { } } +// loadProtocolNames stores a list of acceptable protocol names +// +// This function replaces the current collection of valid protocol identifiers +// in the configuration with a new slice supplied by the caller. It first clears +// any previously stored values to avoid residual data, then assigns the +// provided slice directly to the global configuration variable. No return value +// is produced. 
func loadProtocolNames(protocolNames []string) { certsuiteConfig.ValidProtocolNames = nil certsuiteConfig.ValidProtocolNames = protocolNames } +// loadServices sets the list of services to ignore +// +// The function replaces any existing ignored service entries with a new slice +// provided as input. It first clears the current configuration's ignore list, +// then assigns the supplied list directly. The resulting configuration is used +// elsewhere to skip checks for these services. func loadServices(services []string) { certsuiteConfig.ServicesIgnoreList = nil certsuiteConfig.ServicesIgnoreList = services } +// loadNonScalableDeployments parses a list of non-scalable deployments to skip scaling tests +// +// The function receives an array of strings where each entry contains a +// deployment name and namespace separated by a slash. It clears any previously +// stored entries, then splits each string into its two parts; if the format is +// invalid it logs an error and aborts. Valid pairs are converted into +// configuration objects that are appended to the global skip list. func loadNonScalableDeployments(nonScalableDeployments []string) { certsuiteConfig.SkipScalingTestDeployments = nil for _, nonScalableDeploymentStr := range nonScalableDeployments { @@ -450,6 +599,13 @@ func loadNonScalableDeployments(nonScalableDeployments []string) { } } +// loadNonScalableStatefulSets Parses a list of non-scalable StatefulSet identifiers to skip scaling tests +// +// The function takes an array of strings, each expected in the form +// "name/namespace", splits them into name and namespace components, validates +// the format, and appends the parsed information to a global configuration +// slice. If any string does not contain exactly two parts separated by a slash, +// it logs an error and aborts further processing. 
func loadNonScalableStatefulSets(nonScalableStatefulSets []string) { certsuiteConfig.SkipScalingTestStatefulSets = nil for _, nonScalableStatefulSetStr := range nonScalableStatefulSets { @@ -467,6 +623,12 @@ func loadNonScalableStatefulSets(nonScalableStatefulSets []string) { } } +// loadProbeDaemonSetNamespace Sets the Probe DaemonSet namespace in the configuration +// +// The function receives a list of strings and assigns the first element to the +// ProbeDaemonSetNamespace field of the shared configuration object. It assumes +// that the slice contains at least one entry and uses it directly without +// validation or conversion. func loadProbeDaemonSetNamespace(namespace []string) { certsuiteConfig.ProbeDaemonSetNamespace = namespace[0] } diff --git a/cmd/certsuite/generate/feedback/feedback.go b/cmd/certsuite/generate/feedback/feedback.go index e8c080afa..620a91a4c 100644 --- a/cmd/certsuite/generate/feedback/feedback.go +++ b/cmd/certsuite/generate/feedback/feedback.go @@ -38,6 +38,13 @@ var ( } ) +// runGenerateFeedbackJsFile Creates a JavaScript file containing feedback data +// +// The function reads a JSON file with feedback information, parses it into a +// map, formats the data with indentation, and writes it to a new JavaScript +// file prefixed by 'feedback=' in the specified output directory. It logs the +// resulting string to standard output and returns any errors encountered during +// reading, unmarshalling, or writing. func runGenerateFeedbackJsFile(_ *cobra.Command, _ []string) error { dat, err := os.ReadFile(feedbackJSONFilePath) if err != nil { @@ -69,7 +76,11 @@ func runGenerateFeedbackJsFile(_ *cobra.Command, _ []string) error { return nil } -// Execute executes the "catalog" CLI. +// NewCommand Creates a command to generate feedback.js from a JSON file +// +// It defines flags for the input JSON path and output directory, marking both +// as required. If flag validation fails, it logs a fatal error. 
The function +// returns the configured cobra.Command instance. func NewCommand() *cobra.Command { generateFeedbackJsFile.Flags().StringVarP( &feedbackJSONFilePath, "feedback", "f", "", diff --git a/cmd/certsuite/generate/generate.go b/cmd/certsuite/generate/generate.go index 282b099c3..c793648a0 100644 --- a/cmd/certsuite/generate/generate.go +++ b/cmd/certsuite/generate/generate.go @@ -15,6 +15,13 @@ var ( } ) +// NewCommand Builds the generate CLI command with its subcommands +// +// This function initializes a cobra.Command for the generate group and +// registers several child commands—catalog, feedback, config, and QE coverage +// reporting—by calling their NewCommand functions. It then returns the fully +// configured parent command ready to be added to the main application root. The +// returned value is a pointer to the cobra.Command instance. func NewCommand() *cobra.Command { generate.AddCommand(catalog.NewCommand()) generate.AddCommand(feedback.NewCommand()) diff --git a/cmd/certsuite/generate/qe_coverage/qe_coverage.go b/cmd/certsuite/generate/qe_coverage/qe_coverage.go index 7dc8755a8..ce41ad32e 100644 --- a/cmd/certsuite/generate/qe_coverage/qe_coverage.go +++ b/cmd/certsuite/generate/qe_coverage/qe_coverage.go @@ -14,6 +14,12 @@ const ( multiplier = 100.0 ) +// TestCoverageSummaryReport Provides a snapshot of QE coverage across test suites +// +// This struct holds overall statistics such as total test cases, those covered +// by QE, and the percentage of coverage. It also maps each suite name to its +// own TestSuiteQeCoverage record for detailed per-suite information. The data +// is used by reporting functions to display coverage metrics. 
type TestCoverageSummaryReport struct { CoverageByTestSuite map[string]TestSuiteQeCoverage TotalCoveragePercentage float32 @@ -21,6 +27,12 @@ type TestCoverageSummaryReport struct { TestCasesWithQe int } +// TestSuiteQeCoverage Represents coverage statistics for a test suite +// +// This structure holds counts of total test cases, how many include QE-specific +// tests, and the calculated percentage coverage. It also tracks any test cases +// that are not yet implemented. The data can be used to assess overall quality +// and identify gaps in QE integration. type TestSuiteQeCoverage struct { TestCases int TestCasesWithQe int @@ -28,6 +40,12 @@ type TestSuiteQeCoverage struct { NotImplementedTestCases []string } +// NewCommand Creates a command to report QE test coverage +// +// The function builds a new command instance that includes a persistent string +// flag named "suitename" for filtering coverage output by suite name. It +// returns this configured command so it can be added to the parent generate +// command hierarchy. func NewCommand() *cobra.Command { qeCoverageReportCmd.PersistentFlags().String("suitename", "", "Displays the remaining tests not covered by QE for the specified suite name.") @@ -58,6 +76,12 @@ var ( } ) +// showQeCoverageForTestCaseName Displays QE coverage statistics for a specified test suite +// +// The function prints the name of the test suite, total number of test cases, +// overall coverage percentage, and how many are not covered by QE. It then +// reports whether all tests have QE coverage or lists any unimplemented test +// cases in detail. 
func showQeCoverageForTestCaseName(suiteName string, qeCoverage TestCoverageSummaryReport) { tsCoverage := qeCoverage.CoverageByTestSuite[suiteName] @@ -75,6 +99,13 @@ func showQeCoverageForTestCaseName(suiteName string, qeCoverage TestCoverageSumm fmt.Println() } +// GetQeCoverage Calculates overall and per-suite QE coverage statistics +// +// The function iterates over a catalog of test case descriptions, counting +// total cases, those marked for QE, and noting which are not implemented. It +// aggregates these counts by test suite, computing a percentage coverage for +// each suite using a multiplier factor. Finally, it returns a summary report +// containing per-suite data, overall coverage, and total counts. func GetQeCoverage(catalog map[claim.Identifier]claim.TestCaseDescription) TestCoverageSummaryReport { totalTcs := 0 totalTcsWithQe := 0 @@ -120,6 +151,13 @@ func GetQeCoverage(catalog map[claim.Identifier]claim.TestCaseDescription) TestC } } +// showQeCoverageSummaryReport Displays a formatted report of QE coverage statistics +// +// This routine calculates overall and per-test-suite coverage by calling +// GetQeCoverage, then sorts the suite names alphabetically. It prints total +// percentages and counts, followed by a table showing each suite’s name, its +// coverage percentage, total test cases, and how many are not covered. The +// output is formatted for console readability. func showQeCoverageSummaryReport() { qeCoverage := GetQeCoverage(identifiers.Catalog) diff --git a/cmd/certsuite/info/info.go b/cmd/certsuite/info/info.go index 347823e72..1ecccbe4d 100644 --- a/cmd/certsuite/info/info.go +++ b/cmd/certsuite/info/info.go @@ -25,6 +25,14 @@ var ( lineMaxWidth = 120 ) +// showInfo Displays detailed information about selected test cases +// +// The function retrieves a list of test case identifiers based on a label +// expression, optionally listing them if the --list flag is set. 
If not +// listing, it fetches full descriptions for each matching test case and prints +// a formatted box containing identifier, description, remediation, exceptions, +// and best practice references. Errors are returned if no matches or retrieval +// fails. func showInfo(cmd *cobra.Command, _ []string) error { testCaseFlag, _ := cmd.Flags().GetString("test-label") listFlag, _ := cmd.Flags().GetBool("list") @@ -58,6 +66,12 @@ func showInfo(cmd *cobra.Command, _ []string) error { return nil } +// NewCommand Creates the info subcommand with a required test-label flag +// +// The function configures an information command for the CLI by adding +// persistent string and boolean flags that filter and display test case data. +// It marks the test-label flag as mandatory, printing an error to standard +// error if this fails, and then returns the configured command object. func NewCommand() *cobra.Command { infoCmd.PersistentFlags().StringP("test-label", "t", "", "The test label filter to select the test cases to show information about") infoCmd.PersistentFlags().BoolP("list", "l", false, "Show only the names of the test cases for a given test label") @@ -69,6 +83,13 @@ func NewCommand() *cobra.Command { return infoCmd } +// printTestCaseInfoBox Displays a formatted information box for a test case +// +// The function builds a bordered text block that shows the test case ID, +// description, remediation steps, exceptions, and best‑practice references. +// It uses helper functions to center or left‑align lines, color headers, and +// wrap long paragraphs to fit within the terminal width. Each section is +// separated by horizontal borders made of dashes. 
func printTestCaseInfoBox(testCase *claim.TestCaseDescription) { // Test case identifier border := strings.Repeat("-", lineMaxWidth+linePadding) @@ -111,6 +132,13 @@ func printTestCaseInfoBox(testCase *claim.TestCaseDescription) { fmt.Printf("\n\n") } +// printTestList Displays a formatted list of test case identifiers +// +// The function receives a slice of strings representing test IDs, then prints a +// header, each ID within a bordered box, and a footer to visually separate the +// list. It uses fixed-width formatting so that all entries align consistently +// in the terminal output. No value is returned; the output is directed to +// standard output via fmt functions. func printTestList(testIDs []string) { fmt.Println("------------------------------------------------------------") fmt.Println("| TEST CASE SELECTION |") @@ -121,6 +149,12 @@ func printTestList(testIDs []string) { fmt.Println("------------------------------------------------------------") } +// getMatchingTestIDs retrieves test case identifiers that match a label expression +// +// The function initializes a label evaluator with the provided expression, +// loads all internal check definitions, then filters those checks to return +// only IDs whose labels satisfy the evaluator. It returns a slice of matching +// IDs or an error if initialization or filtering fails. func getMatchingTestIDs(labelExpr string) ([]string, error) { if err := checksdb.InitLabelsExprEvaluator(labelExpr); err != nil { return nil, fmt.Errorf("failed to initialize a test case label evaluator, err: %v", err) @@ -134,6 +168,14 @@ func getMatchingTestIDs(labelExpr string) ([]string, error) { return testIDs, nil } +// getTestDescriptionsFromTestIDs Retrieves test case descriptions for given IDs +// +// The function receives a slice of test ID strings, iterates over each ID, and +// searches a catalog map for matching entries by comparing the identifier +// field. 
When a match is found, the corresponding test case description is +// appended to a result slice. After processing all input IDs, it returns the +// slice containing all matched descriptions, which may be empty if no IDs were +// found. func getTestDescriptionsFromTestIDs(testIDs []string) []claim.TestCaseDescription { var testCases []claim.TestCaseDescription for _, test := range testIDs { @@ -147,6 +189,12 @@ func getTestDescriptionsFromTestIDs(testIDs []string) []claim.TestCaseDescriptio return testCases } +// adjustLineMaxWidth Adjusts the maximum line width for output +// +// The function checks if standard input is a terminal, then retrieves the +// terminal's width. If the width is smaller than the current maximum plus +// padding, it reduces the maximum line width accordingly to fit the display. No +// value is returned; the global variable is updated in place. func adjustLineMaxWidth() { if term.IsTerminal(0) { width, _, err := term.GetSize(0) diff --git a/cmd/certsuite/main.go b/cmd/certsuite/main.go index 9f9729d9b..532a2723c 100644 --- a/cmd/certsuite/main.go +++ b/cmd/certsuite/main.go @@ -15,6 +15,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/version" ) +// newRootCmd Creates the top-level command for the certsuite CLI +// +// This function initializes a new root command with usage information and +// attaches subcommands such as claim, generate, check, run, info, version, and +// upload. Each subcommand is constructed by calling its own NewCommand +// function. The resulting command object is returned to be executed in the main +// entry point. func newRootCmd() *cobra.Command { rootCmd := cobra.Command{ Use: "certsuite", @@ -32,6 +39,11 @@ func newRootCmd() *cobra.Command { return &rootCmd } +// main Runs the certsuite command-line interface +// +// It creates a root command with subcommands, executes it, and exits with an +// error code if execution fails. Errors are logged before terminating the +// program. 
func main() { rootCmd := newRootCmd() if err := rootCmd.Execute(); err != nil { diff --git a/cmd/certsuite/pkg/claim/claim.go b/cmd/certsuite/pkg/claim/claim.go index 9bd65b57d..60cd1da46 100644 --- a/cmd/certsuite/pkg/claim/claim.go +++ b/cmd/certsuite/pkg/claim/claim.go @@ -19,17 +19,35 @@ const ( TestCaseResultFailed = "failed" ) +// TestCaseRawResult Represents the outcome of a test case +// +// This structure holds the name of a test case along with its status, such as +// passed or failed. The fields carry dash-prefixed JSON tags ("-name" and +// "-status"); note that these serialize under those literal keys rather than +// omitting the fields — a bare `json:"-"` tag would be required to exclude +// them from output. It is used to aggregate results before further processing. type TestCaseRawResult struct { Name string `json:"-name"` Status string `json:"-status"` } +// TestCaseID represents a unique identifier for a test case +// +// This struct holds the ID, suite name, and tags of a test case as strings. The +// fields are exported and annotated for JSON serialization with keys "id", +// "suite", and "tags". It is used to track and reference individual test cases +// within the claim package. type TestCaseID struct { ID string `json:"id"` Suite string `json:"suite"` Tags string `json:"tags"` } +// TestCaseResult Stores the outcome of an individual test case +// +// This structure captures metadata about a single test execution, including its +// identifier, timing, state, and any failure details. It also holds catalog +// information such as best practice references, descriptions, exception +// handling notes, and remediation steps. The fields are organized to support +// serialization for reporting and analysis of test results. 
type TestCaseResult struct { CapturedTestOutput string `json:"capturedTestOutput"` CatalogInfo struct { @@ -57,6 +75,12 @@ type TestCaseResult struct { // Maps a test suite name to a list of TestCaseResult type TestSuiteResults map[string]TestCaseResult +// Nodes represents information about nodes in a cluster +// +// This struct holds aggregated data for the nodes, including their hardware +// details, network plugin configuration, CSI driver status, and an overall +// summary of node health or capabilities. Each field is defined as an interface +// to allow flexible JSON unmarshalling from various sources. type Nodes struct { NodesSummary interface{} `json:"nodeSummary"` CniNetworks interface{} `json:"cniPlugins"` @@ -64,18 +88,37 @@ type Nodes struct { CsiDriver interface{} `json:"csiDriver"` } +// TestOperator Describes a Kubernetes operator to be tested +// +// This struct holds the basic identifying information for an operator, +// including its name, the namespace it runs in, and its version string. It is +// used by testing utilities to reference specific operator deployments during +// validation or cleanup operations. type TestOperator struct { Name string `json:"name"` Namespace string `json:"namespace"` Version string `json:"version"` } +// Configurations Holds test configuration data +// +// This structure stores the overall configuration for a claim test, including +// any custom settings, a list of abnormal events to be monitored, and a +// collection of operators that should run during the test. Each field is +// designed to be marshalled to or from JSON, allowing easy integration with +// external tools or configuration files. 
type Configurations struct { Config interface{} `json:"Config"` AbnormalEvents []interface{} `json:"AbnormalEvents"` TestOperators []TestOperator `json:"testOperators"` } +// Schema Encapsulates an entire claim record +// +// The structure holds the top‑level claim object which includes configuration +// settings, node information, test suite outcomes, and schema versioning data. +// Each field maps directly to a JSON key in the claim file, allowing easy +// serialization and deserialization of the claim contents. type Schema struct { Claim struct { Configurations `json:"configurations"` @@ -87,6 +130,13 @@ type Schema struct { } `json:"claim"` } +// CheckVersion Validates the claim file format version against a supported version +// +// The function parses the supplied version string into a semantic version +// object, then compares it to the predefined supported claim format version. If +// parsing fails or if the two versions do not match exactly, an error is +// returned describing the issue. When the versions are equal, the function +// returns nil indicating success. func CheckVersion(version string) error { claimSemVersion, err := semver.NewVersion(version) if err != nil { @@ -106,6 +156,12 @@ func CheckVersion(version string) error { return nil } +// Parse Parses a JSON claim file into a structured schema +// +// The function reads the entire contents of the specified file path, handling +// any read errors with an informative message. It then unmarshals the JSON data +// into a Schema object, returning detailed errors if parsing fails. On success +// it returns a pointer to the populated Schema and a nil error. 
func Parse(filePath string) (*Schema, error) { fileBytes, err := os.ReadFile(filePath) if err != nil { diff --git a/cmd/certsuite/run/run.go b/cmd/certsuite/run/run.go index 98e00ccaf..a61c781c2 100644 --- a/cmd/certsuite/run/run.go +++ b/cmd/certsuite/run/run.go @@ -23,6 +23,13 @@ var ( } ) +// NewCommand Creates the run command with all persistent flags +// +// This function builds a cobra.Command that configures numerous persistent +// options for executing the test suite, such as output location, timeout, +// configuration files, kubeconfig, server mode, logging, data collection, and +// integration with external services. It registers each flag with default +// values and help text, then returns the fully configured command instance. func NewCommand() *cobra.Command { runCmd.PersistentFlags().StringP("output-dir", "o", "results", "The directory where the output artifacts will be placed") runCmd.PersistentFlags().StringP("label-filter", "l", "none", "Label expression to filter test cases (e.g. --label-filter 'access-control && !access-control-sys-admin-capability')") @@ -56,6 +63,14 @@ func NewCommand() *cobra.Command { return runCmd } +// initTestParamsFromFlags initializes test configuration from command line flags +// +// This function reads a variety of flags provided to the CLI command and stores +// their values in a shared test parameters structure used throughout the +// application. It ensures that the output directory exists, creating it if +// necessary, and parses a timeout value with a default fallback. If any +// filesystem or parsing errors occur, an error is returned for the caller to +// handle. 
func initTestParamsFromFlags(cmd *cobra.Command) error { testParams := configuration.GetTestParameters() @@ -111,6 +126,13 @@ func initTestParamsFromFlags(cmd *cobra.Command) error { return nil } + +// runTestSuite Initializes test parameters and executes the suite in either server or standalone mode +// +// The function reads command flags to set up test configuration, then checks if +// a web‑server mode is requested. In server mode it starts an HTTP listener +// serving results; otherwise it runs the certification suite locally, handling +// startup, execution, shutdown, and error reporting. func runTestSuite(cmd *cobra.Command, _ []string) error { err := initTestParamsFromFlags(cmd) if err != nil { diff --git a/cmd/certsuite/upload/results_spreadsheet/drive_utils.go b/cmd/certsuite/upload/results_spreadsheet/drive_utils.go index c410f06bc..66def701e 100644 --- a/cmd/certsuite/upload/results_spreadsheet/drive_utils.go +++ b/cmd/certsuite/upload/results_spreadsheet/drive_utils.go @@ -10,6 +10,14 @@ import ( "google.golang.org/api/sheets/v4" ) +// createDriveFolder creates a new folder in Google Drive +// +// The function builds a folder metadata object with the specified name, parent +// ID, and MIME type for folders. It first checks if a folder with that name +// already exists under the given parent by querying the Drive API; if found it +// returns an error to avoid duplication. If no existing folder is detected, it +// calls the API to create the folder and returns the resulting file object or +// any creation errors. 
func createDriveFolder(srv *drive.Service, folderName, parentFolderID string) (*drive.File, error) { driveFolder := &drive.File{ Name: folderName, @@ -38,6 +46,13 @@ func createDriveFolder(srv *drive.Service, folderName, parentFolderID string) (* return createdFolder, nil } +// MoveSpreadSheetToFolder Moves a spreadsheet into a specified Google Drive folder +// +// This function retrieves the current parent folders of the given spreadsheet +// using the Drive service, then updates the file to add the target folder as a +// new parent while removing any existing parents. It performs these operations +// via the Drive API's Update call and logs fatal errors if any step fails. On +// success it returns nil, indicating the spreadsheet has been relocated. func MoveSpreadSheetToFolder(srv *drive.Service, folder *drive.File, spreadsheet *sheets.Spreadsheet) error { file, err := srv.Files.Get(spreadsheet.SpreadsheetId).Fields("parents").Do() if err != nil { @@ -65,6 +80,12 @@ func MoveSpreadSheetToFolder(srv *drive.Service, folder *drive.File, spreadsheet return nil } +// extractFolderIDFromURL extracts the final path segment from a URL +// +// This routine parses an input string as a URL, splits its path into +// components, and returns the last component which represents a folder +// identifier. If parsing fails it propagates the error; otherwise it provides +// the ID and no error. 
func extractFolderIDFromURL(u string) (string, error) { parsedURL, err := url.Parse(u) if err != nil { diff --git a/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go b/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go index 2665597f7..b03817285 100644 --- a/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go +++ b/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go @@ -36,6 +36,13 @@ var ( } ) +// NewCommand Creates a command for uploading results spreadsheets +// +// This function configures flags for the spreadsheet upload command, including +// paths to the results file, destination URL, optional OCP version, and +// credentials file. It marks the required flags and handles errors by logging +// fatal messages if flag validation fails. The configured command is then +// returned for use in the larger CLI. func NewCommand() *cobra.Command { uploadResultSpreadSheetCmd.Flags().StringVarP(&resultsFilePath, "results-file", "f", "", "Required: path to results file") uploadResultSpreadSheetCmd.Flags().StringVarP(&rootFolderURL, "dest-url", "d", "", "Required: Destination drive folder's URL") @@ -57,6 +64,12 @@ func NewCommand() *cobra.Command { return uploadResultSpreadSheetCmd } +// readCSV loads CSV file contents into a two-dimensional string slice +// +// The function opens the specified file path, reads all rows using the csv +// package, and returns them as a slice of records where each record is a slice +// of fields. It propagates any I/O or parsing errors to the caller. The file is +// closed automatically via defer before returning. 
func readCSV(fp string) ([][]string, error) { file, err := os.Open(fp) if err != nil { @@ -72,6 +85,11 @@ func readCSV(fp string) ([][]string, error) { return records, nil } +// CreateSheetsAndDriveServices Initializes Google Sheets and Drive services +// +// This function takes a path to credentials and uses it to create authenticated +// clients for both the Sheets and Drive APIs. It returns the two service +// instances or an error if either creation fails. func CreateSheetsAndDriveServices(credentials string) (sheetService *sheets.Service, driveService *drive.Service, err error) { ctx := context.TODO() @@ -88,6 +106,14 @@ func CreateSheetsAndDriveServices(credentials string) (sheetService *sheets.Serv return sheetSrv, driveSrv, nil } +// prepareRecordsForSpreadSheet Converts CSV rows into spreadsheet row data +// +// This routine takes a two‑dimensional string slice, representing CSV +// records, and transforms each cell into a CellData object suitable for Google +// Sheets. It trims overly long content to a predefined limit, replaces empty +// cells with a single space to preserve layout, and removes line breaks from +// text. Each processed row is wrapped in a RowData structure; the function +// returns a slice of these rows for use in sheet creation. func prepareRecordsForSpreadSheet(records [][]string) []*sheets.RowData { var rows []*sheets.RowData for _, row := range records { @@ -114,14 +140,14 @@ func prepareRecordsForSpreadSheet(records [][]string) []*sheets.RowData { return rows } -// createSingleWorkloadRawResultsSheet creates a new sheet with test case results of a single workload, -// extracted from rawResultsSheets (which may contain the results of several workloads). -// The sheet will use the same header columns as the rawResultsSheet, but will also add two extra columns: -// - "Owner/TechLead Conclusion": the partner/user is expected to add the name of the workload owner that should lead the fix -// of this test case result. 
-// - "Next Step Actions": the partner/user may use this column to add the follow-up actions to fix this test case result. +// createSingleWorkloadRawResultsSheet Creates a new sheet containing only the rows for a specified workload // -// Note: the caller of the function is responsible to check that the given rawResultsSheet data is not empty +// The function filters an existing raw results sheet to include only the test +// case rows that match the given workload name, adding two empty columns for +// owner/tech lead conclusion and next step actions. It retains the original +// header row from the raw sheet while inserting the new headers at the +// beginning. The resulting sheet is returned along with any error encountered +// during processing. func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Sheet, error) { // Initialize sheet with the two new column headers only. filteredRows := []*sheets.RowData{{Values: []*sheets.CellData{ @@ -164,6 +190,13 @@ func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workload return workloadResultsSheet, nil } +// createSingleWorkloadRawResultsSpreadSheet Creates a Google Sheets spreadsheet containing raw results for a specific workload +// +// The function builds a new sheet from the provided raw results, then creates a +// spreadsheet titled with the workload name. It applies a filter to show only +// failed or mandatory entries and moves the file into the designated Drive +// folder. Errors are returned if any step fails, and a log message confirms +// creation. 
func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error) { workloadResultsSheet, err := createSingleWorkloadRawResultsSheet(rawResultsSheet, workloadName) if err != nil { @@ -195,10 +228,15 @@ func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, dri return workloadResultsSpreadsheet, nil } -// createConclusionsSheet creates a new sheet with unique workloads data extracted from rawResultsSheets. -// The sheet's columns include: -// "Category" (Telco\Non-Telco workload), "Workload Version", "OCP Version", "Workload Name" and -// "Results" containing a hyper link leading to the workload's raw results spreadsheet. +// createConclusionsSheet Creates a conclusion sheet summarizing unique workloads +// +// The function builds a new Google Sheets tab that lists each distinct workload +// from the raw results, along with its category, version, OCP release, and a +// hyperlink to a dedicated results spreadsheet. It first creates a folder for +// per‑workload sheets, then iterates over the raw data rows, extracting +// unique names and assembling row values. For every new workload it generates +// an individual results file and inserts a link; if any step fails it returns +// an error. // //nolint:funlen func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string) (*sheets.Sheet, error) { @@ -288,6 +326,12 @@ func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.S return conclusionSheet, nil } +// createRawResultsSheet parses a CSV file into a Google Sheets sheet +// +// The function reads the specified CSV file, converts each row into spreadsheet +// rows while trimming overly long cell content and normalizing empty cells and +// line breaks. 
It builds a Sheet object with a title and frozen header row, +// then returns this sheet or an error if reading fails. func createRawResultsSheet(fp string) (*sheets.Sheet, error) { records, err := readCSV(fp) if err != nil { @@ -307,6 +351,14 @@ func createRawResultsSheet(fp string) (*sheets.Sheet, error) { return rawResultsSheet, nil } +// generateResultsSpreadSheet Creates a Google Sheets document with raw results and conclusions +// +// This routine establishes Google Sheets and Drive services, extracts the root +// folder ID from a URL, and creates a main results folder named with the OCP +// version and timestamp. It then builds a raw results sheet from a CSV file and +// a conclusions sheet that aggregates workload data, moves the new spreadsheet +// into the created folder, applies basic filtering, sorts by category, and +// prints the final URL. func generateResultsSpreadSheet() { sheetService, driveService, err := CreateSheetsAndDriveServices(credentials) if err != nil { diff --git a/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go b/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go index 66465dd2a..6efcccb05 100644 --- a/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go +++ b/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go @@ -6,6 +6,12 @@ import ( "google.golang.org/api/sheets/v4" ) +// GetHeadersFromSheet Retrieves header names from a spreadsheet sheet +// +// The function accesses the first row of the provided sheet, extracts each +// cell's string value, and returns them as a slice of strings. It assumes that +// the sheet contains at least one row with headers. The returned slice +// preserves the order of columns as they appear in the sheet. 
func GetHeadersFromSheet(sheet *sheets.Sheet) []string { headers := []string{} for _, val := range sheet.Data[0].RowData[0].Values { @@ -14,6 +20,13 @@ func GetHeadersFromSheet(sheet *sheets.Sheet) []string { return headers } +// GetHeadersFromValueRange extracts header names from the first row of a spreadsheet +// +// The function receives a ValueRange object containing cell values, accesses +// its first row, and converts each entry to a string using formatting logic. It +// collects these strings into a slice that represents column headers for later +// lookup operations. The returned slice is used by other utilities to map +// header names to column indices. func GetHeadersFromValueRange(sheetsValues *sheets.ValueRange) []string { headers := []string{} for _, val := range sheetsValues.Values[0] { @@ -22,6 +35,13 @@ func GetHeadersFromValueRange(sheetsValues *sheets.ValueRange) []string { return headers } +// GetHeaderIndicesByColumnNames Finds header positions for specified column names +// +// The function scans a slice of header strings to locate the index of each +// requested column name. It returns an integer slice containing the indices in +// the same order as the input names or an error if any name is missing from the +// headers. The returned indices can be used to reference columns when +// manipulating spreadsheet data. func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) { indices := []int{} for _, name := range names { @@ -40,6 +60,11 @@ func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) { return indices, nil } +// GetSheetIDByName Retrieves a sheet's numeric identifier by its title +// +// This function scans the list of sheets in a spreadsheet for one whose title +// matches the provided name. If found, it returns that sheet's unique ID and no +// error; otherwise it returns -1 and an error describing the missing sheet. 
func GetSheetIDByName(spreadsheet *sheets.Spreadsheet, name string) (int64, error) { for _, sheet := range spreadsheet.Sheets { if sheet.Properties.Title == name { @@ -49,6 +74,12 @@ func GetSheetIDByName(spreadsheet *sheets.Spreadsheet, name string) (int64, erro return -1, fmt.Errorf("there is no sheet named %s in spreadsheet %s", name, spreadsheet.SpreadsheetUrl) } +// addBasicFilterToSpreadSheet Adds a basic filter to every sheet in the spreadsheet +// +// The function iterates over each sheet in the provided spreadsheet, creating a +// request that sets a basic filter covering the entire sheet range. It then +// sends all requests as a batch update to the Google Sheets API. If the update +// succeeds it returns nil; otherwise it propagates the error. func addBasicFilterToSpreadSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet) error { requests := []*sheets.Request{} for _, sheet := range spreadsheet.Sheets { @@ -70,6 +101,13 @@ func addBasicFilterToSpreadSheet(srv *sheets.Service, spreadsheet *sheets.Spread return nil } +// addDescendingSortFilterToSheet applies a descending sort filter to a specified column in a spreadsheet sheet +// +// This routine retrieves the values of the target sheet, determines the index +// of the requested column header, obtains the sheet ID, and then constructs a +// batch update request that sorts all rows below the header in descending order +// based on that column. It returns an error if any step fails, otherwise +// completes silently. 
func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error { sheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do() if err != nil { @@ -112,6 +150,13 @@ func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spr return nil } +// addFilterByFailedAndMandatoryToSheet applies a filter to show only failed mandatory tests +// +// This function retrieves the specified sheet’s data, identifies the columns +// for test state and mandatory status, then builds a request to set a basic +// filter that displays rows where the state is "failed" and the test is marked +// as "Mandatory". It executes this filter through a batch update on the +// spreadsheet. If any step fails, it returns an error describing the issue. func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error { sheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do() if err != nil { diff --git a/cmd/certsuite/upload/upload.go b/cmd/certsuite/upload/upload.go index 99cdecabc..03f6ca302 100644 --- a/cmd/certsuite/upload/upload.go +++ b/cmd/certsuite/upload/upload.go @@ -12,6 +12,12 @@ var ( } ) +// NewCommand Creates the upload command group for the CLI +// +// This function constructs a cobra.Command that represents the upload feature +// of the tool. It registers subcommands, such as those handling result +// spreadsheets, by adding them to the main upload command. The resulting +// command is returned for integration into the root command hierarchy. 
func NewCommand() *cobra.Command { upload.AddCommand(resultsspreadsheet.NewCommand()) diff --git a/cmd/certsuite/version/version.go b/cmd/certsuite/version/version.go index 55faf9653..c437993ca 100644 --- a/cmd/certsuite/version/version.go +++ b/cmd/certsuite/version/version.go @@ -15,6 +15,12 @@ var ( } ) +// showVersion Displays the current application and claim file versions +// +// This function prints out two pieces of information: the version string for +// the Certsuite binary, which includes release and commit details, and the +// version number used for claim files. It formats both strings with newline +// separators and returns nil to indicate successful execution. func showVersion(cmd *cobra.Command, _ []string) error { fmt.Printf("Certsuite version: %s\n", versions.GitVersion()) fmt.Printf("Claim file version: %s\n", versions.ClaimFormatVersion) @@ -22,6 +28,12 @@ func showVersion(cmd *cobra.Command, _ []string) error { return nil } +// NewCommand Provides the CLI command for displaying application version +// +// This function creates and returns a cobra command configured to show the +// current version of the tool when invoked. The command is set up elsewhere in +// the package, so this function simply exposes that preconfigured command +// instance for use by the main application. func NewCommand() *cobra.Command { return versionCmd } diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..209e40f0a --- /dev/null +++ b/docs/README.md @@ -0,0 +1,95 @@ +## Code Documentation Index + +Generated by docgen. Each package has an overview and per-symbol docs. 
+ +- [cmd/certsuite](cmd/certsuite/main.md): **main** (1 files) +- [cmd/certsuite/check](cmd/certsuite/check/check.md): **check** (1 files) +- [cmd/certsuite/check/image_cert_status](cmd/certsuite/check/image_cert_status/imagecert.md): **imagecert** (1 files) +- [cmd/certsuite/check/results](cmd/certsuite/check/results/results.md): **results** (1 files) +- [cmd/certsuite/claim](cmd/certsuite/claim/claim.md): **claim** (1 files) +- [cmd/certsuite/claim/compare](cmd/certsuite/claim/compare/compare.md): **compare** (1 files) +- [cmd/certsuite/claim/compare/configurations](cmd/certsuite/claim/compare/configurations/configurations.md): **configurations** (1 files) +- [cmd/certsuite/claim/compare/diff](cmd/certsuite/claim/compare/diff/diff.md): **diff** (1 files) +- [cmd/certsuite/claim/compare/nodes](cmd/certsuite/claim/compare/nodes/nodes.md): **nodes** (1 files) +- [cmd/certsuite/claim/compare/testcases](cmd/certsuite/claim/compare/testcases/testcases.md): **testcases** (1 files) +- [cmd/certsuite/claim/compare/versions](cmd/certsuite/claim/compare/versions/versions.md): **versions** (1 files) +- [cmd/certsuite/claim/show](cmd/certsuite/claim/show/show.md): **show** (1 files) +- [cmd/certsuite/claim/show/csv](cmd/certsuite/claim/show/csv/csv.md): **csv** (1 files) +- [cmd/certsuite/claim/show/failures](cmd/certsuite/claim/show/failures/failures.md): **failures** (2 files) +- [cmd/certsuite/generate](cmd/certsuite/generate/generate.md): **generate** (1 files) +- [cmd/certsuite/generate/catalog](cmd/certsuite/generate/catalog/catalog.md): **catalog** (1 files) +- [cmd/certsuite/generate/config](cmd/certsuite/generate/config/config.md): **config** (2 files) +- [cmd/certsuite/generate/feedback](cmd/certsuite/generate/feedback/feedback.md): **feedback** (1 files) +- [cmd/certsuite/generate/qe_coverage](cmd/certsuite/generate/qe_coverage/qecoverage.md): **qecoverage** (1 files) +- [cmd/certsuite/info](cmd/certsuite/info/info.md): **info** (1 files) +- 
[cmd/certsuite/pkg/claim](cmd/certsuite/pkg/claim/claim.md): **claim** (1 files) +- [cmd/certsuite/run](cmd/certsuite/run/run.md): **run** (1 files) +- [cmd/certsuite/upload](cmd/certsuite/upload/upload.md): **upload** (1 files) +- [cmd/certsuite/upload/results_spreadsheet](cmd/certsuite/upload/results_spreadsheet/resultsspreadsheet.md): **resultsspreadsheet** (4 files) +- [cmd/certsuite/version](cmd/certsuite/version/version.md): **version** (1 files) +- [internal/cli](internal/cli/cli.md): **cli** (1 files) +- [internal/clientsholder](internal/clientsholder/clientsholder.md): **clientsholder** (3 files) +- [internal/crclient](internal/crclient/crclient.md): **crclient** (1 files) +- [internal/datautil](internal/datautil/datautil.md): **datautil** (1 files) +- [internal/log](internal/log/log.md): **log** (3 files) +- [internal/results](internal/results/results.md): **results** (4 files) +- [pkg/arrayhelper](pkg/arrayhelper/arrayhelper.md): **arrayhelper** (1 files) +- [pkg/autodiscover](pkg/autodiscover/autodiscover.md): **autodiscover** (19 files) +- [pkg/certsuite](pkg/certsuite/certsuite.md): **certsuite** (1 files) +- [pkg/checksdb](pkg/checksdb/checksdb.md): **checksdb** (3 files) +- [pkg/claimhelper](pkg/claimhelper/claimhelper.md): **claimhelper** (1 files) +- [pkg/collector](pkg/collector/collector.md): **collector** (1 files) +- [pkg/compatibility](pkg/compatibility/compatibility.md): **compatibility** (1 files) +- [pkg/configuration](pkg/configuration/configuration.md): **configuration** (3 files) +- [pkg/diagnostics](pkg/diagnostics/diagnostics.md): **diagnostics** (1 files) +- [pkg/junit](pkg/junit/junit.md): **junit** (1 files) +- [pkg/labels](pkg/labels/labels.md): **labels** (1 files) +- [pkg/podhelper](pkg/podhelper/podhelper.md): **podhelper** (1 files) +- [pkg/postmortem](pkg/postmortem/postmortem.md): **postmortem** (1 files) +- [pkg/provider](pkg/provider/provider.md): **provider** (13 files) +- [pkg/scheduling](pkg/scheduling/scheduling.md): 
**scheduling** (1 files) +- [pkg/stringhelper](pkg/stringhelper/stringhelper.md): **stringhelper** (1 files) +- [pkg/testhelper](pkg/testhelper/testhelper.md): **testhelper** (1 files) +- [pkg/versions](pkg/versions/versions.md): **versions** (1 files) +- [tests](tests/suite.md): **suite** (1 files) +- [tests/accesscontrol](tests/accesscontrol/accesscontrol.md): **accesscontrol** (3 files) +- [tests/accesscontrol/namespace](tests/accesscontrol/namespace/namespace.md): **namespace** (1 files) +- [tests/accesscontrol/resources](tests/accesscontrol/resources/resources.md): **resources** (1 files) +- [tests/accesscontrol/securitycontextcontainer](tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.md): **securitycontextcontainer** (1 files) +- [tests/certification](tests/certification/certification.md): **certification** (2 files) +- [tests/common](tests/common/common.md): **common** (3 files) +- [tests/common/rbac](tests/common/rbac/rbac.md): **rbac** (2 files) +- [tests/identifiers](tests/identifiers/identifiers.md): **identifiers** (6 files) +- [tests/lifecycle](tests/lifecycle/lifecycle.md): **lifecycle** (2 files) +- [tests/lifecycle/ownerreference](tests/lifecycle/ownerreference/ownerreference.md): **ownerreference** (1 files) +- [tests/lifecycle/podrecreation](tests/lifecycle/podrecreation/podrecreation.md): **podrecreation** (1 files) +- [tests/lifecycle/podsets](tests/lifecycle/podsets/podsets.md): **podsets** (1 files) +- [tests/lifecycle/scaling](tests/lifecycle/scaling/scaling.md): **scaling** (4 files) +- [tests/lifecycle/tolerations](tests/lifecycle/tolerations/tolerations.md): **tolerations** (1 files) +- [tests/lifecycle/volumes](tests/lifecycle/volumes/volumes.md): **volumes** (1 files) +- [tests/manageability](tests/manageability/manageability.md): **manageability** (1 files) +- [tests/networking](tests/networking/networking.md): **networking** (2 files) +- [tests/networking/icmp](tests/networking/icmp/icmp.md): **icmp** (1 files) +- 
[tests/networking/netcommons](tests/networking/netcommons/netcommons.md): **netcommons** (1 files) +- [tests/networking/netutil](tests/networking/netutil/netutil.md): **netutil** (1 files) +- [tests/networking/policies](tests/networking/policies/policies.md): **policies** (1 files) +- [tests/networking/services](tests/networking/services/services.md): **services** (1 files) +- [tests/observability](tests/observability/observability.md): **observability** (2 files) +- [tests/observability/pdb](tests/observability/pdb/pdb.md): **pdb** (1 files) +- [tests/operator](tests/operator/operator.md): **operator** (3 files) +- [tests/operator/access](tests/operator/access/access.md): **access** (1 files) +- [tests/operator/catalogsource](tests/operator/catalogsource/catalogsource.md): **catalogsource** (1 files) +- [tests/operator/openapi](tests/operator/openapi/openapi.md): **openapi** (1 files) +- [tests/operator/phasecheck](tests/operator/phasecheck/phasecheck.md): **phasecheck** (1 files) +- [tests/performance](tests/performance/performance.md): **performance** (1 files) +- [tests/platform](tests/platform/platform.md): **platform** (2 files) +- [tests/platform/bootparams](tests/platform/bootparams/bootparams.md): **bootparams** (1 files) +- [tests/platform/clusteroperator](tests/platform/clusteroperator/clusteroperator.md): **clusteroperator** (1 files) +- [tests/platform/cnffsdiff](tests/platform/cnffsdiff/cnffsdiff.md): **cnffsdiff** (1 files) +- [tests/platform/hugepages](tests/platform/hugepages/hugepages.md): **hugepages** (1 files) +- [tests/platform/isredhat](tests/platform/isredhat/isredhat.md): **isredhat** (1 files) +- [tests/platform/nodetainted](tests/platform/nodetainted/nodetainted.md): **nodetainted** (1 files) +- [tests/platform/operatingsystem](tests/platform/operatingsystem/operatingsystem.md): **operatingsystem** (1 files) +- [tests/platform/sysctlconfig](tests/platform/sysctlconfig/sysctlconfig.md): **sysctlconfig** (1 files) +- 
[tests/preflight](tests/preflight/preflight.md): **preflight** (1 files) +- [webserver](webserver/webserver.md): **webserver** (1 files) diff --git a/docs/analysis.json b/docs/analysis.json new file mode 100644 index 000000000..9a5773ede --- /dev/null +++ b/docs/analysis.json @@ -0,0 +1,192321 @@ +[ + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "main", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/version", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/spf13/cobra", + "os" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "main", + "qualifiedName": "main", + "exported": false, + "signature": "func()()", + "doc": "main Runs the certsuite command-line interface\n\nIt creates a root command with subcommands, executes it, and exits with an\nerror code if execution fails. 
Errors are logged before terminating the\nprogram.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/main.go:47", + "calls": [ + { + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + }, + { + "name": "Execute", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func main() {", + "\trootCmd := newRootCmd()", + "\tif err := rootCmd.Execute(); err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tos.Exit(1)", + "\t}", + "}" + ] + }, + { + "name": "newRootCmd", + "qualifiedName": "newRootCmd", + "exported": false, + "signature": "func()(*cobra.Command)", + "doc": "newRootCmd Creates the top-level command for the certsuite CLI\n\nThis function initializes a new root command with usage information and\nattaches subcommands such as claim, generate, check, run, info, version, and\nupload. Each subcommand is constructed by calling its own NewCommand\nfunction. 
The resulting command object is returned to be executed in the main\nentry point.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/main.go:25", + "calls": [ + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tclaimCommand.AddCommand(compare.NewCommand())", + "\tclaimCommand.AddCommand(show.NewCommand())", + "", + "\treturn claimCommand", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerate.AddCommand(catalog.NewCommand())", + "\tgenerate.AddCommand(feedback.NewCommand())", + "\tgenerate.AddCommand(config.NewCommand())", + "\tgenerate.AddCommand(qecoverage.NewCommand())", + "", + "\treturn generate", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckCmd.AddCommand(imagecert.NewCommand())", + "\tcheckCmd.AddCommand(results.NewCommand())", + "", + "\treturn checkCmd", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\trunCmd.PersistentFlags().StringP(\"output-dir\", \"o\", \"results\", \"The directory where the output artifacts will be placed\")", + "\trunCmd.PersistentFlags().StringP(\"label-filter\", \"l\", \"none\", \"Label expression to filter test cases (e.g. 
--label-filter 'access-control \u0026\u0026 !access-control-sys-admin-capability')\")", + "\trunCmd.PersistentFlags().String(\"timeout\", timeoutFlagDefaultvalue.String(), \"Time allowed for the test suite execution to complete (e.g. --timeout 30m or -timeout 1h30m)\")", + "\trunCmd.PersistentFlags().StringP(\"config-file\", \"c\", \"config/certsuite_config.yml\", \"The certsuite configuration file\")", + "\trunCmd.PersistentFlags().StringP(\"kubeconfig\", \"k\", \"\", \"The target cluster's Kubeconfig file\")", + "\trunCmd.PersistentFlags().Bool(\"server-mode\", false, \"Run the certsuite in web server mode\")", + "\trunCmd.PersistentFlags().Bool(\"omit-artifacts-zip-file\", false, \"Prevents the creation of a zip file with the result artifacts\")", + "\trunCmd.PersistentFlags().String(\"log-level\", \"debug\", \"Sets the log level\")", + "\trunCmd.PersistentFlags().String(\"offline-db\", \"\", \"Set the location of an offline DB to check the certification status of for container images, operators and helm charts\")", + "\trunCmd.PersistentFlags().String(\"preflight-dockerconfig\", \"\", \"Set the dockerconfig file to be used by the Preflight test suite\")", + "\trunCmd.PersistentFlags().Bool(\"intrusive\", true, \"Run intrusive tests that may disrupt the test environment\")", + "\trunCmd.PersistentFlags().Bool(\"allow-preflight-insecure\", false, \"Allow insecure connections in the Preflight test suite\")", + "\trunCmd.PersistentFlags().Bool(\"include-web-files\", false, \"Save web files in the configured output folder\")", + "\trunCmd.PersistentFlags().Bool(\"enable-data-collection\", false, \"Allow sending test results to an external data collector\")", + "\trunCmd.PersistentFlags().Bool(\"create-xml-junit-file\", false, \"Create a JUnit file with the test results\")", + "\trunCmd.PersistentFlags().String(\"certsuite-probe-image\", \"quay.io/redhat-best-practices-for-k8s/certsuite-probe:v0.0.25\", \"Certsuite probe image\")", + 
"\trunCmd.PersistentFlags().String(\"daemonset-cpu-req\", \"100m\", \"CPU request for the probe daemonset container\")", + "\trunCmd.PersistentFlags().String(\"daemonset-cpu-lim\", \"100m\", \"CPU limit for the probe daemonset container\")", + "\trunCmd.PersistentFlags().String(\"daemonset-mem-req\", \"100M\", \"Memory request for the probe daemonset container\")", + "\trunCmd.PersistentFlags().String(\"daemonset-mem-lim\", \"100M\", \"Memory limit for the probe daemonset container\")", + "\trunCmd.PersistentFlags().Bool(\"sanitize-claim\", false, \"Sanitize the claim.json file before sending it to the collector\")", + "\t// Include non-Running pods during autodiscovery when enabled (default false)", + "\trunCmd.PersistentFlags().Bool(\"allow-non-running\", false, \"Include non-Running pods during autodiscovery phase\")", + "\trunCmd.PersistentFlags().String(\"connect-api-key\", \"\", \"API Key for Red Hat Connect portal\")", + "\trunCmd.PersistentFlags().String(\"connect-project-id\", \"\", \"Project ID for Red Hat Connect portal\")", + "\trunCmd.PersistentFlags().String(\"connect-api-base-url\", \"\", \"Base URL for Red Hat Connect API\")", + "\trunCmd.PersistentFlags().String(\"connect-api-proxy-url\", \"\", \"Proxy URL for Red Hat Connect API\")", + "\trunCmd.PersistentFlags().String(\"connect-api-proxy-port\", \"\", \"Proxy port for Red Hat Connect API\")", + "", + "\treturn runCmd", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tinfoCmd.PersistentFlags().StringP(\"test-label\", \"t\", \"\", \"The test label filter to select the test cases to show information about\")", + "\tinfoCmd.PersistentFlags().BoolP(\"list\", \"l\", false, \"Show only the names of the test cases for a given test label\")", + "\terr := 
infoCmd.MarkPersistentFlagRequired(\"test-label\")", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not mark persistent flag \\\"test-case\\\" as required, err: %v\", err)", + "\t\treturn nil", + "\t}", + "\treturn infoCmd", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/version", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\treturn versionCmd", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tupload.AddCommand(resultsspreadsheet.NewCommand())", + "", + "\treturn upload", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "main", + "kind": "function", + "source": [ + "func main() {", + "\trootCmd := newRootCmd()", + "\tif err := rootCmd.Execute(); err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tos.Exit(1)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check", + 
"name": "check", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/image_cert_status", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results", + "github.com/spf13/cobra" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a check command that aggregates image certification and result verification actions\n\nThis function builds a new Cobra command for the tool’s check\nfunctionality. It registers two child commands: one to verify image\ncertificates and another to validate test results against expected outputs or\nlogs. The resulting command is returned for inclusion in the main CLI\nhierarchy.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/check.go:23", + "calls": [ + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/image_cert_status", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"name\", \"\", \"name of the image to verify\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"registry\", \"\", \"registry where the image is stored\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"tag\", \"latest\", \"image tag to be fetched\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"digest\", \"\", \"digest of the image\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"offline-db\", \"\", \"path to the offline db (for disconnected environments)\")", + "", + "\tcheckImageCertStatusCmd.MarkFlagsRequiredTogether(\"name\", \"registry\")", + "\tcheckImageCertStatusCmd.MarkFlagsMutuallyExclusive(\"name\", \"digest\")", + "", + "\treturn checkImageCertStatusCmd", + "}" + ] + }, + { + 
"name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckResultsCmd.PersistentFlags().String(\"template\", \"expected_results.yaml\", \"reference YAML template with the expected results\")", + "\tcheckResultsCmd.PersistentFlags().String(\"log-file\", \"certsuite.log\", \"log file of the Certsuite execution\")", + "\tcheckResultsCmd.PersistentFlags().Bool(\"generate-template\", false, \"generate a reference YAML template from the log file\")", + "", + "\tcheckResultsCmd.MarkFlagsMutuallyExclusive(\"template\", \"generate-template\")", + "", + "\treturn checkResultsCmd", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckCmd.AddCommand(imagecert.NewCommand())", + "\tcheckCmd.AddCommand(results.NewCommand())", + "", + "\treturn checkCmd", + "}" + ] + } + ], + "globals": [ + { + "name": "checkCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/check.go:10" + } + ], + "consts": null + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/image_cert_status", + "name": "imagecert", + "files": 1, + "imports": [ + "fmt", + "github.com/fatih/color", + "github.com/redhat-best-practices-for-k8s/oct/pkg/certdb", + "github.com/spf13/cobra" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand configures and returns the image certificate status command\n\nThis function sets up persistent flags for specifying an image name,\nregistry, tag, digest, and an optional offline database path. It enforces\nthat a name and registry must be provided together while ensuring the name\nand digest cannot both be set at once. Finally, it returns the fully\nconfigured command object.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/image_cert_status/image_cert_status.go:82", + "calls": [ + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "MarkFlagsRequiredTogether", + "kind": "function" + }, + { + "name": "MarkFlagsMutuallyExclusive", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckCmd.AddCommand(imagecert.NewCommand())", + "\tcheckCmd.AddCommand(results.NewCommand())", + "", + "\treturn 
checkCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"name\", \"\", \"name of the image to verify\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"registry\", \"\", \"registry where the image is stored\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"tag\", \"latest\", \"image tag to be fetched\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"digest\", \"\", \"digest of the image\")", + "\tcheckImageCertStatusCmd.PersistentFlags().String(\"offline-db\", \"\", \"path to the offline db (for disconnected environments)\")", + "", + "\tcheckImageCertStatusCmd.MarkFlagsRequiredTogether(\"name\", \"registry\")", + "\tcheckImageCertStatusCmd.MarkFlagsMutuallyExclusive(\"name\", \"digest\")", + "", + "\treturn checkImageCertStatusCmd", + "}" + ] + }, + { + "name": "checkImageCertStatus", + "qualifiedName": "checkImageCertStatus", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "checkImageCertStatus checks whether a container image is certified\n\nThe function reads command-line flags for an image name, registry, tag, or\ndigest, then uses a validator from the certdb package to determine\ncertification status. It prints formatted information about the selected\nimage and outputs a colored result indicating success or failure. 
Errors are\nreturned if required parameters are missing or if the validator cannot be\nobtained.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/image_cert_status/image_cert_status.go:41", + "calls": [ + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/oct/pkg/certdb", + "name": "GetValidator", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "IsContainerCertified", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "GreenString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "RedString", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func checkImageCertStatus(cmd *cobra.Command, _ []string) error {", + "\timageName, _ := 
cmd.Flags().GetString(\"name\")", + "\timageRegistry, _ := cmd.Flags().GetString(\"registry\")", + "\timageTag, _ := cmd.Flags().GetString(\"tag\")", + "\timageDigest, _ := cmd.Flags().GetString(\"digest\")", + "\tofflineDB, _ := cmd.Flags().GetString(\"offline-db\")", + "", + "\tvalidator, err := certdb.GetValidator(offlineDB)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get a validator for container images, error: %v\", err)", + "\t}", + "", + "\tswitch {", + "\tcase imageName != \"\":", + "\t\tfmt.Println(\"**********************************************\")", + "\t\tfmt.Printf(\"Image name: %s\\nImage registry: %s\\nImage tag: %s\\n\", imageName, imageRegistry, imageTag)", + "\t\tfmt.Println(\"**********************************************\")", + "\tcase imageDigest != \"\":", + "\t\tfmt.Println(\"**************************************************************************************\")", + "\t\tfmt.Printf(\"Image digest: %s\\n\", imageDigest)", + "\t\tfmt.Println(\"**************************************************************************************\")", + "\tdefault:", + "\t\treturn fmt.Errorf(\"either an image name or an image digest must be provided\")", + "\t}", + "", + "\tif validator.IsContainerCertified(imageRegistry, imageName, imageTag, imageDigest) {", + "\t\tfmt.Printf(\"Result: %s\\n\", color.GreenString(\"Image certified\"))", + "\t} else {", + "\t\tfmt.Printf(\"Result: %s\\n\", color.RedString(\"Image not certified\"))", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "globals": [ + { + "name": "checkImageCertStatusCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/image_cert_status/image_cert_status.go:27" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results", + "name": "results", + "files": 1, + "imports": [ + "bufio", + "bytes", + "fmt", + "github.com/spf13/cobra", + "gopkg.in/yaml.v3", + "os", + 
"regexp", + "strings" + ], + "structs": [ + { + "name": "TestCaseList", + "exported": true, + "doc": "TestCaseList Stores the names of test cases categorized by outcome\n\nThis structure keeps three slices, each holding strings that represent test\ncase identifiers. The Pass slice lists all tests that succeeded, Fail\ncontains those that failed, and Skip holds tests that were not executed. It\nis used to report results in a concise format.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:35", + "fields": { + "Fail": "[]string", + "Pass": "[]string", + "Skip": "[]string" + }, + "methodNames": null, + "source": [ + "type TestCaseList struct {", + "\tPass []string `yaml:\"pass\"`", + "\tFail []string `yaml:\"fail\"`", + "\tSkip []string `yaml:\"skip\"`", + "}" + ] + }, + { + "name": "TestResults", + "exported": true, + "doc": "TestResults Holds a collection of test case results\n\nThis structure contains a slice of individual test case outcomes, allowing\nthe program to group related results together. The embedded field\nautomatically inherits all fields and methods from the underlying test case\nlist type, enabling direct access to the collection’s elements. It serves\nas a container for serializing or reporting aggregated test data.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:48", + "fields": { + "embedded:TestCaseList": "TestCaseList" + }, + "methodNames": null, + "source": [ + "type TestResults struct {", + "\tTestCaseList `yaml:\"testCases\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a command for checking test results against expected templates\n\nIt defines persistent flags for specifying the template file, log file, and\nan option to generate a new template from logs. 
The flags are mutually\nexclusive to avoid conflicting inputs. Finally, it returns the configured\ncommand instance.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:261", + "calls": [ + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "MarkFlagsMutuallyExclusive", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckCmd.AddCommand(imagecert.NewCommand())", + "\tcheckCmd.AddCommand(results.NewCommand())", + "", + "\treturn checkCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tcheckResultsCmd.PersistentFlags().String(\"template\", \"expected_results.yaml\", \"reference YAML template with the expected results\")", + "\tcheckResultsCmd.PersistentFlags().String(\"log-file\", \"certsuite.log\", \"log file of the Certsuite execution\")", + "\tcheckResultsCmd.PersistentFlags().Bool(\"generate-template\", false, \"generate a reference YAML template from the log file\")", + "", + "\tcheckResultsCmd.MarkFlagsMutuallyExclusive(\"template\", \"generate-template\")", + "", + "\treturn checkResultsCmd", + "}" + ] + }, + { + "name": "checkResults", + "qualifiedName": "checkResults", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "checkResults compares recorded test outcomes against a reference template\n\nThe function reads actual test results from a log file, optionally generates\na YAML template of those results, or loads expected results from an 
existing\ntemplate. It then checks each test case for mismatches between actual and\nexpected values, reporting any discrepancies in a formatted table and\nterminating the program if differences are found. If all results match, it\nprints a success message.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:66", + "calls": [ + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "getTestResultsDB", + "kind": "function", + "source": [ + "func getTestResultsDB(logFileName string) (map[string]string, error) {", + "\tresultsDB := make(map[string]string)", + "", + "\tfile, err := os.Open(logFileName)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not open file %q, err: %v\", logFileName, err)", + "\t}", + "\tdefer file.Close()", + "", + "\tre := regexp.MustCompile(`.*\\[(.*?)\\]\\s+Recording result\\s+\"(.*?)\"`)", + "", + "\tscanner := bufio.NewScanner(file)", + "\t// Fix for bufio.Scanner: token too long", + "\tconst kBytes64 = 64 * 1024", + "\tconst kBytes1024 = 1024 * 1024", + "\tbuf := make([]byte, 0, kBytes64)", + "\tscanner.Buffer(buf, kBytes1024)", + "\tfor scanner.Scan() {", + "\t\tline := scanner.Text()", + "\t\tmatch := re.FindStringSubmatch(line)", + "\t\tif match != nil {", + "\t\t\ttestCaseName := match[1]", + "\t\t\tresult := match[2]", + "\t\t\tresultsDB[testCaseName] = result", + "\t\t}", + "\t}", + "", + "\tif err := scanner.Err(); err != nil {", + "\t\treturn nil, fmt.Errorf(\"error scanning file, err: %v\", err)", + "\t}", + "", + "\treturn resultsDB, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "generateTemplateFile", + "kind": "function", + "source": [ + "func 
generateTemplateFile(resultsDB map[string]string) error {", + "\tvar resultsTemplate TestResults", + "\tfor testCase, result := range resultsDB {", + "\t\tswitch result {", + "\t\tcase resultPass:", + "\t\t\tresultsTemplate.Pass = append(resultsTemplate.Pass, testCase)", + "\t\tcase resultSkip:", + "\t\t\tresultsTemplate.Skip = append(resultsTemplate.Skip, testCase)", + "\t\tcase resultFail:", + "\t\t\tresultsTemplate.Fail = append(resultsTemplate.Fail, testCase)", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"unknown test case result %q\", result)", + "\t\t}", + "\t}", + "", + "\tconst twoSpaces = 2", + "\tvar yamlTemplate bytes.Buffer", + "\tyamlEncoder := yaml.NewEncoder(\u0026yamlTemplate)", + "\tyamlEncoder.SetIndent(twoSpaces)", + "\terr := yamlEncoder.Encode(\u0026resultsTemplate)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not encode template yaml, err: %v\", err)", + "\t}", + "", + "\terr = os.WriteFile(TestResultsTemplateFileName, yamlTemplate.Bytes(), TestResultsTemplateFilePermissions)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not write to file %q: %v\", TestResultsTemplateFileName, err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "getExpectedTestResults", + "kind": "function", + "source": [ + "func getExpectedTestResults(templateFileName string) (map[string]string, error) {", + "\ttemplateFile, err := os.ReadFile(templateFileName)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not open template file %q, err: %v\", templateFileName, err)", + "\t}", + "", + "\tvar expectedTestResultsList TestResults", + "\terr = yaml.Unmarshal(templateFile, \u0026expectedTestResultsList)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not parse the template YAML file, err: %v\", err)", + "\t}", + "", + "\texpectedTestResults := make(map[string]string)", + "\tfor _, testCase := range expectedTestResultsList.Pass {", + "\t\texpectedTestResults[testCase] = resultPass", + "\t}", + "\tfor _, 
testCase := range expectedTestResultsList.Skip {", + "\t\texpectedTestResults[testCase] = resultSkip", + "\t}", + "\tfor _, testCase := range expectedTestResultsList.Fail {", + "\t\texpectedTestResults[testCase] = resultFail", + "\t}", + "", + "\treturn expectedTestResults, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "name": "printTestResultsMismatch", + "kind": "function", + "source": [ + "func printTestResultsMismatch(mismatchedTestCases []string, actualResults, expectedResults map[string]string) {", + "\tfmt.Printf(\"\\n\")", + "\tfmt.Println(strings.Repeat(\"-\", 96)) //nolint:mnd // table line", + "\tfmt.Printf(\"| %-58s %-19s %s |\\n\", \"TEST_CASE\", \"EXPECTED_RESULT\", \"ACTUAL_RESULT\")", + "\tfmt.Println(strings.Repeat(\"-\", 96)) //nolint:mnd // table line", + "\tfor _, testCase := range mismatchedTestCases {", + "\t\texpectedResult, exist := expectedResults[testCase]", + "\t\tif !exist {", + "\t\t\texpectedResult = resultMiss", + "\t\t}", + "\t\tactualResult, exist := actualResults[testCase]", + "\t\tif !exist {", + "\t\t\tactualResult = resultMiss", + "\t\t}", + "\t\tfmt.Printf(\"| %-54s %19s %17s |\\n\", testCase, expectedResult, actualResult)", + "\t\tfmt.Println(strings.Repeat(\"-\", 96)) //nolint:mnd // table line", + "\t}", + "}" + ] + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func checkResults(cmd *cobra.Command, _ []string) error {", + "\ttemplateFileName, _ := cmd.Flags().GetString(\"template\")", + "\tgenerateTemplate, _ := cmd.Flags().GetBool(\"generate-template\")", + "\tlogFileName, _ := 
cmd.Flags().GetString(\"log-file\")", + "", + "\t// Build a database with the test results from the log file", + "\tactualTestResults, err := getTestResultsDB(logFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the test results DB, err: %v\", err)", + "\t}", + "", + "\t// Generate a reference YAML template with the test results if required", + "\tif generateTemplate {", + "\t\treturn generateTemplateFile(actualTestResults)", + "\t}", + "", + "\t// Get the expected test results from the reference YAML template", + "\texpectedTestResults, err := getExpectedTestResults(templateFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the expected test results, err: %v\", err)", + "\t}", + "", + "\t// Match the results between the test results DB and the reference YAML template", + "\tvar mismatchedTestCases []string", + "\tfor testCase, testResult := range actualTestResults {", + "\t\tif testResult != expectedTestResults[testCase] {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\t// Verify that there are no unmatched expected test results", + "\tfor testCase := range expectedTestResults {", + "\t\tif _, exists := actualTestResults[testCase]; !exists {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\tif len(mismatchedTestCases) \u003e 0 {", + "\t\tfmt.Println(\"Expected results DO NOT match actual results\")", + "\t\tprintTestResultsMismatch(mismatchedTestCases, actualTestResults, expectedTestResults)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tfmt.Println(\"Expected results and actual results match\")", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "generateTemplateFile", + "qualifiedName": "generateTemplateFile", + "exported": false, + "signature": "func(map[string]string)(error)", + "doc": "generateTemplateFile Creates a YAML template file summarizing test case outcomes\n\nThis function takes a map of test 
cases to result strings and builds a\nstructured template containing lists for passed, skipped, and failed tests.\nIt encodes the structure into YAML with two-space indentation and writes it\nto a predefined file path with specific permissions. If an unknown result\nvalue is encountered or any I/O operation fails, it returns an error\ndetailing the issue.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:223", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "NewEncoder", + "kind": "function" + }, + { + "name": "SetIndent", + "kind": "function" + }, + { + "name": "Encode", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "WriteFile", + "kind": "function" + }, + { + "name": "Bytes", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results", + "name": "checkResults", + "kind": "function", + "source": [ + "func checkResults(cmd *cobra.Command, _ []string) error {", + "\ttemplateFileName, _ := cmd.Flags().GetString(\"template\")", + "\tgenerateTemplate, _ := cmd.Flags().GetBool(\"generate-template\")", + "\tlogFileName, _ := cmd.Flags().GetString(\"log-file\")", + "", + "\t// Build a database with the test results from the log file", + "\tactualTestResults, err := getTestResultsDB(logFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the test results DB, err: %v\", err)", + "\t}", + "", + "\t// Generate a reference YAML template with the test results if required", + "\tif generateTemplate {", + "\t\treturn generateTemplateFile(actualTestResults)", + "\t}", + "", + "\t// Get the 
expected test results from the reference YAML template", + "\texpectedTestResults, err := getExpectedTestResults(templateFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the expected test results, err: %v\", err)", + "\t}", + "", + "\t// Match the results between the test results DB and the reference YAML template", + "\tvar mismatchedTestCases []string", + "\tfor testCase, testResult := range actualTestResults {", + "\t\tif testResult != expectedTestResults[testCase] {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\t// Verify that there are no unmatched expected test results", + "\tfor testCase := range expectedTestResults {", + "\t\tif _, exists := actualTestResults[testCase]; !exists {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\tif len(mismatchedTestCases) \u003e 0 {", + "\t\tfmt.Println(\"Expected results DO NOT match actual results\")", + "\t\tprintTestResultsMismatch(mismatchedTestCases, actualTestResults, expectedTestResults)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tfmt.Println(\"Expected results and actual results match\")", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func generateTemplateFile(resultsDB map[string]string) error {", + "\tvar resultsTemplate TestResults", + "\tfor testCase, result := range resultsDB {", + "\t\tswitch result {", + "\t\tcase resultPass:", + "\t\t\tresultsTemplate.Pass = append(resultsTemplate.Pass, testCase)", + "\t\tcase resultSkip:", + "\t\t\tresultsTemplate.Skip = append(resultsTemplate.Skip, testCase)", + "\t\tcase resultFail:", + "\t\t\tresultsTemplate.Fail = append(resultsTemplate.Fail, testCase)", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"unknown test case result %q\", result)", + "\t\t}", + "\t}", + "", + "\tconst twoSpaces = 2", + "\tvar yamlTemplate bytes.Buffer", + "\tyamlEncoder := 
yaml.NewEncoder(\u0026yamlTemplate)", + "\tyamlEncoder.SetIndent(twoSpaces)", + "\terr := yamlEncoder.Encode(\u0026resultsTemplate)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not encode template yaml, err: %v\", err)", + "\t}", + "", + "\terr = os.WriteFile(TestResultsTemplateFileName, yamlTemplate.Bytes(), TestResultsTemplateFilePermissions)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not write to file %q: %v\", TestResultsTemplateFileName, err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "getExpectedTestResults", + "qualifiedName": "getExpectedTestResults", + "exported": false, + "signature": "func(string)(map[string]string, error)", + "doc": "getExpectedTestResults loads expected test outcomes from a YAML template\n\nThe function reads a specified file, decodes its YAML content into a\nstructured list of test cases classified as pass, skip, or fail, then builds\na map associating each case with the corresponding result string. It returns\nthis map along with any error that occurs during file reading or\nunmarshalling.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:162", + "calls": [ + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results", + "name": "checkResults", + "kind": "function", + "source": [ + "func checkResults(cmd *cobra.Command, _ []string) error {", + "\ttemplateFileName, _ := cmd.Flags().GetString(\"template\")", + "\tgenerateTemplate, _ := cmd.Flags().GetBool(\"generate-template\")", + "\tlogFileName, _ := cmd.Flags().GetString(\"log-file\")", + "", + "\t// Build a database with the test results 
from the log file", + "\tactualTestResults, err := getTestResultsDB(logFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the test results DB, err: %v\", err)", + "\t}", + "", + "\t// Generate a reference YAML template with the test results if required", + "\tif generateTemplate {", + "\t\treturn generateTemplateFile(actualTestResults)", + "\t}", + "", + "\t// Get the expected test results from the reference YAML template", + "\texpectedTestResults, err := getExpectedTestResults(templateFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the expected test results, err: %v\", err)", + "\t}", + "", + "\t// Match the results between the test results DB and the reference YAML template", + "\tvar mismatchedTestCases []string", + "\tfor testCase, testResult := range actualTestResults {", + "\t\tif testResult != expectedTestResults[testCase] {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\t// Verify that there are no unmatched expected test results", + "\tfor testCase := range expectedTestResults {", + "\t\tif _, exists := actualTestResults[testCase]; !exists {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\tif len(mismatchedTestCases) \u003e 0 {", + "\t\tfmt.Println(\"Expected results DO NOT match actual results\")", + "\t\tprintTestResultsMismatch(mismatchedTestCases, actualTestResults, expectedTestResults)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tfmt.Println(\"Expected results and actual results match\")", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getExpectedTestResults(templateFileName string) (map[string]string, error) {", + "\ttemplateFile, err := os.ReadFile(templateFileName)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not open template file %q, err: %v\", templateFileName, err)", + "\t}", + "", + "\tvar 
expectedTestResultsList TestResults", + "\terr = yaml.Unmarshal(templateFile, \u0026expectedTestResultsList)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not parse the template YAML file, err: %v\", err)", + "\t}", + "", + "\texpectedTestResults := make(map[string]string)", + "\tfor _, testCase := range expectedTestResultsList.Pass {", + "\t\texpectedTestResults[testCase] = resultPass", + "\t}", + "\tfor _, testCase := range expectedTestResultsList.Skip {", + "\t\texpectedTestResults[testCase] = resultSkip", + "\t}", + "\tfor _, testCase := range expectedTestResultsList.Fail {", + "\t\texpectedTestResults[testCase] = resultFail", + "\t}", + "", + "\treturn expectedTestResults, nil", + "}" + ] + }, + { + "name": "getTestResultsDB", + "qualifiedName": "getTestResultsDB", + "exported": false, + "signature": "func(string)(map[string]string, error)", + "doc": "getTestResultsDB Parses a log file to build a test result map\n\nThe function opens the specified log file, reads it line by line, and\nextracts test case names and their recorded results using a regular\nexpression. Each matched pair is stored in a map where the key is the test\ncase name and the value is its result string. 
It returns this map along with\nan error if any step fails.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:121", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Open", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "pkgPath": "bufio", + "name": "NewScanner", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "Buffer", + "kind": "function" + }, + { + "name": "Scan", + "kind": "function" + }, + { + "name": "Text", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "name": "Err", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results", + "name": "checkResults", + "kind": "function", + "source": [ + "func checkResults(cmd *cobra.Command, _ []string) error {", + "\ttemplateFileName, _ := cmd.Flags().GetString(\"template\")", + "\tgenerateTemplate, _ := cmd.Flags().GetBool(\"generate-template\")", + "\tlogFileName, _ := cmd.Flags().GetString(\"log-file\")", + "", + "\t// Build a database with the test results from the log file", + "\tactualTestResults, err := getTestResultsDB(logFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the test results DB, err: %v\", err)", + "\t}", + "", + "\t// Generate a reference YAML template with the test results if required", + "\tif generateTemplate {", + "\t\treturn generateTemplateFile(actualTestResults)", + "\t}", + "", + "\t// Get the expected test results from the reference YAML template", + "\texpectedTestResults, err := getExpectedTestResults(templateFileName)", + "\tif err != nil {", + "\t\treturn 
fmt.Errorf(\"could not get the expected test results, err: %v\", err)", + "\t}", + "", + "\t// Match the results between the test results DB and the reference YAML template", + "\tvar mismatchedTestCases []string", + "\tfor testCase, testResult := range actualTestResults {", + "\t\tif testResult != expectedTestResults[testCase] {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\t// Verify that there are no unmatched expected test results", + "\tfor testCase := range expectedTestResults {", + "\t\tif _, exists := actualTestResults[testCase]; !exists {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\tif len(mismatchedTestCases) \u003e 0 {", + "\t\tfmt.Println(\"Expected results DO NOT match actual results\")", + "\t\tprintTestResultsMismatch(mismatchedTestCases, actualTestResults, expectedTestResults)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tfmt.Println(\"Expected results and actual results match\")", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getTestResultsDB(logFileName string) (map[string]string, error) {", + "\tresultsDB := make(map[string]string)", + "", + "\tfile, err := os.Open(logFileName)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not open file %q, err: %v\", logFileName, err)", + "\t}", + "\tdefer file.Close()", + "", + "\tre := regexp.MustCompile(`.*\\[(.*?)\\]\\s+Recording result\\s+\"(.*?)\"`)", + "", + "\tscanner := bufio.NewScanner(file)", + "\t// Fix for bufio.Scanner: token too long", + "\tconst kBytes64 = 64 * 1024", + "\tconst kBytes1024 = 1024 * 1024", + "\tbuf := make([]byte, 0, kBytes64)", + "\tscanner.Buffer(buf, kBytes1024)", + "\tfor scanner.Scan() {", + "\t\tline := scanner.Text()", + "\t\tmatch := re.FindStringSubmatch(line)", + "\t\tif match != nil {", + "\t\t\ttestCaseName := match[1]", + "\t\t\tresult := match[2]", + 
"\t\t\tresultsDB[testCaseName] = result", + "\t\t}", + "\t}", + "", + "\tif err := scanner.Err(); err != nil {", + "\t\treturn nil, fmt.Errorf(\"error scanning file, err: %v\", err)", + "\t}", + "", + "\treturn resultsDB, nil", + "}" + ] + }, + { + "name": "printTestResultsMismatch", + "qualifiedName": "printTestResultsMismatch", + "exported": false, + "signature": "func([]string, map[string]string, map[string]string)()", + "doc": "printTestResultsMismatch Displays a formatted table of test cases that did not match the expected results\n\nThe function receives a list of mismatched test case identifiers along with\nmaps of actual and expected outcomes. It prints a header, then iterates over\neach mismatched case, retrieving the corresponding expected and actual\nvalues—using a placeholder when either is missing—and outputs them in\naligned columns. Finally, it draws separators to delineate each row for\nreadability.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:196", + "calls": [ + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Repeat", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Repeat", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Repeat", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results", + "name": "checkResults", + "kind": "function", + "source": [ + "func checkResults(cmd *cobra.Command, _ []string) error {", + "\ttemplateFileName, _ := cmd.Flags().GetString(\"template\")", + 
"\tgenerateTemplate, _ := cmd.Flags().GetBool(\"generate-template\")", + "\tlogFileName, _ := cmd.Flags().GetString(\"log-file\")", + "", + "\t// Build a database with the test results from the log file", + "\tactualTestResults, err := getTestResultsDB(logFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the test results DB, err: %v\", err)", + "\t}", + "", + "\t// Generate a reference YAML template with the test results if required", + "\tif generateTemplate {", + "\t\treturn generateTemplateFile(actualTestResults)", + "\t}", + "", + "\t// Get the expected test results from the reference YAML template", + "\texpectedTestResults, err := getExpectedTestResults(templateFileName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the expected test results, err: %v\", err)", + "\t}", + "", + "\t// Match the results between the test results DB and the reference YAML template", + "\tvar mismatchedTestCases []string", + "\tfor testCase, testResult := range actualTestResults {", + "\t\tif testResult != expectedTestResults[testCase] {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\t// Verify that there are no unmatched expected test results", + "\tfor testCase := range expectedTestResults {", + "\t\tif _, exists := actualTestResults[testCase]; !exists {", + "\t\t\tmismatchedTestCases = append(mismatchedTestCases, testCase)", + "\t\t}", + "\t}", + "", + "\tif len(mismatchedTestCases) \u003e 0 {", + "\t\tfmt.Println(\"Expected results DO NOT match actual results\")", + "\t\tprintTestResultsMismatch(mismatchedTestCases, actualTestResults, expectedTestResults)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tfmt.Println(\"Expected results and actual results match\")", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printTestResultsMismatch(mismatchedTestCases []string, actualResults, expectedResults map[string]string) {", + 
"\tfmt.Printf(\"\\n\")", + "\tfmt.Println(strings.Repeat(\"-\", 96)) //nolint:mnd // table line", + "\tfmt.Printf(\"| %-58s %-19s %s |\\n\", \"TEST_CASE\", \"EXPECTED_RESULT\", \"ACTUAL_RESULT\")", + "\tfmt.Println(strings.Repeat(\"-\", 96)) //nolint:mnd // table line", + "\tfor _, testCase := range mismatchedTestCases {", + "\t\texpectedResult, exist := expectedResults[testCase]", + "\t\tif !exist {", + "\t\t\texpectedResult = resultMiss", + "\t\t}", + "\t\tactualResult, exist := actualResults[testCase]", + "\t\tif !exist {", + "\t\t\tactualResult = resultMiss", + "\t\t}", + "\t\tfmt.Printf(\"| %-54s %19s %17s |\\n\", testCase, expectedResult, actualResult)", + "\t\tfmt.Println(strings.Repeat(\"-\", 96)) //nolint:mnd // table line", + "\t}", + "}" + ] + } + ], + "globals": [ + { + "name": "checkResultsCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:52" + } + ], + "consts": [ + { + "name": "TestResultsTemplateFileName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:18" + }, + { + "name": "TestResultsTemplateFilePermissions", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:19" + }, + { + "name": "resultFail", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:25" + }, + { + "name": "resultMiss", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:26" + }, + { + "name": "resultPass", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:23" + }, + { + "name": "resultSkip", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/check/results/results.go:24" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim", + "name": "claim", + "files": 1, + "imports": 
[ + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show", + "github.com/spf13/cobra" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a subcommand for claim operations\n\nIt initializes the claim command by attaching its compare and show\nsubcommands, each of which provides functionality for comparing claim files\nor displaying claim information. The function returns the configured\ncobra.Command ready to be added to the main application root command.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/claim.go:22", + "calls": [ + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tclaimCompareFiles.Flags().StringVarP(", + "\t\t\u0026Claim1FilePathFlag, \"claim1\", \"1\", \"\",", + "\t\t\"existing claim1 file. (Required) first file to compare\",", + "\t)", + "\tclaimCompareFiles.Flags().StringVarP(", + "\t\t\u0026Claim2FilePathFlag, \"claim2\", \"2\", \"\",", + "\t\t\"existing claim2 file. 
(Required) second file to compare\",", + "\t)", + "\terr := claimCompareFiles.MarkFlagRequired(\"claim1\")", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to mark flag claim1 as required: %v\", err)", + "\t\treturn nil", + "\t}", + "\terr = claimCompareFiles.MarkFlagRequired(\"claim2\")", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to mark flag claim2 as required: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\treturn claimCompareFiles", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tshowCommand.AddCommand(failures.NewCommand())", + "\tshowCommand.AddCommand(csv.NewCommand())", + "\treturn showCommand", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tclaimCommand.AddCommand(compare.NewCommand())", + "\tclaimCommand.AddCommand(show.NewCommand())", + "", + "\treturn claimCommand", + "}" + ] + } + ], + "globals": [ + { + "name": "claimCommand", + "exported": false, + "type": "", + "position": 
"/Users/deliedit/dev/certsuite/cmd/certsuite/claim/claim.go:10" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "compare", + "files": 1, + "imports": [ + "encoding/json", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/configurations", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/nodes", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/versions", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/spf13/cobra", + "os" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a command for comparing two claim files\n\nIt defines flags for the paths of two existing claim files, marks both as\nrequired, and handles errors by logging them before returning nil if marking\nfails. 
The function then returns the configured command object for use in the\nCLI.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:95", + "calls": [ + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tclaimCommand.AddCommand(compare.NewCommand())", + "\tclaimCommand.AddCommand(show.NewCommand())", + "", + "\treturn claimCommand", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tclaimCompareFiles.Flags().StringVarP(", + "\t\t\u0026Claim1FilePathFlag, \"claim1\", \"1\", \"\",", + "\t\t\"existing claim1 file. (Required) first file to compare\",", + "\t)", + "\tclaimCompareFiles.Flags().StringVarP(", + "\t\t\u0026Claim2FilePathFlag, \"claim2\", \"2\", \"\",", + "\t\t\"existing claim2 file. 
(Required) second file to compare\",", + "\t)", + "\terr := claimCompareFiles.MarkFlagRequired(\"claim1\")", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to mark flag claim1 as required: %v\", err)", + "\t\treturn nil", + "\t}", + "\terr = claimCompareFiles.MarkFlagRequired(\"claim2\")", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to mark flag claim2 as required: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\treturn claimCompareFiles", + "}" + ] + }, + { + "name": "claimCompare", + "qualifiedName": "claimCompare", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "claimCompare compares two claim files for differences\n\nThis function reads the paths provided by global flags, loads each file,\nunmarshals them into claim structures, and then generates diff reports for\nversions, test cases, configurations, and nodes. The resulting diffs are\nprinted to standard output. If any step fails, it logs a fatal error and\nexits.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:125", + "calls": [ + { + "name": "claimCompareFilesfunc", + "kind": "function", + "source": [ + "func claimCompareFilesfunc(claim1, claim2 string) error {", + "\t// readfiles", + "\tclaimdata1, err := os.ReadFile(claim1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim1 file: %v\", err)", + "\t}", + "", + "\tclaimdata2, err := os.ReadFile(claim2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim2 file: %v\", err)", + "\t}", + "", + "\t// unmarshal the files", + "\tclaimFile1Data, err := unmarshalClaimFile(claimdata1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim1 file: %v\", err)", + "\t}", + "", + "\tclaimFile2Data, err := unmarshalClaimFile(claimdata2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim2 file: %v\", err)", + "\t}", + "", + "\t// Compare claim versions.", + "\tversionsDiff := 
versions.Compare(\u0026claimFile1Data.Claim.Versions, \u0026claimFile2Data.Claim.Versions)", + "\tfmt.Println(versionsDiff)", + "", + "\t// Show test cases results summary and differences.", + "\ttcsDiffReport := testcases.GetDiffReport(claimFile1Data.Claim.Results, claimFile2Data.Claim.Results)", + "\tfmt.Println(tcsDiffReport)", + "", + "\t// Show Certification Suite configuration differences.", + "\tclaim1Configurations := \u0026claimFile1Data.Claim.Configurations", + "\tclaim2Configurations := \u0026claimFile2Data.Claim.Configurations", + "\tconfigurationsDiffReport := configurations.GetDiffReport(claim1Configurations, claim2Configurations)", + "\tfmt.Println(configurationsDiffReport)", + "", + "\t// Show the cluster differences.", + "\tnodesDiff := nodes.GetDiffReport(\u0026claimFile1Data.Claim.Nodes, \u0026claimFile2Data.Claim.Nodes)", + "\tfmt.Print(nodesDiff)", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func claimCompare(_ *cobra.Command, _ []string) error {", + "\terr := claimCompareFilesfunc(Claim1FilePathFlag, Claim2FilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error comparing claim files: %v\", err)", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "claimCompareFilesfunc", + "qualifiedName": "claimCompareFilesfunc", + "exported": false, + "signature": "func(string, string)(error)", + "doc": "claimCompareFilesfunc Reads two claim files, unmarshals them, and outputs structured comparison reports\n\nThe function loads the contents of two specified claim files and parses each\nJSON document into a claim schema 
structure. It then generates separate diff\nreports for the claim versions, test case results, configuration differences,\nand node details, printing each report to standard output. Errors during file\nreading or unmarshalling are wrapped with context and returned for handling.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:140", + "calls": [ + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "unmarshalClaimFile", + "kind": "function", + "source": [ + "func unmarshalClaimFile(claimdata []byte) (claim.Schema, error) {", + "\tvar claimDataResult claim.Schema", + "\terr := json.Unmarshal(claimdata, \u0026claimDataResult)", + "\tif err != nil {", + "\t\treturn claim.Schema{}, err", + "\t}", + "\treturn claimDataResult, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "unmarshalClaimFile", + "kind": "function", + "source": [ + "func unmarshalClaimFile(claimdata []byte) (claim.Schema, error) {", + "\tvar claimDataResult claim.Schema", + "\terr := json.Unmarshal(claimdata, \u0026claimDataResult)", + "\tif err != nil {", + "\t\treturn claim.Schema{}, err", + "\t}", + "\treturn claimDataResult, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/versions", + "name": "Compare", + "kind": "function", + "source": [ + "func Compare(claim1Versions, claim2Versions *officialClaimScheme.Versions) *DiffReport {", + "\t// Convert the versions struct type to agnostic map[string]interface{} objects so", + "\t// it can be compared using the diff.Compare func.", + "", + "\tbytes1, err := json.Marshal(claim1Versions)", + 
"\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to marshal versions from claim 1: %v\\nq\", err)", + "\t}", + "", + "\tbytes2, err := json.Marshal(claim2Versions)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to marshal versions from claim 2: %v\\n\", err)", + "\t}", + "", + "\t// Now let's unmarshal them into interface{} vars", + "\tvar v1, v2 interface{}", + "\terr = json.Unmarshal(bytes1, \u0026v1)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to unmarshal versions from claim 1: %v\\n\", err)", + "\t}", + "", + "\terr = json.Unmarshal(bytes2, \u0026v2)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to unmarshal versions from claim 2: %v\\n\", err)", + "\t}", + "", + "\treturn \u0026DiffReport{", + "\t\tDiffs: diff.Compare(\"VERSIONS\", v1, v2, nil),", + "\t}", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport {", + "\tconst tcResultNotFound = \"not found\"", + "", + "\treport := DiffReport{}", + "", + "\tclaim1Results := getTestCasesResultsMap(resultsClaim1)", + "\tclaim2Results := getTestCasesResultsMap(resultsClaim2)", + "", + "\ttcNames := getMergedTestCasesNames(claim1Results, claim2Results)", + "", + "\treport.TestCases = []TcResultDifference{}", + "\tfor _, name := range tcNames {", + "\t\tclaim1TcResult, found := claim1Results[name]", + "\t\tif !found {", + "\t\t\tclaim1TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tclaim2TcResult, found := claim2Results[name]", + "\t\tif !found {", + "\t\t\tclaim2TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tif claim1TcResult == claim2TcResult \u0026\u0026 claim1TcResult != tcResultNotFound {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\treport.TestCases = append(report.TestCases, 
TcResultDifference{", + "\t\t\tName: name,", + "\t\t\tClaim1Result: claim1TcResult,", + "\t\t\tClaim2Result: claim2TcResult,", + "\t\t})", + "", + "\t\treport.DifferentTestCasesResults++", + "\t}", + "", + "\treport.Claim1ResultsSummary = getTestCasesResultsSummary(claim1Results)", + "\treport.Claim2ResultsSummary = getTestCasesResultsSummary(claim2Results)", + "", + "\treturn \u0026report", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/configurations", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(claim1Configurations, claim2Configurations *claim.Configurations) *DiffReport {", + "\treturn \u0026DiffReport{", + "\t\tConfig: diff.Compare(\"Cert Suite Configuration\", claim1Configurations.Config, claim2Configurations.Config, nil),", + "\t\tAbnormalEvents: AbnormalEventsCount{", + "\t\t\tClaim1: len(claim1Configurations.AbnormalEvents),", + "\t\t\tClaim2: len(claim2Configurations.AbnormalEvents),", + "\t\t},", + "\t}", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/nodes", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(claim1Nodes, claim2Nodes *claim.Nodes) *DiffReport {", + "\treturn \u0026DiffReport{", + "\t\tNodes: diff.Compare(\"Nodes\", claim1Nodes.NodesSummary, claim2Nodes.NodesSummary, []string{\"labels\", \"annotations\"}),", + "\t\tCNI: diff.Compare(\"CNIs\", claim1Nodes.CniNetworks, claim2Nodes.CniNetworks, nil),", + "\t\tCSI: diff.Compare(\"CSIs\", claim1Nodes.CsiDriver, claim2Nodes.CsiDriver, nil),", + "\t\tHardware: diff.Compare(\"Hardware\", claim1Nodes.NodesHwInfo, claim2Nodes.NodesHwInfo, nil),", + "\t}", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ 
+ { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "claimCompare", + "kind": "function", + "source": [ + "func claimCompare(_ *cobra.Command, _ []string) error {", + "\terr := claimCompareFilesfunc(Claim1FilePathFlag, Claim2FilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error comparing claim files: %v\", err)", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func claimCompareFilesfunc(claim1, claim2 string) error {", + "\t// readfiles", + "\tclaimdata1, err := os.ReadFile(claim1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim1 file: %v\", err)", + "\t}", + "", + "\tclaimdata2, err := os.ReadFile(claim2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim2 file: %v\", err)", + "\t}", + "", + "\t// unmarshal the files", + "\tclaimFile1Data, err := unmarshalClaimFile(claimdata1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim1 file: %v\", err)", + "\t}", + "", + "\tclaimFile2Data, err := unmarshalClaimFile(claimdata2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim2 file: %v\", err)", + "\t}", + "", + "\t// Compare claim versions.", + "\tversionsDiff := versions.Compare(\u0026claimFile1Data.Claim.Versions, \u0026claimFile2Data.Claim.Versions)", + "\tfmt.Println(versionsDiff)", + "", + "\t// Show test cases results summary and differences.", + "\ttcsDiffReport := testcases.GetDiffReport(claimFile1Data.Claim.Results, claimFile2Data.Claim.Results)", + "\tfmt.Println(tcsDiffReport)", + "", + "\t// Show Certification Suite configuration differences.", + "\tclaim1Configurations := \u0026claimFile1Data.Claim.Configurations", + "\tclaim2Configurations := \u0026claimFile2Data.Claim.Configurations", + "\tconfigurationsDiffReport := configurations.GetDiffReport(claim1Configurations, claim2Configurations)", + 
"\tfmt.Println(configurationsDiffReport)", + "", + "\t// Show the cluster differences.", + "\tnodesDiff := nodes.GetDiffReport(\u0026claimFile1Data.Claim.Nodes, \u0026claimFile2Data.Claim.Nodes)", + "\tfmt.Print(nodesDiff)", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "unmarshalClaimFile", + "qualifiedName": "unmarshalClaimFile", + "exported": false, + "signature": "func([]byte)(claim.Schema, error)", + "doc": "unmarshalClaimFile Parses raw claim data into a structured schema\n\nThis function receives raw JSON bytes representing a claim file, attempts to\nunmarshal them into the claim.Schema type, and returns either the populated\nstruct or an error if parsing fails. It uses standard library JSON decoding\nand propagates any unmarshaling errors back to the caller.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:190", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "claimCompareFilesfunc", + "kind": "function", + "source": [ + "func claimCompareFilesfunc(claim1, claim2 string) error {", + "\t// readfiles", + "\tclaimdata1, err := os.ReadFile(claim1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim1 file: %v\", err)", + "\t}", + "", + "\tclaimdata2, err := os.ReadFile(claim2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim2 file: %v\", err)", + "\t}", + "", + "\t// unmarshal the files", + "\tclaimFile1Data, err := unmarshalClaimFile(claimdata1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim1 file: %v\", err)", + "\t}", + "", + "\tclaimFile2Data, err := unmarshalClaimFile(claimdata2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim2 file: %v\", err)", + "\t}", + "", + "\t// Compare claim versions.", + "\tversionsDiff := 
versions.Compare(\u0026claimFile1Data.Claim.Versions, \u0026claimFile2Data.Claim.Versions)", + "\tfmt.Println(versionsDiff)", + "", + "\t// Show test cases results summary and differences.", + "\ttcsDiffReport := testcases.GetDiffReport(claimFile1Data.Claim.Results, claimFile2Data.Claim.Results)", + "\tfmt.Println(tcsDiffReport)", + "", + "\t// Show Certification Suite configuration differences.", + "\tclaim1Configurations := \u0026claimFile1Data.Claim.Configurations", + "\tclaim2Configurations := \u0026claimFile2Data.Claim.Configurations", + "\tconfigurationsDiffReport := configurations.GetDiffReport(claim1Configurations, claim2Configurations)", + "\tfmt.Println(configurationsDiffReport)", + "", + "\t// Show the cluster differences.", + "\tnodesDiff := nodes.GetDiffReport(\u0026claimFile1Data.Claim.Nodes, \u0026claimFile2Data.Claim.Nodes)", + "\tfmt.Print(nodesDiff)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func unmarshalClaimFile(claimdata []byte) (claim.Schema, error) {", + "\tvar claimDataResult claim.Schema", + "\terr := json.Unmarshal(claimdata, \u0026claimDataResult)", + "\tif err != nil {", + "\t\treturn claim.Schema{}, err", + "\t}", + "\treturn claimDataResult, nil", + "}" + ] + } + ], + "globals": [ + { + "name": "Claim1FilePathFlag", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:77" + }, + { + "name": "Claim2FilePathFlag", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:78" + }, + { + "name": "claimCompareFiles", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:80" + } + ], + "consts": [ + { + "name": "longHelp", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/compare.go:17" + } + ] + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/configurations", + "name": "configurations", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim" + ], + "structs": [ + { + "name": "AbnormalEventsCount", + "exported": true, + "doc": "AbnormalEventsCount Displays counts of abnormal events for two claims\n\nThis struct holds integer counts of abnormal events for two distinct claims,\nnamed Claim1 and Claim2. The String method formats these values into a\nreadable table with headers, producing a string that summarizes the event\ncounts for comparison purposes.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/configurations/configurations.go:16", + "fields": { + "Claim1": "int", + "Claim2": "int" + }, + "methodNames": [ + "String" + ], + "source": [ + "type AbnormalEventsCount struct {", + "\tClaim1 int `json:\"claim1\"`", + "\tClaim2 int `json:\"claim2\"`", + "}" + ] + }, + { + "name": "DiffReport", + "exported": true, + "doc": "DiffReport captures configuration differences and abnormal event counts\n\nThis structure contains a diff of Cert Suite configuration objects and a\ncount of abnormal events for two claims. The Config field holds the result\nfrom a diff comparison, while AbnormalEvents stores how many abnormal events\neach claim reported. 
It is used to report and display discrepancies between\nclaims.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/configurations/configurations.go:48", + "fields": { + "AbnormalEvents": "AbnormalEventsCount", + "Config": "*diff.Diffs" + }, + "methodNames": [ + "String" + ], + "source": [ + "type DiffReport struct {", + "\tConfig *diff.Diffs `json:\"CertSuiteConfig\"`", + "\tAbnormalEvents AbnormalEventsCount `json:\"abnormalEventsCount\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "String", + "qualifiedName": "AbnormalEventsCount.String", + "exported": true, + "receiver": "AbnormalEventsCount", + "signature": "func()(string)", + "doc": "AbnormalEventsCount.String Formats abnormal event counts for two claims\n\nThis method builds a multi-line string that displays the number of abnormal\nevents detected in two separate claims. It starts with a header line, then\nadds a formatted table row showing the claim identifiers and their\ncorresponding counts using printf-style formatting. 
The resulting string is\nreturned for display or logging.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/configurations/configurations.go:28", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *AbnormalEventsCount) String() string {", + "\tconst (", + "\t\trowHeaderFmt = \"%-12s%-s\\n\"", + "\t\trowDataFmt = \"%-12d%-d\\n\"", + "\t)", + "", + "\tstr := \"Cluster abnormal events count\\n\"", + "\tstr += fmt.Sprintf(rowHeaderFmt, \"CLAIM 1\", \"CLAIM 2\")", + "\tstr += fmt.Sprintf(rowDataFmt, c.Claim1, c.Claim2)", + "", + "\treturn str", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "DiffReport.String", + "exported": true, + "receiver": "DiffReport", + "signature": "func()(string)", + "doc": "DiffReport.String Formats the diff report into a readable string\n\nThis method builds a formatted representation of a configuration comparison,\nbeginning with header lines and then appending the configuration details\nfollowed by any abnormal events. 
It concatenates strings from the embedded\nConfig and AbnormalEvents fields and returns the final result as a single\nstring.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/configurations/configurations.go:60", + "calls": [ + { + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d *DiffReport) String() string {", + "\tstr := \"CONFIGURATIONS\\n\"", + "\tstr += \"--------------\\n\\n\"", + "", + "\tstr += d.Config.String()", + "", + "\tstr += \"\\n\"", + "\tstr += d.AbnormalEvents.String()", + "", + "\treturn str", + "}" + ] + }, + { + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d *DiffReport) String() string {", + "\tstr := \"CONFIGURATIONS\\n\"", + "\tstr += \"--------------\\n\\n\"", + "", + "\tstr += d.Config.String()", + "", + "\tstr += \"\\n\"", + "\tstr += d.AbnormalEvents.String()", + "", + "\treturn str", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/configurations", + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d *DiffReport) String() string {", + "\tstr := \"CONFIGURATIONS\\n\"", + "\tstr += \"--------------\\n\\n\"", + "", + "\tstr += d.Config.String()", + "", + "\tstr += \"\\n\"", + "\tstr += d.AbnormalEvents.String()", + "", + "\treturn str", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (d *DiffReport) String() string {", + "\tstr := \"CONFIGURATIONS\\n\"", + "\tstr += \"--------------\\n\\n\"", + "", + "\tstr += d.Config.String()", + "", + "\tstr += \"\\n\"", + "\tstr += d.AbnormalEvents.String()", + "", + "\treturn str", + "}" + ] + }, + { + "name": "GetDiffReport", + "qualifiedName": "GetDiffReport", + "exported": true, + "signature": "func(*claim.Configurations, *claim.Configurations)(*DiffReport)", + "doc": "GetDiffReport Creates a report of configuration differences\n\nThe function compares two configuration 
objects from claim files, generating\na DiffReport that includes field-by-field differences in the main\nconfiguration map and counts of abnormal events present in each file. It uses\nan external diff utility to compute the detailed comparison and returns the\nassembled report for further processing or display.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/configurations/configurations.go:79", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "Compare", + "kind": "function", + "source": [ + "func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", + "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, claim1Field := range claim1Fields {", + "\t\t// Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + "\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + "\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = 
append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "claimCompareFilesfunc", + "kind": "function", + "source": [ + "func claimCompareFilesfunc(claim1, claim2 string) error {", + "\t// readfiles", + "\tclaimdata1, err := os.ReadFile(claim1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim1 file: %v\", err)", + "\t}", + "", + "\tclaimdata2, err := os.ReadFile(claim2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim2 file: %v\", err)", + "\t}", + "", + "\t// unmarshal the files", + "\tclaimFile1Data, err := unmarshalClaimFile(claimdata1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim1 file: %v\", err)", + "\t}", + "", + "\tclaimFile2Data, err := unmarshalClaimFile(claimdata2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim2 file: %v\", err)", + "\t}", + "", + "\t// Compare claim versions.", + "\tversionsDiff := versions.Compare(\u0026claimFile1Data.Claim.Versions, \u0026claimFile2Data.Claim.Versions)", + "\tfmt.Println(versionsDiff)", + "", + "\t// Show test cases results summary and differences.", + "\ttcsDiffReport := testcases.GetDiffReport(claimFile1Data.Claim.Results, 
claimFile2Data.Claim.Results)", + "\tfmt.Println(tcsDiffReport)", + "", + "\t// Show Certification Suite configuration differences.", + "\tclaim1Configurations := \u0026claimFile1Data.Claim.Configurations", + "\tclaim2Configurations := \u0026claimFile2Data.Claim.Configurations", + "\tconfigurationsDiffReport := configurations.GetDiffReport(claim1Configurations, claim2Configurations)", + "\tfmt.Println(configurationsDiffReport)", + "", + "\t// Show the cluster differences.", + "\tnodesDiff := nodes.GetDiffReport(\u0026claimFile1Data.Claim.Nodes, \u0026claimFile2Data.Claim.Nodes)", + "\tfmt.Print(nodesDiff)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetDiffReport(claim1Configurations, claim2Configurations *claim.Configurations) *DiffReport {", + "\treturn \u0026DiffReport{", + "\t\tConfig: diff.Compare(\"Cert Suite Configuration\", claim1Configurations.Config, claim2Configurations.Config, nil),", + "\t\tAbnormalEvents: AbnormalEventsCount{", + "\t\t\tClaim1: len(claim1Configurations.AbnormalEvents),", + "\t\t\tClaim2: len(claim2Configurations.AbnormalEvents),", + "\t\t},", + "\t}", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "diff", + "files": 1, + "imports": [ + "fmt", + "reflect", + "sort", + "strconv", + "strings" + ], + "structs": [ + { + "name": "Diffs", + "exported": true, + "doc": "Diffs Captures differences between two JSON objects\n\nThis structure records fields that differ, as well as those present only in\none of the compared claims. It stores the object name for contextual output\nand provides a method to format the differences into a readable table. 
The\nfields are populated by comparing flattened representations of each claim.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/diff/diff.go:17", + "fields": { + "Fields": "[]FieldDiff", + "FieldsInClaim1Only": "[]string", + "FieldsInClaim2Only": "[]string", + "Name": "string" + }, + "methodNames": [ + "String" + ], + "source": [ + "type Diffs struct {", + "\t// Name of the json object whose diffs are stored here.", + "\t// It will be used when serializing the data in table format.", + "\tName string", + "\t// CNI Fields that appear in both claim Fields but their values are different.", + "\tFields []FieldDiff", + "", + "\tFieldsInClaim1Only []string", + "\tFieldsInClaim2Only []string", + "}" + ] + }, + { + "name": "FieldDiff", + "exported": true, + "doc": "FieldDiff Represents a mismatch between two claim files\n\nThis structure records the location of a differing field along with its value\nfrom each claim file. It is used during comparison to track which fields\ndiffer, enabling further processing or reporting. The field path indicates\nwhere in the document the discrepancy occurs.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/diff/diff.go:34", + "fields": { + "Claim1Value": "interface{}", + "Claim2Value": "interface{}", + "FieldPath": "string" + }, + "methodNames": null, + "source": [ + "type FieldDiff struct {", + "\tFieldPath string `json:\"field\"`", + "\tClaim1Value interface{} `json:\"claim1Value\"`", + "\tClaim2Value interface{} `json:\"claim2Value\"`", + "}" + ] + }, + { + "name": "field", + "exported": false, + "doc": "field represents a node in the traversal result\n\nThis structure holds the full path to a value and the value itself as\nencountered during tree walking. The Path string records the hierarchical\nlocation using delimiters, while Value captures any type of data found at\nthat point. 
It is used by the traversal routine to aggregate matching fields\nfor comparison.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/diff/diff.go:170", + "fields": { + "Path": "string", + "Value": "interface{}" + }, + "methodNames": null, + "source": [ + "type field struct {", + "\tPath string", + "\tValue interface{}", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "Compare", + "qualifiedName": "Compare", + "exported": true, + "signature": "func(string, interface{}, interface{}, []string)(*Diffs)", + "doc": "Compare Compares two JSON structures for differences\n\nThis function takes two interface values that were previously unmarshaled\nfrom JSON, walks each tree to collect paths and values, then compares\ncorresponding entries. It records mismatched values, fields present only in\nthe first object, and fields present only in the second. Optional filters\nallow limiting comparison to specified subtrees.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/diff/diff.go:117", + "calls": [ + { + "name": "traverse", + "kind": "function", + "source": [ + "func traverse(node interface{}, path string, filters []string) []field {", + "\tif node == nil {", + "\t\treturn nil", + "\t}", + "", + "\tleavePathDelimiter := `/`", + "\tfields := []field{}", + "", + "\tswitch value := node.(type) {", + "\t// map object", + "\tcase map[string]interface{}:", + "\t\t// Get all keys for sorting", + "\t\tkeys := make([]string, 0)", + "\t\tfor k := range value {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "", + "\t\t// Sort keys", + "\t\tsort.Strings(keys)", + "\t\tfor _, key := range keys {", + "\t\t\tfields = append(fields, traverse(value[key], path+leavePathDelimiter+key, filters)...)", + "\t\t}", + "\t// list object", + "\tcase []interface{}:", + "\t\tfor i, v := range value {", + "\t\t\tfields = append(fields, traverse(v, path+leavePathDelimiter+strconv.Itoa(i), filters)...)", + "\t\t}", + "\t// simple value 
(int, string...)", + "\tdefault:", + "\t\t// No filters: append every field's path=value", + "\t\tif len(filters) == 0 {", + "\t\t\tfields = append(fields, field{", + "\t\t\t\tPath: path,", + "\t\t\t\tValue: value,", + "\t\t\t})", + "\t\t}", + "", + "\t\t// Append field's whose path matches some filter.", + "\t\tfor _, filter := range filters {", + "\t\t\tif strings.Contains(path, \"/\"+filter+\"/\") {", + "\t\t\t\tfields = append(fields, field{", + "\t\t\t\t\tPath: path,", + "\t\t\t\t\tValue: value,", + "\t\t\t\t})", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "traverse", + "kind": "function", + "source": [ + "func traverse(node interface{}, path string, filters []string) []field {", + "\tif node == nil {", + "\t\treturn nil", + "\t}", + "", + "\tleavePathDelimiter := `/`", + "\tfields := []field{}", + "", + "\tswitch value := node.(type) {", + "\t// map object", + "\tcase map[string]interface{}:", + "\t\t// Get all keys for sorting", + "\t\tkeys := make([]string, 0)", + "\t\tfor k := range value {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "", + "\t\t// Sort keys", + "\t\tsort.Strings(keys)", + "\t\tfor _, key := range keys {", + "\t\t\tfields = append(fields, traverse(value[key], path+leavePathDelimiter+key, filters)...)", + "\t\t}", + "\t// list object", + "\tcase []interface{}:", + "\t\tfor i, v := range value {", + "\t\t\tfields = append(fields, traverse(v, path+leavePathDelimiter+strconv.Itoa(i), filters)...)", + "\t\t}", + "\t// simple value (int, string...)", + "\tdefault:", + "\t\t// No filters: append every field's path=value", + "\t\tif len(filters) == 0 {", + "\t\t\tfields = append(fields, field{", + "\t\t\t\tPath: path,", + "\t\t\t\tValue: value,", + "\t\t\t})", + "\t\t}", + "", + "\t\t// Append field's whose path matches some filter.", + "\t\tfor _, filter := range filters {", + "\t\t\tif strings.Contains(path, \"/\"+filter+\"/\") {", + "\t\t\t\tfields = append(fields, field{", + "\t\t\t\t\tPath: 
path,", + "\t\t\t\t\tValue: value,", + "\t\t\t\t})", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "pkgPath": "reflect", + "name": "DeepEqual", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/configurations", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(claim1Configurations, claim2Configurations *claim.Configurations) *DiffReport {", + "\treturn \u0026DiffReport{", + "\t\tConfig: diff.Compare(\"Cert Suite Configuration\", claim1Configurations.Config, claim2Configurations.Config, nil),", + "\t\tAbnormalEvents: AbnormalEventsCount{", + "\t\t\tClaim1: len(claim1Configurations.AbnormalEvents),", + "\t\t\tClaim2: len(claim2Configurations.AbnormalEvents),", + "\t\t},", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/nodes", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(claim1Nodes, claim2Nodes *claim.Nodes) *DiffReport {", + "\treturn \u0026DiffReport{", + "\t\tNodes: diff.Compare(\"Nodes\", claim1Nodes.NodesSummary, claim2Nodes.NodesSummary, []string{\"labels\", \"annotations\"}),", + "\t\tCNI: diff.Compare(\"CNIs\", claim1Nodes.CniNetworks, claim2Nodes.CniNetworks, nil),", + "\t\tCSI: diff.Compare(\"CSIs\", claim1Nodes.CsiDriver, claim2Nodes.CsiDriver, nil),", + "\t\tHardware: diff.Compare(\"Hardware\", claim1Nodes.NodesHwInfo, claim2Nodes.NodesHwInfo, nil),", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/versions", + "name": 
"Compare", + "kind": "function", + "source": [ + "func Compare(claim1Versions, claim2Versions *officialClaimScheme.Versions) *DiffReport {", + "\t// Convert the versions struct type to agnostic map[string]interface{} objects so", + "\t// it can be compared using the diff.Compare func.", + "", + "\tbytes1, err := json.Marshal(claim1Versions)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to marshal versions from claim 1: %v\\nq\", err)", + "\t}", + "", + "\tbytes2, err := json.Marshal(claim2Versions)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to marshal versions from claim 2: %v\\n\", err)", + "\t}", + "", + "\t// Now let's unmarshal them into interface{} vars", + "\tvar v1, v2 interface{}", + "\terr = json.Unmarshal(bytes1, \u0026v1)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to unmarshal versions from claim 1: %v\\n\", err)", + "\t}", + "", + "\terr = json.Unmarshal(bytes2, \u0026v2)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to unmarshal versions from claim 2: %v\\n\", err)", + "\t}", + "", + "\treturn \u0026DiffReport{", + "\t\tDiffs: diff.Compare(\"VERSIONS\", v1, v2, nil),", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", + "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, claim1Field := range claim1Fields {", + "\t\t// 
Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + "\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + "\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "Diffs.String", + "exported": true, + "receiver": "Diffs", + "signature": "func()(string)", + "doc": "Diffs.String Formats a readable report of claim differences\n\nThe method builds a string that lists fields with differing values between\ntwo claims, as well as fields present only in one claim or the other. It\ncalculates column widths based on longest field paths and values to align the\ntable neatly. 
If no differences exist it displays a placeholder indicating\nnone were found.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/diff/diff.go:47", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (d *Diffs) String() string {", + "\tconst (", + "\t\tnoDiffs = \"\u003cnone\u003e\"", + "\t\tcolumnsGapSize = 5", + "\t)", + "", + "\t// Get the length of the longest field path so we can use it as the column size.", + "\tmaxFieldPathLength := len(\"FIELD\")", + "\t// Same for the column for the values from the claim1 file.", + "\tmaxClaim1FieldValueLength := len(\"CLAIM 1\")", + "\tfor _, diff := range d.Fields {", + "\t\tfieldPathLength := len(diff.FieldPath)", + "\t\tif fieldPathLength \u003e maxFieldPathLength {", + "\t\t\tmaxFieldPathLength = len(diff.FieldPath)", + "\t\t}", + "", + "\t\tclaim1ValueLength := len(fmt.Sprint(diff.Claim1Value))", + "\t\tif claim1ValueLength \u003e maxClaim1FieldValueLength {", + "\t\t\tmaxClaim1FieldValueLength = claim1ValueLength", + "\t\t}", + "\t}", + "", + "\t// Add an extra gap to avoid columns to appear too close.", + "\tfieldRowLen := maxFieldPathLength + columnsGapSize", + "\tclaim1FieldValueRowLen := maxClaim1FieldValueLength + 
columnsGapSize", + "", + "\t// Create the format string using those dynamic widths.", + "\tcniDiffRowFmt := \"%-\" + fmt.Sprint(fieldRowLen) + \"s%-\" + fmt.Sprint(claim1FieldValueRowLen) + \"v%-v\\n\"", + "", + "\t// Generate a line per different field with their values in both claim files.", + "\tstr := d.Name + \": Differences\\n\"", + "\tstr += fmt.Sprintf(cniDiffRowFmt, \"FIELD\", \"CLAIM 1\", \"CLAIM 2\")", + "\tif len(d.Fields) != 0 {", + "\t\tfor _, diff := range d.Fields {", + "\t\t\tstr += fmt.Sprintf(cniDiffRowFmt, diff.FieldPath, diff.Claim1Value, diff.Claim2Value)", + "\t\t}", + "\t} else {", + "\t\tstr += noDiffs + \"\\n\"", + "\t}", + "", + "\t// Generate a line per field that was found in claim1 only.", + "\tstr += \"\\n\" + d.Name + \": Only in CLAIM 1\\n\"", + "\tif len(d.FieldsInClaim1Only) \u003e 0 {", + "\t\tfor _, field := range d.FieldsInClaim1Only {", + "\t\t\tstr += field + \"\\n\"", + "\t\t}", + "\t} else {", + "\t\tstr += noDiffs + \"\\n\"", + "\t}", + "", + "\t// Generate a line per field that was found in claim2 only.", + "\tstr += \"\\n\" + d.Name + \": Only in CLAIM 2\\n\"", + "\tif len(d.FieldsInClaim2Only) \u003e 0 {", + "\t\tfor _, field := range d.FieldsInClaim2Only {", + "\t\t\tstr += field + \"\\n\"", + "\t\t}", + "\t} else {", + "\t\tstr += noDiffs + \"\\n\"", + "\t}", + "", + "\treturn str", + "}" + ] + }, + { + "name": "traverse", + "qualifiedName": "traverse", + "exported": false, + "signature": "func(interface{}, string, []string)([]field)", + "doc": "traverse recursively collects leaf paths and values from a nested data structure\n\nThe function walks through maps, slices, or simple values, building a path\nstring for each leaf node separated by slashes. It optionally filters the\ncollected fields based on provided substrings in the path. 
The result is a\nslice of field structs containing the full path and the corresponding value.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/diff/diff.go:181", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "traverse", + "kind": "function", + "source": [ + "func traverse(node interface{}, path string, filters []string) []field {", + "\tif node == nil {", + "\t\treturn nil", + "\t}", + "", + "\tleavePathDelimiter := `/`", + "\tfields := []field{}", + "", + "\tswitch value := node.(type) {", + "\t// map object", + "\tcase map[string]interface{}:", + "\t\t// Get all keys for sorting", + "\t\tkeys := make([]string, 0)", + "\t\tfor k := range value {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "", + "\t\t// Sort keys", + "\t\tsort.Strings(keys)", + "\t\tfor _, key := range keys {", + "\t\t\tfields = append(fields, traverse(value[key], path+leavePathDelimiter+key, filters)...)", + "\t\t}", + "\t// list object", + "\tcase []interface{}:", + "\t\tfor i, v := range value {", + "\t\t\tfields = append(fields, traverse(v, path+leavePathDelimiter+strconv.Itoa(i), filters)...)", + "\t\t}", + "\t// simple value (int, string...)", + "\tdefault:", + "\t\t// No filters: append every field's path=value", + "\t\tif len(filters) == 0 {", + "\t\t\tfields = append(fields, field{", + "\t\t\t\tPath: path,", + "\t\t\t\tValue: value,", + "\t\t\t})", + "\t\t}", + "", + "\t\t// Append field's whose path matches some filter.", + "\t\tfor _, filter := range filters {", + "\t\t\tif strings.Contains(path, \"/\"+filter+\"/\") {", + "\t\t\t\tfields = append(fields, field{", + "\t\t\t\t\tPath: path,", + "\t\t\t\t\tValue: value,", + "\t\t\t\t})", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { 
+ "name": "traverse", + "kind": "function", + "source": [ + "func traverse(node interface{}, path string, filters []string) []field {", + "\tif node == nil {", + "\t\treturn nil", + "\t}", + "", + "\tleavePathDelimiter := `/`", + "\tfields := []field{}", + "", + "\tswitch value := node.(type) {", + "\t// map object", + "\tcase map[string]interface{}:", + "\t\t// Get all keys for sorting", + "\t\tkeys := make([]string, 0)", + "\t\tfor k := range value {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "", + "\t\t// Sort keys", + "\t\tsort.Strings(keys)", + "\t\tfor _, key := range keys {", + "\t\t\tfields = append(fields, traverse(value[key], path+leavePathDelimiter+key, filters)...)", + "\t\t}", + "\t// list object", + "\tcase []interface{}:", + "\t\tfor i, v := range value {", + "\t\t\tfields = append(fields, traverse(v, path+leavePathDelimiter+strconv.Itoa(i), filters)...)", + "\t\t}", + "\t// simple value (int, string...)", + "\tdefault:", + "\t\t// No filters: append every field's path=value", + "\t\tif len(filters) == 0 {", + "\t\t\tfields = append(fields, field{", + "\t\t\t\tPath: path,", + "\t\t\t\tValue: value,", + "\t\t\t})", + "\t\t}", + "", + "\t\t// Append field's whose path matches some filter.", + "\t\tfor _, filter := range filters {", + "\t\t\tif strings.Contains(path, \"/\"+filter+\"/\") {", + "\t\t\t\tfields = append(fields, field{", + "\t\t\t\t\tPath: path,", + "\t\t\t\t\tValue: value,", + "\t\t\t\t})", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "Compare", + "kind": "function", + "source": [ + 
"func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", + "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, claim1Field := range claim1Fields {", + "\t\t// Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + "\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + "\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", 
+ "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "traverse", + "kind": "function", + "source": [ + "func traverse(node interface{}, path string, filters []string) []field {", + "\tif node == nil {", + "\t\treturn nil", + "\t}", + "", + "\tleavePathDelimiter := `/`", + "\tfields := []field{}", + "", + "\tswitch value := node.(type) {", + "\t// map object", + "\tcase map[string]interface{}:", + "\t\t// Get all keys for sorting", + "\t\tkeys := make([]string, 0)", + "\t\tfor k := range value {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "", + "\t\t// Sort keys", + "\t\tsort.Strings(keys)", + "\t\tfor _, key := range keys {", + "\t\t\tfields = append(fields, traverse(value[key], path+leavePathDelimiter+key, filters)...)", + "\t\t}", + "\t// list object", + "\tcase []interface{}:", + "\t\tfor i, v := range value {", + "\t\t\tfields = append(fields, traverse(v, path+leavePathDelimiter+strconv.Itoa(i), filters)...)", + "\t\t}", + "\t// simple value (int, string...)", + "\tdefault:", + "\t\t// No filters: append every field's path=value", + "\t\tif len(filters) == 0 {", + "\t\t\tfields = append(fields, field{", + "\t\t\t\tPath: path,", + "\t\t\t\tValue: value,", + "\t\t\t})", + "\t\t}", + "", + "\t\t// Append field's whose path matches some filter.", + "\t\tfor _, filter := range filters {", + "\t\t\tif strings.Contains(path, \"/\"+filter+\"/\") {", + "\t\t\t\tfields = append(fields, field{", + "\t\t\t\t\tPath: path,", + "\t\t\t\t\tValue: value,", + "\t\t\t\t})", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn fields", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func traverse(node interface{}, path string, filters []string) []field {", + "\tif node == nil {", + "\t\treturn nil", + "\t}", + "", + "\tleavePathDelimiter := `/`", + "\tfields := []field{}", + "", + "\tswitch value := node.(type) {", + "\t// map object", + "\tcase map[string]interface{}:", 
+ "\t\t// Get all keys for sorting", + "\t\tkeys := make([]string, 0)", + "\t\tfor k := range value {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "", + "\t\t// Sort keys", + "\t\tsort.Strings(keys)", + "\t\tfor _, key := range keys {", + "\t\t\tfields = append(fields, traverse(value[key], path+leavePathDelimiter+key, filters)...)", + "\t\t}", + "\t// list object", + "\tcase []interface{}:", + "\t\tfor i, v := range value {", + "\t\t\tfields = append(fields, traverse(v, path+leavePathDelimiter+strconv.Itoa(i), filters)...)", + "\t\t}", + "\t// simple value (int, string...)", + "\tdefault:", + "\t\t// No filters: append every field's path=value", + "\t\tif len(filters) == 0 {", + "\t\t\tfields = append(fields, field{", + "\t\t\t\tPath: path,", + "\t\t\t\tValue: value,", + "\t\t\t})", + "\t\t}", + "", + "\t\t// Append field's whose path matches some filter.", + "\t\tfor _, filter := range filters {", + "\t\t\tif strings.Contains(path, \"/\"+filter+\"/\") {", + "\t\t\t\tfields = append(fields, field{", + "\t\t\t\t\tPath: path,", + "\t\t\t\t\tValue: value,", + "\t\t\t\t})", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn fields", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/nodes", + "name": "nodes", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim" + ], + "structs": [ + { + "name": "DiffReport", + "exported": true, + "doc": "DiffReport Summarizes differences between two node claims\n\nIt aggregates comparison results for nodes, CNI networks, CSI drivers, and\nhardware information into separate diff objects. Each field holds a report of\nchanges or missing entries between the two provided claim files. 
The struct\nprovides a consolidated view that can be rendered as a human‑readable\nstring.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/nodes/nodes.go:15", + "fields": { + "CNI": "*diff.Diffs", + "CSI": "*diff.Diffs", + "Hardware": "*diff.Diffs", + "Nodes": "*diff.Diffs" + }, + "methodNames": [ + "String" + ], + "source": [ + "type DiffReport struct {", + "\tNodes *diff.Diffs `json:\"nodes\"`", + "\tCNI *diff.Diffs `json:\"CNI\"`", + "\tCSI *diff.Diffs `json:\"CSI\"`", + "\tHardware *diff.Diffs `json:\"hardware\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "String", + "qualifiedName": "DiffReport.String", + "exported": true, + "receiver": "DiffReport", + "signature": "func()(string)", + "doc": "DiffReport.String Formats node differences into a readable table\n\nIt builds a string starting with a header and separator, then appends the\nstring representations of any non‑nil subreports for Nodes, CNI, CSI, and\nHardware, each followed by a newline. 
The resulting text lists discrepancies\nfound in cluster nodes across two claim files.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/nodes/nodes.go:28", + "calls": [ + { + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d DiffReport) String() string {", + "\tstr := \"CLUSTER NODES DIFFERENCES\\n\"", + "\tstr += \"-------------------------\\n\\n\"", + "", + "\tif d.Nodes != nil {", + "\t\tstr += d.Nodes.String() + \"\\n\"", + "\t}", + "", + "\tif d.CNI != nil {", + "\t\tstr += d.CNI.String() + \"\\n\"", + "\t}", + "", + "\tif d.CSI != nil {", + "\t\tstr += d.CSI.String() + \"\\n\"", + "\t}", + "", + "\tif d.Hardware != nil {", + "\t\tstr += d.Hardware.String() + \"\\n\"", + "\t}", + "", + "\treturn str", + "}" + ] + }, + { + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d DiffReport) String() string {", + "\tstr := \"CLUSTER NODES DIFFERENCES\\n\"", + "\tstr += \"-------------------------\\n\\n\"", + "", + "\tif d.Nodes != nil {", + "\t\tstr += d.Nodes.String() + \"\\n\"", + "\t}", + "", + "\tif d.CNI != nil {", + "\t\tstr += d.CNI.String() + \"\\n\"", + "\t}", + "", + "\tif d.CSI != nil {", + "\t\tstr += d.CSI.String() + \"\\n\"", + "\t}", + "", + "\tif d.Hardware != nil {", + "\t\tstr += d.Hardware.String() + \"\\n\"", + "\t}", + "", + "\treturn str", + "}" + ] + }, + { + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d DiffReport) String() string {", + "\tstr := \"CLUSTER NODES DIFFERENCES\\n\"", + "\tstr += \"-------------------------\\n\\n\"", + "", + "\tif d.Nodes != nil {", + "\t\tstr += d.Nodes.String() + \"\\n\"", + "\t}", + "", + "\tif d.CNI != nil {", + "\t\tstr += d.CNI.String() + \"\\n\"", + "\t}", + "", + "\tif d.CSI != nil {", + "\t\tstr += d.CSI.String() + \"\\n\"", + "\t}", + "", + "\tif d.Hardware != nil {", + "\t\tstr += d.Hardware.String() + \"\\n\"", + "\t}", + "", + "\treturn str", + "}" + ] + }, + { + "name": "DiffReport.String", + 
"kind": "function", + "source": [ + "func (d DiffReport) String() string {", + "\tstr := \"CLUSTER NODES DIFFERENCES\\n\"", + "\tstr += \"-------------------------\\n\\n\"", + "", + "\tif d.Nodes != nil {", + "\t\tstr += d.Nodes.String() + \"\\n\"", + "\t}", + "", + "\tif d.CNI != nil {", + "\t\tstr += d.CNI.String() + \"\\n\"", + "\t}", + "", + "\tif d.CSI != nil {", + "\t\tstr += d.CSI.String() + \"\\n\"", + "\t}", + "", + "\tif d.Hardware != nil {", + "\t\tstr += d.Hardware.String() + \"\\n\"", + "\t}", + "", + "\treturn str", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/nodes", + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d DiffReport) String() string {", + "\tstr := \"CLUSTER NODES DIFFERENCES\\n\"", + "\tstr += \"-------------------------\\n\\n\"", + "", + "\tif d.Nodes != nil {", + "\t\tstr += d.Nodes.String() + \"\\n\"", + "\t}", + "", + "\tif d.CNI != nil {", + "\t\tstr += d.CNI.String() + \"\\n\"", + "\t}", + "", + "\tif d.CSI != nil {", + "\t\tstr += d.CSI.String() + \"\\n\"", + "\t}", + "", + "\tif d.Hardware != nil {", + "\t\tstr += d.Hardware.String() + \"\\n\"", + "\t}", + "", + "\treturn str", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (d DiffReport) String() string {", + "\tstr := \"CLUSTER NODES DIFFERENCES\\n\"", + "\tstr += \"-------------------------\\n\\n\"", + "", + "\tif d.Nodes != nil {", + "\t\tstr += d.Nodes.String() + \"\\n\"", + "\t}", + "", + "\tif d.CNI != nil {", + "\t\tstr += d.CNI.String() + \"\\n\"", + "\t}", + "", + "\tif d.CSI != nil {", + "\t\tstr += d.CSI.String() + \"\\n\"", + "\t}", + "", + "\tif d.Hardware != nil {", + "\t\tstr += d.Hardware.String() + \"\\n\"", + "\t}", + "", + "\treturn str", + "}" + ] + }, + { + "name": "GetDiffReport", + "qualifiedName": "GetDiffReport", + "exported": true, + "signature": "func(*claim.Nodes, *claim.Nodes)(*DiffReport)", + "doc": 
"GetDiffReport Creates a report of differences between two node claim sets\n\nThis function takes pointers to two node claim structures and returns a\nDiffReport containing four diff objects: Nodes, CNIs, CSIs, and Hardware.\nEach field is produced by calling the Compare helper with appropriate data\nslices and optional filters for labels and annotations. The resulting report\naggregates all differences for downstream display or analysis.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/nodes/nodes.go:58", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "Compare", + "kind": "function", + "source": [ + "func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", + "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, claim1Field := range claim1Fields {", + "\t\t// Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + "\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + "\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := 
fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "Compare", + "kind": "function", + "source": [ + "func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", + "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, claim1Field := range claim1Fields {", + "\t\t// Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + "\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + 
"\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "Compare", + "kind": "function", + "source": [ + "func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", + "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, claim1Field := range claim1Fields {", + "\t\t// Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + 
"\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + "\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "Compare", + "kind": "function", + "source": [ + "func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", + "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, 
claim1Field := range claim1Fields {", + "\t\t// Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + "\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + "\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "claimCompareFilesfunc", + "kind": "function", + "source": [ + "func claimCompareFilesfunc(claim1, claim2 string) error {", + "\t// readfiles", + "\tclaimdata1, err := os.ReadFile(claim1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim1 file: %v\", err)", + "\t}", + "", + "\tclaimdata2, err := os.ReadFile(claim2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim2 file: %v\", err)", + "\t}", + "", + "\t// unmarshal the files", + "\tclaimFile1Data, err := unmarshalClaimFile(claimdata1)", + "\tif err != nil {", + "\t\treturn 
fmt.Errorf(\"failed to unmarshal claim1 file: %v\", err)", + "\t}", + "", + "\tclaimFile2Data, err := unmarshalClaimFile(claimdata2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim2 file: %v\", err)", + "\t}", + "", + "\t// Compare claim versions.", + "\tversionsDiff := versions.Compare(\u0026claimFile1Data.Claim.Versions, \u0026claimFile2Data.Claim.Versions)", + "\tfmt.Println(versionsDiff)", + "", + "\t// Show test cases results summary and differences.", + "\ttcsDiffReport := testcases.GetDiffReport(claimFile1Data.Claim.Results, claimFile2Data.Claim.Results)", + "\tfmt.Println(tcsDiffReport)", + "", + "\t// Show Certification Suite configuration differences.", + "\tclaim1Configurations := \u0026claimFile1Data.Claim.Configurations", + "\tclaim2Configurations := \u0026claimFile2Data.Claim.Configurations", + "\tconfigurationsDiffReport := configurations.GetDiffReport(claim1Configurations, claim2Configurations)", + "\tfmt.Println(configurationsDiffReport)", + "", + "\t// Show the cluster differences.", + "\tnodesDiff := nodes.GetDiffReport(\u0026claimFile1Data.Claim.Nodes, \u0026claimFile2Data.Claim.Nodes)", + "\tfmt.Print(nodesDiff)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetDiffReport(claim1Nodes, claim2Nodes *claim.Nodes) *DiffReport {", + "\treturn \u0026DiffReport{", + "\t\tNodes: diff.Compare(\"Nodes\", claim1Nodes.NodesSummary, claim2Nodes.NodesSummary, []string{\"labels\", \"annotations\"}),", + "\t\tCNI: diff.Compare(\"CNIs\", claim1Nodes.CniNetworks, claim2Nodes.CniNetworks, nil),", + "\t\tCSI: diff.Compare(\"CSIs\", claim1Nodes.CsiDriver, claim2Nodes.CsiDriver, nil),", + "\t\tHardware: diff.Compare(\"Hardware\", claim1Nodes.NodesHwInfo, claim2Nodes.NodesHwInfo, nil),", + "\t}", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases", + "name": 
"testcases", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "sort" + ], + "structs": [ + { + "name": "DiffReport", + "exported": true, + "doc": "DiffReport Summarizes test result differences between two claim files\n\nThis structure holds a summary of passed, skipped, and failed tests for each\nclaim file, along with a list of individual test cases whose outcomes differ.\nIt tracks the total number of differing test cases and provides a string\nrepresentation that lists both the overall status counts and the specific\ndifferences. The data is used to report and compare results between two sets\nof claim executions.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:41", + "fields": { + "Claim1ResultsSummary": "TcResultsSummary", + "Claim2ResultsSummary": "TcResultsSummary", + "DifferentTestCasesResults": "int", + "TestCases": "[]TcResultDifference" + }, + "methodNames": [ + "String" + ], + "source": [ + "type DiffReport struct {", + "\tClaim1ResultsSummary TcResultsSummary `json:\"claimFile1ResultsSummary\"`", + "\tClaim2ResultsSummary TcResultsSummary `json:\"claimFile2ResultsSummary\"`", + "", + "\tTestCases []TcResultDifference `json:\"resultsDifferences\"`", + "\tDifferentTestCasesResults int `json:\"differentTestCasesResults\"`", + "}" + ] + }, + { + "name": "TcResultDifference", + "exported": true, + "doc": "TcResultDifference Represents a discrepancy between two claim results\n\nThis structure holds the name of a test case along with the outcomes from two\ndifferent claims. 
By comparing Claim1Result and Claim2Result, users can\nidentify mismatches or confirm consistency across claim evaluations.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:27", + "fields": { + "Claim1Result": "string", + "Claim2Result": "string", + "Name": "string" + }, + "methodNames": null, + "source": [ + "type TcResultDifference struct {", + "\tName string", + "\tClaim1Result string", + "\tClaim2Result string", + "}" + ] + }, + { + "name": "TcResultsSummary", + "exported": true, + "doc": "TcResultsSummary provides a count of test case outcomes\n\nThis structure holds three integer counters: how many tests passed, were\nskipped, and failed. It is populated by iterating over result strings and\nincrementing the corresponding field. The counts can be used to report\noverall test performance.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:16", + "fields": { + "Failed": "int", + "Passed": "int", + "Skipped": "int" + }, + "methodNames": null, + "source": [ + "type TcResultsSummary struct {", + "\tPassed int", + "\tSkipped int", + "\tFailed int", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "String", + "qualifiedName": "DiffReport.String", + "exported": true, + "receiver": "DiffReport", + "signature": "func()(string)", + "doc": "DiffReport.String Formats a detailed report of test case comparisons\n\nThe method builds a human‑readable string containing two tables: one\nsummarizing the count of passed, skipped and failed cases for each claim, and\nanother listing individual test cases that differ between the claims. 
It uses\nformatted printing to align columns and returns the combined text.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:169", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (r *DiffReport) String() string {", + "\tconst (", + "\t\ttcDiffRowFmt = \"%-60s%-10s%-s\\n\"", + "\t\ttcStatusSummaryRowFmt = \"%-15s%-20s%-s\\n\"", + "\t)", + "", + "\tstr := \"RESULTS SUMMARY\\n\"", + "\tstr += \"---------------\\n\"", + "\tstr += fmt.Sprintf(tcStatusSummaryRowFmt, \"STATUS\", \"# in CLAIM-1\", \"# in CLAIM-2\")", + "\tstr += fmt.Sprintf(tcStatusSummaryRowFmt, \"passed\", fmt.Sprintf(\"%d\", r.Claim1ResultsSummary.Passed), fmt.Sprintf(\"%d\", r.Claim2ResultsSummary.Passed))", + "\tstr += fmt.Sprintf(tcStatusSummaryRowFmt, \"skipped\", fmt.Sprintf(\"%d\", r.Claim1ResultsSummary.Skipped), fmt.Sprintf(\"%d\", r.Claim2ResultsSummary.Skipped))", + "\tstr += fmt.Sprintf(tcStatusSummaryRowFmt, \"failed\", fmt.Sprintf(\"%d\", r.Claim1ResultsSummary.Failed), fmt.Sprintf(\"%d\", 
r.Claim2ResultsSummary.Failed))", + "\tstr += \"\\n\"", + "", + "\tstr += \"RESULTS DIFFERENCES\\n\"", + "\tstr += \"-------------------\\n\"", + "\tif len(r.TestCases) == 0 {", + "\t\tstr += \"\u003cnone\u003e\\n\"", + "\t\treturn str", + "\t}", + "", + "\tstr += fmt.Sprintf(tcDiffRowFmt, \"TEST CASE NAME\", \"CLAIM-1\", \"CLAIM-2\")", + "\tfor _, diff := range r.TestCases {", + "\t\tstr += fmt.Sprintf(tcDiffRowFmt, diff.Name, diff.Claim1Result, diff.Claim2Result)", + "\t}", + "", + "\treturn str", + "}" + ] + }, + { + "name": "GetDiffReport", + "qualifiedName": "GetDiffReport", + "exported": true, + "signature": "func(claim.TestSuiteResults, claim.TestSuiteResults)(*DiffReport)", + "doc": "GetDiffReport Creates a report of differences between two sets of test results\n\nThe function compares test case outcomes from two claim files, marking any\nmissing cases as \"not found\". It builds a list of differing results, counts\nthe number of discrepancies, and summarizes each claim’s passed, skipped,\nand failed totals. 
The returned DiffReport contains this information for\nfurther analysis.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:122", + "calls": [ + { + "name": "getTestCasesResultsMap", + "kind": "function", + "source": [ + "func getTestCasesResultsMap(testSuiteResults claim.TestSuiteResults) map[string]string {", + "\ttestCaseResults := map[string]string{}", + "", + "\tfor testCase := range testSuiteResults {", + "\t\ttestCaseResults[testSuiteResults[testCase].TestID.ID] = testSuiteResults[testCase].State", + "\t}", + "", + "\treturn testCaseResults", + "}" + ] + }, + { + "name": "getTestCasesResultsMap", + "kind": "function", + "source": [ + "func getTestCasesResultsMap(testSuiteResults claim.TestSuiteResults) map[string]string {", + "\ttestCaseResults := map[string]string{}", + "", + "\tfor testCase := range testSuiteResults {", + "\t\ttestCaseResults[testSuiteResults[testCase].TestID.ID] = testSuiteResults[testCase].State", + "\t}", + "", + "\treturn testCaseResults", + "}" + ] + }, + { + "name": "getMergedTestCasesNames", + "kind": "function", + "source": [ + "func getMergedTestCasesNames(results1, results2 map[string]string) []string {", + "\ttestCasesNamesMap := map[string]struct{}{}", + "", + "\tfor name := range results1 {", + "\t\ttestCasesNamesMap[name] = struct{}{}", + "\t}", + "", + "\tfor name := range results2 {", + "\t\ttestCasesNamesMap[name] = struct{}{}", + "\t}", + "", + "\t// get the full list of names and sort it", + "\tnames := []string{}", + "\tfor name := range testCasesNamesMap {", + "\t\tnames = append(names, name)", + "\t}", + "", + "\tsort.Strings(names)", + "\treturn names", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "getTestCasesResultsSummary", + "kind": "function", + "source": [ + "func getTestCasesResultsSummary(results map[string]string) TcResultsSummary {", + "\tsummary := TcResultsSummary{}", + "", + "\tfor _, result := range results {", + 
"\t\tswitch result {", + "\t\tcase claim.TestCaseResultPassed:", + "\t\t\tsummary.Passed++", + "\t\tcase claim.TestCaseResultSkipped:", + "\t\t\tsummary.Skipped++", + "\t\tcase claim.TestCaseResultFailed:", + "\t\t\tsummary.Failed++", + "\t\t}", + "\t}", + "", + "\treturn summary", + "}" + ] + }, + { + "name": "getTestCasesResultsSummary", + "kind": "function", + "source": [ + "func getTestCasesResultsSummary(results map[string]string) TcResultsSummary {", + "\tsummary := TcResultsSummary{}", + "", + "\tfor _, result := range results {", + "\t\tswitch result {", + "\t\tcase claim.TestCaseResultPassed:", + "\t\t\tsummary.Passed++", + "\t\tcase claim.TestCaseResultSkipped:", + "\t\t\tsummary.Skipped++", + "\t\tcase claim.TestCaseResultFailed:", + "\t\t\tsummary.Failed++", + "\t\t}", + "\t}", + "", + "\treturn summary", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "claimCompareFilesfunc", + "kind": "function", + "source": [ + "func claimCompareFilesfunc(claim1, claim2 string) error {", + "\t// readfiles", + "\tclaimdata1, err := os.ReadFile(claim1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim1 file: %v\", err)", + "\t}", + "", + "\tclaimdata2, err := os.ReadFile(claim2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim2 file: %v\", err)", + "\t}", + "", + "\t// unmarshal the files", + "\tclaimFile1Data, err := unmarshalClaimFile(claimdata1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim1 file: %v\", err)", + "\t}", + "", + "\tclaimFile2Data, err := unmarshalClaimFile(claimdata2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim2 file: %v\", err)", + "\t}", + "", + "\t// Compare claim versions.", + "\tversionsDiff := versions.Compare(\u0026claimFile1Data.Claim.Versions, \u0026claimFile2Data.Claim.Versions)", + "\tfmt.Println(versionsDiff)", + "", + "\t// Show test 
cases results summary and differences.", + "\ttcsDiffReport := testcases.GetDiffReport(claimFile1Data.Claim.Results, claimFile2Data.Claim.Results)", + "\tfmt.Println(tcsDiffReport)", + "", + "\t// Show Certification Suite configuration differences.", + "\tclaim1Configurations := \u0026claimFile1Data.Claim.Configurations", + "\tclaim2Configurations := \u0026claimFile2Data.Claim.Configurations", + "\tconfigurationsDiffReport := configurations.GetDiffReport(claim1Configurations, claim2Configurations)", + "\tfmt.Println(configurationsDiffReport)", + "", + "\t// Show the cluster differences.", + "\tnodesDiff := nodes.GetDiffReport(\u0026claimFile1Data.Claim.Nodes, \u0026claimFile2Data.Claim.Nodes)", + "\tfmt.Print(nodesDiff)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport {", + "\tconst tcResultNotFound = \"not found\"", + "", + "\treport := DiffReport{}", + "", + "\tclaim1Results := getTestCasesResultsMap(resultsClaim1)", + "\tclaim2Results := getTestCasesResultsMap(resultsClaim2)", + "", + "\ttcNames := getMergedTestCasesNames(claim1Results, claim2Results)", + "", + "\treport.TestCases = []TcResultDifference{}", + "\tfor _, name := range tcNames {", + "\t\tclaim1TcResult, found := claim1Results[name]", + "\t\tif !found {", + "\t\t\tclaim1TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tclaim2TcResult, found := claim2Results[name]", + "\t\tif !found {", + "\t\t\tclaim2TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tif claim1TcResult == claim2TcResult \u0026\u0026 claim1TcResult != tcResultNotFound {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\treport.TestCases = append(report.TestCases, TcResultDifference{", + "\t\t\tName: name,", + "\t\t\tClaim1Result: claim1TcResult,", + "\t\t\tClaim2Result: claim2TcResult,", + "\t\t})", + "", + "\t\treport.DifferentTestCasesResults++", + "\t}", + "", + 
"\treport.Claim1ResultsSummary = getTestCasesResultsSummary(claim1Results)", + "\treport.Claim2ResultsSummary = getTestCasesResultsSummary(claim2Results)", + "", + "\treturn \u0026report", + "}" + ] + }, + { + "name": "getMergedTestCasesNames", + "qualifiedName": "getMergedTestCasesNames", + "exported": false, + "signature": "func(map[string]string, map[string]string)([]string)", + "doc": "getMergedTestCasesNames Collects all unique test case names from two result maps\n\nThe function iterates over each input map, adding every key to a temporary\nset to eliminate duplicates. After gathering the keys, it converts the set\ninto a slice and sorts the entries alphabetically. The sorted list of test\ncase names is returned for further processing.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:71", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport {", + "\tconst tcResultNotFound = \"not found\"", + "", + "\treport := DiffReport{}", + "", + "\tclaim1Results := getTestCasesResultsMap(resultsClaim1)", + "\tclaim2Results := getTestCasesResultsMap(resultsClaim2)", + "", + "\ttcNames := getMergedTestCasesNames(claim1Results, claim2Results)", + "", + "\treport.TestCases = []TcResultDifference{}", + "\tfor _, name := range tcNames {", + "\t\tclaim1TcResult, found := claim1Results[name]", + "\t\tif !found {", + "\t\t\tclaim1TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tclaim2TcResult, found := claim2Results[name]", + "\t\tif !found {", + "\t\t\tclaim2TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tif claim1TcResult == claim2TcResult \u0026\u0026 
claim1TcResult != tcResultNotFound {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\treport.TestCases = append(report.TestCases, TcResultDifference{", + "\t\t\tName: name,", + "\t\t\tClaim1Result: claim1TcResult,", + "\t\t\tClaim2Result: claim2TcResult,", + "\t\t})", + "", + "\t\treport.DifferentTestCasesResults++", + "\t}", + "", + "\treport.Claim1ResultsSummary = getTestCasesResultsSummary(claim1Results)", + "\treport.Claim2ResultsSummary = getTestCasesResultsSummary(claim2Results)", + "", + "\treturn \u0026report", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getMergedTestCasesNames(results1, results2 map[string]string) []string {", + "\ttestCasesNamesMap := map[string]struct{}{}", + "", + "\tfor name := range results1 {", + "\t\ttestCasesNamesMap[name] = struct{}{}", + "\t}", + "", + "\tfor name := range results2 {", + "\t\ttestCasesNamesMap[name] = struct{}{}", + "\t}", + "", + "\t// get the full list of names and sort it", + "\tnames := []string{}", + "\tfor name := range testCasesNamesMap {", + "\t\tnames = append(names, name)", + "\t}", + "", + "\tsort.Strings(names)", + "\treturn names", + "}" + ] + }, + { + "name": "getTestCasesResultsMap", + "qualifiedName": "getTestCasesResultsMap", + "exported": false, + "signature": "func(claim.TestSuiteResults)(map[string]string)", + "doc": "getTestCasesResultsMap Creates a map from test case identifiers to their execution state\n\nThis helper traverses the provided test suite results, extracting each test\ncase's unique ID and its current . It builds a string-to-string mapping where\nkeys are the IDs and values are the states. 
The resulting map is used by\nother functions to compare outcomes between different claim results.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:55", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport {", + "\tconst tcResultNotFound = \"not found\"", + "", + "\treport := DiffReport{}", + "", + "\tclaim1Results := getTestCasesResultsMap(resultsClaim1)", + "\tclaim2Results := getTestCasesResultsMap(resultsClaim2)", + "", + "\ttcNames := getMergedTestCasesNames(claim1Results, claim2Results)", + "", + "\treport.TestCases = []TcResultDifference{}", + "\tfor _, name := range tcNames {", + "\t\tclaim1TcResult, found := claim1Results[name]", + "\t\tif !found {", + "\t\t\tclaim1TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tclaim2TcResult, found := claim2Results[name]", + "\t\tif !found {", + "\t\t\tclaim2TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tif claim1TcResult == claim2TcResult \u0026\u0026 claim1TcResult != tcResultNotFound {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\treport.TestCases = append(report.TestCases, TcResultDifference{", + "\t\t\tName: name,", + "\t\t\tClaim1Result: claim1TcResult,", + "\t\t\tClaim2Result: claim2TcResult,", + "\t\t})", + "", + "\t\treport.DifferentTestCasesResults++", + "\t}", + "", + "\treport.Claim1ResultsSummary = getTestCasesResultsSummary(claim1Results)", + "\treport.Claim2ResultsSummary = getTestCasesResultsSummary(claim2Results)", + "", + "\treturn \u0026report", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getTestCasesResultsMap(testSuiteResults claim.TestSuiteResults) map[string]string {", + "\ttestCaseResults := map[string]string{}", + "", + "\tfor testCase := range testSuiteResults 
{", + "\t\ttestCaseResults[testSuiteResults[testCase].TestID.ID] = testSuiteResults[testCase].State", + "\t}", + "", + "\treturn testCaseResults", + "}" + ] + }, + { + "name": "getTestCasesResultsSummary", + "qualifiedName": "getTestCasesResultsSummary", + "exported": false, + "signature": "func(map[string]string)(TcResultsSummary)", + "doc": "getTestCasesResultsSummary Aggregates test case results into a summary count\n\nThe function iterates over a mapping of test case names to result strings and\ntallies the number of passed, skipped, and failed cases. It increments\ncounters in a TcResultsSummary structure based on predefined result\nconstants. The populated summary is then returned for use elsewhere.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/testcases/testcases.go:98", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases", + "name": "GetDiffReport", + "kind": "function", + "source": [ + "func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport {", + "\tconst tcResultNotFound = \"not found\"", + "", + "\treport := DiffReport{}", + "", + "\tclaim1Results := getTestCasesResultsMap(resultsClaim1)", + "\tclaim2Results := getTestCasesResultsMap(resultsClaim2)", + "", + "\ttcNames := getMergedTestCasesNames(claim1Results, claim2Results)", + "", + "\treport.TestCases = []TcResultDifference{}", + "\tfor _, name := range tcNames {", + "\t\tclaim1TcResult, found := claim1Results[name]", + "\t\tif !found {", + "\t\t\tclaim1TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tclaim2TcResult, found := claim2Results[name]", + "\t\tif !found {", + "\t\t\tclaim2TcResult = tcResultNotFound", + "\t\t}", + "", + "\t\tif claim1TcResult == claim2TcResult \u0026\u0026 claim1TcResult != tcResultNotFound {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\treport.TestCases = append(report.TestCases, TcResultDifference{", + "\t\t\tName: name,", 
+ "\t\t\tClaim1Result: claim1TcResult,", + "\t\t\tClaim2Result: claim2TcResult,", + "\t\t})", + "", + "\t\treport.DifferentTestCasesResults++", + "\t}", + "", + "\treport.Claim1ResultsSummary = getTestCasesResultsSummary(claim1Results)", + "\treport.Claim2ResultsSummary = getTestCasesResultsSummary(claim2Results)", + "", + "\treturn \u0026report", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getTestCasesResultsSummary(results map[string]string) TcResultsSummary {", + "\tsummary := TcResultsSummary{}", + "", + "\tfor _, result := range results {", + "\t\tswitch result {", + "\t\tcase claim.TestCaseResultPassed:", + "\t\t\tsummary.Passed++", + "\t\tcase claim.TestCaseResultSkipped:", + "\t\t\tsummary.Skipped++", + "\t\tcase claim.TestCaseResultFailed:", + "\t\t\tsummary.Failed++", + "\t\t}", + "\t}", + "", + "\treturn summary", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/versions", + "name": "versions", + "files": 1, + "imports": [ + "encoding/json", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "log" + ], + "structs": [ + { + "name": "DiffReport", + "exported": true, + "doc": "DiffReport Represents the differences between two claim versions\n\nThis struct holds a pointer to a diff.Diffs object that captures all detected\nchanges when comparing two sets of claim versions. 
The String method formats\nthose differences into a human-readable string, or returns an empty\nrepresentation if no differences exist.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/versions/versions.go:17", + "fields": { + "Diffs": "*diff.Diffs" + }, + "methodNames": [ + "String" + ], + "source": [ + "type DiffReport struct {", + "\tDiffs *diff.Diffs `json:\"differences\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "Compare", + "qualifiedName": "Compare", + "exported": true, + "signature": "func(*officialClaimScheme.Versions, *officialClaimScheme.Versions)(*DiffReport)", + "doc": "Compare compares two claim version structures\n\nThe function serializes each versions object to JSON, then unmarshals them\ninto generic interface values so they can be compared by the diff package. It\nreturns a report containing differences between the two sets of versions.\nErrors during marshaling or unmarshaling cause the program to log a fatal\nmessage.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/versions/versions.go:43", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff", + "name": "Compare", + "kind": "function", + "source": [ + "func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs {", 
+ "\tobjectsDiffs := Diffs{Name: objectName}", + "", + "\tclaim1Fields := traverse(claim1Object, \"\", filters)", + "\tclaim2Fields := traverse(claim2Object, \"\", filters)", + "", + "\t// Build helper maps, to make it easier to find fields.", + "\tclaim1FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim1Fields {", + "\t\tclaim1FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\tclaim2FieldsMap := map[string]interface{}{}", + "\tfor _, field := range claim2Fields {", + "\t\tclaim2FieldsMap[field.Path] = field.Value", + "\t}", + "", + "\t// Start comparing, keeping the original order.", + "\tfor _, claim1Field := range claim1Fields {", + "\t\t// Does the field (path) in claim1 exist in claim2?", + "\t\tif claim2Value, exist := claim2FieldsMap[claim1Field.Path]; exist {", + "\t\t\t// Do they have the same value?", + "\t\t\tif !reflect.DeepEqual(claim1Field.Value, claim2Value) {", + "\t\t\t\tobjectsDiffs.Fields = append(objectsDiffs.Fields, FieldDiff{", + "\t\t\t\t\tFieldPath: claim1Field.Path,", + "\t\t\t\t\tClaim1Value: claim1Field.Value,", + "\t\t\t\t\tClaim2Value: claim2Value})", + "\t\t\t}", + "\t\t} else {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim1Field.Path, claim1Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim1Only = append(objectsDiffs.FieldsInClaim1Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\t// Fields that appear in both claim files have been already checked,", + "\t// so we only need to search fields in claim2 that will not exist in claim 1.", + "\tfor _, claim2Field := range claim2Fields {", + "\t\tif _, exist := claim1FieldsMap[claim2Field.Path]; !exist {", + "\t\t\tfieldAndValue := fmt.Sprintf(\"%s=%v\", claim2Field.Path, claim2Field.Value)", + "\t\t\tobjectsDiffs.FieldsInClaim2Only = append(objectsDiffs.FieldsInClaim2Only, fieldAndValue)", + "\t\t}", + "\t}", + "", + "\treturn \u0026objectsDiffs", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "claimCompareFilesfunc", + "kind": "function", + "source": [ + "func claimCompareFilesfunc(claim1, claim2 string) error {", + "\t// readfiles", + "\tclaimdata1, err := os.ReadFile(claim1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim1 file: %v\", err)", + "\t}", + "", + "\tclaimdata2, err := os.ReadFile(claim2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed reading claim2 file: %v\", err)", + "\t}", + "", + "\t// unmarshal the files", + "\tclaimFile1Data, err := unmarshalClaimFile(claimdata1)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim1 file: %v\", err)", + "\t}", + "", + "\tclaimFile2Data, err := unmarshalClaimFile(claimdata2)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal claim2 file: %v\", err)", + "\t}", + "", + "\t// Compare claim versions.", + "\tversionsDiff := versions.Compare(\u0026claimFile1Data.Claim.Versions, \u0026claimFile2Data.Claim.Versions)", + "\tfmt.Println(versionsDiff)", + "", + "\t// Show test cases results summary and differences.", + "\ttcsDiffReport := testcases.GetDiffReport(claimFile1Data.Claim.Results, claimFile2Data.Claim.Results)", + "\tfmt.Println(tcsDiffReport)", + "", + "\t// Show Certification Suite configuration differences.", + "\tclaim1Configurations := \u0026claimFile1Data.Claim.Configurations", + "\tclaim2Configurations := \u0026claimFile2Data.Claim.Configurations", + "\tconfigurationsDiffReport := configurations.GetDiffReport(claim1Configurations, claim2Configurations)", + "\tfmt.Println(configurationsDiffReport)", + "", + "\t// Show the cluster differences.", + "\tnodesDiff := nodes.GetDiffReport(\u0026claimFile1Data.Claim.Nodes, \u0026claimFile2Data.Claim.Nodes)", + "\tfmt.Print(nodesDiff)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Compare(claim1Versions, claim2Versions 
*officialClaimScheme.Versions) *DiffReport {", + "\t// Convert the versions struct type to agnostic map[string]interface{} objects so", + "\t// it can be compared using the diff.Compare func.", + "", + "\tbytes1, err := json.Marshal(claim1Versions)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to marshal versions from claim 1: %v\\nq\", err)", + "\t}", + "", + "\tbytes2, err := json.Marshal(claim2Versions)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to marshal versions from claim 2: %v\\n\", err)", + "\t}", + "", + "\t// Now let's unmarshal them into interface{} vars", + "\tvar v1, v2 interface{}", + "\terr = json.Unmarshal(bytes1, \u0026v1)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to unmarshal versions from claim 1: %v\\n\", err)", + "\t}", + "", + "\terr = json.Unmarshal(bytes2, \u0026v2)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to unmarshal versions from claim 2: %v\\n\", err)", + "\t}", + "", + "\treturn \u0026DiffReport{", + "\t\tDiffs: diff.Compare(\"VERSIONS\", v1, v2, nil),", + "\t}", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "DiffReport.String", + "exported": true, + "receiver": "DiffReport", + "signature": "func()(string)", + "doc": "DiffReport.String Returns a formatted string representation of the diff report\n\nWhen called on a DiffReport instance, this method checks if its internal\nDiffs field is nil. If it is, it creates an empty Diffs object and returns\nits string form; otherwise, it delegates to the existing Diffs object's\nString method. 
The resulting string summarizes the differences captured by\nthe report.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/compare/versions/versions.go:28", + "calls": [ + { + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d *DiffReport) String() string {", + "\tif d.Diffs == nil {", + "\t\treturn (\u0026diff.Diffs{}).String()", + "\t}", + "", + "\treturn d.Diffs.String()", + "}" + ] + }, + { + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d *DiffReport) String() string {", + "\tif d.Diffs == nil {", + "\t\treturn (\u0026diff.Diffs{}).String()", + "\t}", + "", + "\treturn d.Diffs.String()", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/versions", + "name": "DiffReport.String", + "kind": "function", + "source": [ + "func (d *DiffReport) String() string {", + "\tif d.Diffs == nil {", + "\t\treturn (\u0026diff.Diffs{}).String()", + "\t}", + "", + "\treturn d.Diffs.String()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (d *DiffReport) String() string {", + "\tif d.Diffs == nil {", + "\t\treturn (\u0026diff.Diffs{}).String()", + "\t}", + "", + "\treturn d.Diffs.String()", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show", + "name": "show", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "github.com/spf13/cobra" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates the show command with its subcommands\n\nThis function constructs a Cobra command responsible for 
displaying claim\ninformation. It registers two child commands—one that shows failures and\nanother that outputs CSV dumps—by adding them to the parent command before\nreturning it. The returned command can then be integrated into the larger CLI\nhierarchy.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/show.go:23", + "calls": [ + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tshowFailuresCommand.Flags().StringVarP(\u0026claimFilePathFlag, \"claim\", \"c\", \"\",", + "\t\t\"Required: Existing claim file path.\",", + "\t)", + "", + "\terr := showFailuresCommand.MarkFlagRequired(\"claim\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark claim file path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// This command accepts a (optional) list of comma separated suite to filter the", + "\t// output. Only the failures from those test suites will be printed.", + "\tshowFailuresCommand.Flags().StringVarP(\u0026testSuitesFlag, \"testsuites\", \"s\", \"\",", + "\t\t\"Optional: comma separated list of test suites names whose failures will be shown.\",", + "\t)", + "", + "\t// The format of the output can be changed. Default is plain text, but it can also print", + "\t// it in json format.", + "\tshowFailuresCommand.Flags().StringVarP(\u0026outputFormatFlag, \"output\", \"o\", outputFormatText,", + "\t\tfmt.Sprintf(\"Optional: output format. 
Available formats: %v\", availableOutputFormats),", + "\t)", + "", + "\treturn showFailuresCommand", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tCSVDumpCommand.Flags().StringVarP(\u0026claimFilePathFlag, \"claim-file\", \"c\", \"\",", + "\t\t\"Required: path to claim file.\",", + "\t)", + "", + "\terr := CSVDumpCommand.MarkFlagRequired(\"claim-file\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark claim file path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\tCSVDumpCommand.Flags().StringVarP(\u0026CNFNameFlag, \"cnf-name\", \"n\", \"\",", + "\t\t\"Required: CNF name.\",", + "\t)", + "", + "\terr = CSVDumpCommand.MarkFlagRequired(\"cnf-name\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark CNF name as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\tCSVDumpCommand.Flags().StringVarP(\u0026CNFListFilePathFlag, \"cnf-type\", \"t\", \"\",", + "\t\t\"Required: path to JSON file mapping CNF name to CNF type.\",", + "\t)", + "", + "\terr = CSVDumpCommand.MarkFlagRequired(\"cnf-type\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark CNF type JSON path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\tCSVDumpCommand.Flags().BoolVarP(\u0026addHeaderFlag, \"add-header\", \"a\", false,", + "\t\t\"Optional: if present, adds a header to the CSV file\",", + "\t)", + "", + "\treturn CSVDumpCommand", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tclaimCommand.AddCommand(compare.NewCommand())", + "\tclaimCommand.AddCommand(show.NewCommand())", + "", + "\treturn 
claimCommand", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tshowCommand.AddCommand(failures.NewCommand())", + "\tshowCommand.AddCommand(csv.NewCommand())", + "\treturn showCommand", + "}" + ] + } + ], + "globals": [ + { + "name": "showCommand", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/show.go:10" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "name": "csv", + "files": 1, + "imports": [ + "encoding/csv", + "encoding/json", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/spf13/cobra", + "io", + "log", + "os" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a command for exporting claim data to CSV\n\nThis function configures a command with required flags for the claim file\npath, CNF name, and CNF type mapping file, as well as an optional flag to\ninclude a header row. It marks each required flag, handling any errors by\nlogging a fatal message. 
The configured command is then returned for use in\nthe CLI.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:54", + "calls": [ + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "BoolVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tshowCommand.AddCommand(failures.NewCommand())", + "\tshowCommand.AddCommand(csv.NewCommand())", + "\treturn showCommand", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tCSVDumpCommand.Flags().StringVarP(\u0026claimFilePathFlag, \"claim-file\", \"c\", \"\",", + "\t\t\"Required: path to claim file.\",", + "\t)", + "", + "\terr := CSVDumpCommand.MarkFlagRequired(\"claim-file\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark claim file path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\tCSVDumpCommand.Flags().StringVarP(\u0026CNFNameFlag, \"cnf-name\", \"n\", \"\",", + "\t\t\"Required: CNF name.\",", + "\t)", + "", + "\terr = CSVDumpCommand.MarkFlagRequired(\"cnf-name\")", + "\tif err != nil 
{", + "\t\tlog.Fatalf(\"Failed to mark CNF name as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\tCSVDumpCommand.Flags().StringVarP(\u0026CNFListFilePathFlag, \"cnf-type\", \"t\", \"\",", + "\t\t\"Required: path to JSON file mapping CNF name to CNF type.\",", + "\t)", + "", + "\terr = CSVDumpCommand.MarkFlagRequired(\"cnf-type\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark CNF type JSON path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\tCSVDumpCommand.Flags().BoolVarP(\u0026addHeaderFlag, \"add-header\", \"a\", false,", + "\t\t\"Optional: if present, adds a header to the CSV file\",", + "\t)", + "", + "\treturn CSVDumpCommand", + "}" + ] + }, + { + "name": "buildCSV", + "qualifiedName": "buildCSV", + "exported": false, + "signature": "func(*claim.Schema, string, map[string]claimschema.TestCaseDescription)([][]string)", + "doc": "buildCSV Creates CSV rows from claim data with remediation, CNF type, and optional header\n\nIt iterates over each test result in the claim schema, building a record that\nincludes operator versions, test identifiers, suite names, descriptions,\nstates, timestamps, skip reasons, check details, captured output, remediation\nactions, CNF type, and mandatory/optional status. If a header flag is set, a\nheader row is added first. 
The function returns a slice of string slices\nready for CSV writing.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:158", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "name": "dumpCsv", + "kind": "function", + "source": [ + "func dumpCsv(_ *cobra.Command, _ []string) error {", + "\t// set log output to stderr", + "\tlog.SetOutput(os.Stderr)", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// loads the mapping between CNF name and type", + "\tCNFTypeMap, err := loadCNFTypeMap(CNFListFilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to load CNF type map (%s): %v\", CNFListFilePathFlag, err)", + "\t\treturn nil", + "\t}", + "", + "\t// builds a catalog map indexed by test ID", + "\tcatalogMap := buildCatalogByID()", + "", + "\t// get CNF type", + "\tcnfType := CNFTypeMap[CNFNameFlag]", + "", + "\t// builds CSV file", + "\tresultsCsv := buildCSV(claimScheme, cnfType, catalogMap)", + "", + "\t// initializes CSV writer", + "\twriter := csv.NewWriter(os.Stdout)", + "", + "\t// writes all CSV records", + "\terr = writer.WriteAll(resultsCsv)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to write results CSV to screen, err: %s\", err)", + "\t\treturn nil", + "\t}", + "\t// flushes buffer to screen", + "\twriter.Flush()", + "\t// Check for any writing errors", + "\tif err := writer.Error(); err != nil {", + 
"\t\tpanic(err)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func buildCSV(claimScheme *claim.Schema, cnfType string, catalogMap map[string]claimschema.TestCaseDescription) (resultsCSVRecords [][]string) {", + "\tif cnfType == \"\" {", + "\t\tcnfType = identifiers.NonTelco", + "\t}", + "", + "\t// add header if flag is present (defaults to no header)", + "\tif addHeaderFlag {", + "\t\tresultsCSVRecords = append(resultsCSVRecords, []string{", + "\t\t\t\"CNFName\", \"OperatorVersion\", \"testID\", \"Suite\",", + "\t\t\t\"Description\", \"State\",", + "\t\t\t\"StartTime\", \"EndTime\",", + "\t\t\t\"SkipReason\", \"CheckDetails\", \"Output\",", + "\t\t\t\"Remediation\", \"CNFType\",", + "\t\t\t\"Mandatory/Optional\",", + "\t\t})", + "\t}", + "", + "\topVers := \"\"", + "\tfor i, op := range claimScheme.Claim.TestOperators {", + "\t\tif i == 0 {", + "\t\t\topVers = op.Version", + "\t\t} else {", + "\t\t\topVers = opVers + \", \" + op.Version", + "\t\t}", + "\t}", + "", + "\tfor testID := range claimScheme.Claim.Results {", + "\t\t// initialize record", + "\t\trecord := []string{}", + "\t\t// creates and appends new CSV record", + "\t\trecord = append(record,", + "\t\t\tCNFNameFlag,", + "\t\t\topVers,", + "\t\t\ttestID,", + "\t\t\tclaimScheme.Claim.Results[testID].TestID.Suite,", + "\t\t\tclaimScheme.Claim.Results[testID].CatalogInfo.Description,", + "\t\t\tclaimScheme.Claim.Results[testID].State,", + "\t\t\tclaimScheme.Claim.Results[testID].StartTime,", + "\t\t\tclaimScheme.Claim.Results[testID].EndTime,", + "\t\t\tclaimScheme.Claim.Results[testID].SkipReason,", + "\t\t\tclaimScheme.Claim.Results[testID].CheckDetails,", + "\t\t\tclaimScheme.Claim.Results[testID].CapturedTestOutput,", + "\t\t\tcatalogMap[testID].Remediation,", + "\t\t\tcnfType, // Append the CNF type", + "\t\t\tclaimScheme.Claim.Results[testID].CategoryClassification[cnfType],", + "\t\t)", + "", + "\t\tresultsCSVRecords = 
append(resultsCSVRecords, record)", + "\t}", + "\treturn resultsCSVRecords", + "}" + ] + }, + { + "name": "buildCatalogByID", + "qualifiedName": "buildCatalogByID", + "exported": false, + "signature": "func()(map[string]claimschema.TestCaseDescription)", + "doc": "buildCatalogByID Creates a map of test case descriptions keyed by ID\n\nIt initializes an empty mapping, then iterates over the global catalog\ncollection, inserting each entry into the map using its identifier as the\nkey. The resulting map is returned for quick lookup of test cases by their\nunique IDs.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:248", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "name": "dumpCsv", + "kind": "function", + "source": [ + "func dumpCsv(_ *cobra.Command, _ []string) error {", + "\t// set log output to stderr", + "\tlog.SetOutput(os.Stderr)", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// loads the mapping between CNF name and type", + "\tCNFTypeMap, err := loadCNFTypeMap(CNFListFilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to load CNF type map (%s): %v\", CNFListFilePathFlag, err)", + "\t\treturn nil", + "\t}", + "", + "\t// builds a catalog map indexed by test ID", + "\tcatalogMap := buildCatalogByID()", + "", + "\t// get CNF type", + "\tcnfType := CNFTypeMap[CNFNameFlag]", + "", + "\t// builds CSV file", + "\tresultsCsv := buildCSV(claimScheme, cnfType, catalogMap)", + "", + "\t// initializes CSV writer", + 
"\twriter := csv.NewWriter(os.Stdout)", + "", + "\t// writes all CSV records", + "\terr = writer.WriteAll(resultsCsv)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to write results CSV to screen, err: %s\", err)", + "\t\treturn nil", + "\t}", + "\t// flushes buffer to screen", + "\twriter.Flush()", + "\t// Check for any writing errors", + "\tif err := writer.Error(); err != nil {", + "\t\tpanic(err)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func buildCatalogByID() (catalogMap map[string]claimschema.TestCaseDescription) {", + "\tcatalogMap = make(map[string]claimschema.TestCaseDescription)", + "", + "\tfor index := range identifiers.Catalog {", + "\t\tcatalogMap[index.Id] = identifiers.Catalog[index]", + "\t}", + "\treturn catalogMap", + "}" + ] + }, + { + "name": "dumpCsv", + "qualifiedName": "dumpCsv", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "dumpCsv Exports claim results to CSV format\n\nThis function parses a claim file, validates its version, loads CNF type\nmappings, builds a catalog map, and then constructs CSV records for each test\nresult. It writes the assembled data to standard output using a CSV writer,\nhandling any errors that occur during parsing or writing. 
The function\nreturns nil on success or an error describing what failed.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:99", + "calls": [ + { + "pkgPath": "log", + "name": "SetOutput", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "name": "Parse", + "kind": "function", + "source": [ + "func Parse(filePath string) (*Schema, error) {", + "\tfileBytes, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failure reading file: %v\", err)", + "\t}", + "", + "\tclaimFile := Schema{}", + "\terr = json.Unmarshal(fileBytes, \u0026claimFile)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to unmarshal file: %v\", err)", + "\t}", + "", + "\treturn \u0026claimFile, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "name": "CheckVersion", + "kind": "function", + "source": [ + "func CheckVersion(version string) error {", + "\tclaimSemVersion, err := semver.NewVersion(version)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"claim file version %q is not valid: %v\", version, err)", + "\t}", + "", + "\tsupportedSemVersion, err := semver.NewVersion(supportedClaimFormatVersion)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"supported claim file version v%v is not valid: v%v\", supportedClaimFormatVersion, err)", + "\t}", + "", + "\tif claimSemVersion.Compare(supportedSemVersion) != 0 {", + "\t\treturn fmt.Errorf(\"claim format version v%v is not supported. 
Supported version is v%v\",", + "\t\t\tclaimSemVersion, supportedSemVersion)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "loadCNFTypeMap", + "kind": "function", + "source": [ + "func loadCNFTypeMap(path string) (CNFTypeMap map[string]string, err error) { //nolint:gocritic // CNF is a valid acronym", + "\t// Open the CSV file", + "\tfile, err := os.Open(path)", + "\tif err != nil {", + "\t\treturn CNFTypeMap, fmt.Errorf(\"error opening text file: %s, err:%s\", path, err)", + "\t}", + "\tdefer file.Close()", + "\t// initialize map", + "\tCNFTypeMap = make(map[string]string)", + "", + "\t// read the file", + "\tdata, err := io.ReadAll(file)", + "\tif err != nil {", + "\t\treturn CNFTypeMap, fmt.Errorf(\"error reading JSON file: %s, err:%s\", path, err)", + "\t}", + "", + "\terr = json.Unmarshal(data, \u0026CNFTypeMap)", + "\tif err != nil {", + "\t\tfmt.Println(\"Error un-marshaling CNF type JSON:\", err)", + "\t\treturn", + "\t}", + "", + "\treturn CNFTypeMap, nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "buildCatalogByID", + "kind": "function", + "source": [ + "func buildCatalogByID() (catalogMap map[string]claimschema.TestCaseDescription) {", + "\tcatalogMap = make(map[string]claimschema.TestCaseDescription)", + "", + "\tfor index := range identifiers.Catalog {", + "\t\tcatalogMap[index.Id] = identifiers.Catalog[index]", + "\t}", + "\treturn catalogMap", + "}" + ] + }, + { + "name": "buildCSV", + "kind": "function", + "source": [ + "func buildCSV(claimScheme *claim.Schema, cnfType string, catalogMap map[string]claimschema.TestCaseDescription) (resultsCSVRecords [][]string) {", + "\tif cnfType == \"\" {", + "\t\tcnfType = identifiers.NonTelco", + "\t}", + "", + "\t// add header if flag is present (defaults to no header)", + "\tif addHeaderFlag {", + "\t\tresultsCSVRecords = append(resultsCSVRecords, []string{", + "\t\t\t\"CNFName\", \"OperatorVersion\", \"testID\", \"Suite\",", + 
"\t\t\t\"Description\", \"State\",", + "\t\t\t\"StartTime\", \"EndTime\",", + "\t\t\t\"SkipReason\", \"CheckDetails\", \"Output\",", + "\t\t\t\"Remediation\", \"CNFType\",", + "\t\t\t\"Mandatory/Optional\",", + "\t\t})", + "\t}", + "", + "\topVers := \"\"", + "\tfor i, op := range claimScheme.Claim.TestOperators {", + "\t\tif i == 0 {", + "\t\t\topVers = op.Version", + "\t\t} else {", + "\t\t\topVers = opVers + \", \" + op.Version", + "\t\t}", + "\t}", + "", + "\tfor testID := range claimScheme.Claim.Results {", + "\t\t// initialize record", + "\t\trecord := []string{}", + "\t\t// creates and appends new CSV record", + "\t\trecord = append(record,", + "\t\t\tCNFNameFlag,", + "\t\t\topVers,", + "\t\t\ttestID,", + "\t\t\tclaimScheme.Claim.Results[testID].TestID.Suite,", + "\t\t\tclaimScheme.Claim.Results[testID].CatalogInfo.Description,", + "\t\t\tclaimScheme.Claim.Results[testID].State,", + "\t\t\tclaimScheme.Claim.Results[testID].StartTime,", + "\t\t\tclaimScheme.Claim.Results[testID].EndTime,", + "\t\t\tclaimScheme.Claim.Results[testID].SkipReason,", + "\t\t\tclaimScheme.Claim.Results[testID].CheckDetails,", + "\t\t\tclaimScheme.Claim.Results[testID].CapturedTestOutput,", + "\t\t\tcatalogMap[testID].Remediation,", + "\t\t\tcnfType, // Append the CNF type", + "\t\t\tclaimScheme.Claim.Results[testID].CategoryClassification[cnfType],", + "\t\t)", + "", + "\t\tresultsCSVRecords = append(resultsCSVRecords, record)", + "\t}", + "\treturn resultsCSVRecords", + "}" + ] + }, + { + "pkgPath": "encoding/csv", + "name": "NewWriter", + "kind": "function" + }, + { + "name": "WriteAll", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "Flush", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "panic", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func dumpCsv(_ *cobra.Command, _ []string) error {", + "\t// set log 
output to stderr", + "\tlog.SetOutput(os.Stderr)", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// loads the mapping between CNF name and type", + "\tCNFTypeMap, err := loadCNFTypeMap(CNFListFilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to load CNF type map (%s): %v\", CNFListFilePathFlag, err)", + "\t\treturn nil", + "\t}", + "", + "\t// builds a catalog map indexed by test ID", + "\tcatalogMap := buildCatalogByID()", + "", + "\t// get CNF type", + "\tcnfType := CNFTypeMap[CNFNameFlag]", + "", + "\t// builds CSV file", + "\tresultsCsv := buildCSV(claimScheme, cnfType, catalogMap)", + "", + "\t// initializes CSV writer", + "\twriter := csv.NewWriter(os.Stdout)", + "", + "\t// writes all CSV records", + "\terr = writer.WriteAll(resultsCsv)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to write results CSV to screen, err: %s\", err)", + "\t\treturn nil", + "\t}", + "\t// flushes buffer to screen", + "\twriter.Flush()", + "\t// Check for any writing errors", + "\tif err := writer.Error(); err != nil {", + "\t\tpanic(err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "loadCNFTypeMap", + "qualifiedName": "loadCNFTypeMap", + "exported": false, + "signature": "func(string)(map[string]string, error)", + "doc": "loadCNFTypeMap Loads a mapping of CNF names to their types\n\nThis routine opens the specified file, reads its contents, and unmarshals the\ndata into a string-to-string map that associates each CNF name with its\ncorresponding type. 
If any step fails—opening, reading, or decoding—the\nfunction returns an error describing the issue; otherwise it supplies the\npopulated map.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:217", + "calls": [ + { + "pkgPath": "os", + "name": "Open", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "io", + "name": "ReadAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "name": "dumpCsv", + "kind": "function", + "source": [ + "func dumpCsv(_ *cobra.Command, _ []string) error {", + "\t// set log output to stderr", + "\tlog.SetOutput(os.Stderr)", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// loads the mapping between CNF name and type", + "\tCNFTypeMap, err := loadCNFTypeMap(CNFListFilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to load CNF type map (%s): %v\", CNFListFilePathFlag, err)", + "\t\treturn nil", + "\t}", + "", + "\t// builds a catalog map indexed by test ID", + "\tcatalogMap := buildCatalogByID()", + "", + "\t// get CNF type", + "\tcnfType := CNFTypeMap[CNFNameFlag]", + "", + "\t// builds CSV file", + "\tresultsCsv := buildCSV(claimScheme, cnfType, 
catalogMap)", + "", + "\t// initializes CSV writer", + "\twriter := csv.NewWriter(os.Stdout)", + "", + "\t// writes all CSV records", + "\terr = writer.WriteAll(resultsCsv)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to write results CSV to screen, err: %s\", err)", + "\t\treturn nil", + "\t}", + "\t// flushes buffer to screen", + "\twriter.Flush()", + "\t// Check for any writing errors", + "\tif err := writer.Error(); err != nil {", + "\t\tpanic(err)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadCNFTypeMap(path string) (CNFTypeMap map[string]string, err error) { //nolint:gocritic // CNF is a valid acronym", + "\t// Open the CSV file", + "\tfile, err := os.Open(path)", + "\tif err != nil {", + "\t\treturn CNFTypeMap, fmt.Errorf(\"error opening text file: %s, err:%s\", path, err)", + "\t}", + "\tdefer file.Close()", + "\t// initialize map", + "\tCNFTypeMap = make(map[string]string)", + "", + "\t// read the file", + "\tdata, err := io.ReadAll(file)", + "\tif err != nil {", + "\t\treturn CNFTypeMap, fmt.Errorf(\"error reading JSON file: %s, err:%s\", path, err)", + "\t}", + "", + "\terr = json.Unmarshal(data, \u0026CNFTypeMap)", + "\tif err != nil {", + "\t\tfmt.Println(\"Error un-marshaling CNF type JSON:\", err)", + "\t\treturn", + "\t}", + "", + "\treturn CNFTypeMap, nil", + "}" + ] + } + ], + "globals": [ + { + "name": "CNFListFilePathFlag", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:20" + }, + { + "name": "CNFNameFlag", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:19" + }, + { + "name": "CSVDumpCommand", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:23" + }, + { + "name": "addHeaderFlag", + "exported": false, + "type": "bool", + "position": 
"/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:21" + }, + { + "name": "claimFilePathFlag", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/csv/csv.go:18" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "failures", + "files": 2, + "imports": [ + "encoding/json", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/spf13/cobra", + "log", + "os", + "strings" + ], + "structs": [ + { + "name": "FailedTestCase", + "exported": true, + "doc": "FailedTestCase Represents a test case that did not pass\n\nIt holds the name and description of the test case, optional details about\nthe check, and any objects that failed to meet compliance criteria. The\nstructure is used to aggregate failure information for reporting or logging\npurposes.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/types.go:72", + "fields": { + "CheckDetails": "string", + "NonCompliantObjects": "[]NonCompliantObject", + "TestCaseDescription": "string", + "TestCaseName": "string" + }, + "methodNames": null, + "source": [ + "type FailedTestCase struct {", + "\tTestCaseName string `json:\"name\"`", + "\tTestCaseDescription string `json:\"description\"`", + "\tCheckDetails string `json:\"checkDetails,omitempty\"`", + "\tNonCompliantObjects []NonCompliantObject `json:\"nonCompliantObjects,omitempty\"`", + "}" + ] + }, + { + "name": "FailedTestSuite", + "exported": true, + "doc": "FailedTestSuite represents a test suite with failures\n\nThis struct holds the name of a test suite and a list of its failing test\ncases. 
It is used when reporting or displaying results, allowing consumers to\nsee which specific tests failed within each suite.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/types.go:84", + "fields": { + "FailingTestCases": "[]FailedTestCase", + "TestSuiteName": "string" + }, + "methodNames": null, + "source": [ + "type FailedTestSuite struct {", + "\tTestSuiteName string `json:\"name\"`", + "\tFailingTestCases []FailedTestCase `json:\"failures\"`", + "}" + ] + }, + { + "name": "NonCompliantObject", + "exported": true, + "doc": "NonCompliantObject represents a non‑compliant object extracted from failure data\n\nThis type holds information about objects that failed compliance checks,\nincluding the object's kind, the reason for failure, and its specification\ndetails. The Spec field aggregates key/value pairs representing the object's\nconfiguration at the time of the check. Instances are created by parsing JSON\noutput from a compliance test and converting it into a more convenient\nstructure for reporting.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/types.go:13", + "fields": { + "Reason": "string", + "Spec": "ObjectSpec", + "Type": "string" + }, + "methodNames": null, + "source": [ + "type NonCompliantObject struct {", + "\tType string `json:\"type\"`", + "\tReason string `json:\"reason\"`", + "\tSpec ObjectSpec `json:\"spec\"`", + "}" + ] + }, + { + "name": "ObjectSpec", + "exported": true, + "doc": "ObjectSpec Represents a collection of key/value pairs for JSON output\n\nThis structure holds an ordered list of fields where each field has a string\nkey and value. It provides methods to add new fields and to marshal the\ncollection into a valid JSON object. 
If no fields are present, marshaling\nreturns an empty JSON object.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/types.go:25", + "fields": { + "Fields": "[]struct{Key, Value string}" + }, + "methodNames": [ + "AddField", + "MarshalJSON" + ], + "source": [ + "type ObjectSpec struct {", + "\tFields []struct{ Key, Value string }", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a command to display claim failures\n\nThe function builds a Cobra command that requires a path to an existing claim\nfile and optionally accepts a comma‑separated list of test suites to filter\nthe output. It also allows specifying the output format, defaulting to plain\ntext but supporting JSON. Errors during flag configuration are logged\nfatally, after which the command is returned for registration.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:114", + "calls": [ + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tshowCommand.AddCommand(failures.NewCommand())", + "\tshowCommand.AddCommand(csv.NewCommand())", + "\treturn showCommand", + "}" + ] + } + ], + "usesTypes": null, + 
"usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tshowFailuresCommand.Flags().StringVarP(\u0026claimFilePathFlag, \"claim\", \"c\", \"\",", + "\t\t\"Required: Existing claim file path.\",", + "\t)", + "", + "\terr := showFailuresCommand.MarkFlagRequired(\"claim\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark claim file path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// This command accepts a (optional) list of comma separated suite to filter the", + "\t// output. Only the failures from those test suites will be printed.", + "\tshowFailuresCommand.Flags().StringVarP(\u0026testSuitesFlag, \"testsuites\", \"s\", \"\",", + "\t\t\"Optional: comma separated list of test suites names whose failures will be shown.\",", + "\t)", + "", + "\t// The format of the output can be changed. Default is plain text, but it can also print", + "\t// it in json format.", + "\tshowFailuresCommand.Flags().StringVarP(\u0026outputFormatFlag, \"output\", \"o\", outputFormatText,", + "\t\tfmt.Sprintf(\"Optional: output format. Available formats: %v\", availableOutputFormats),", + "\t)", + "", + "\treturn showFailuresCommand", + "}" + ] + }, + { + "name": "AddField", + "qualifiedName": "ObjectSpec.AddField", + "exported": true, + "receiver": "ObjectSpec", + "signature": "func(string, string)()", + "doc": "ObjectSpec.AddField Adds a key/value pair to the object's specification\n\nThis method appends a new field containing the provided key and value strings\nto the spec's internal slice of fields. It does not return any value or\nperform validation, simply extending the slice. 
The updated spec can then be\nused elsewhere to represent object metadata.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/types.go:35", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "getNonCompliantObjectsFromFailureReason", + "kind": "function", + "source": [ + "func getNonCompliantObjectsFromFailureReason(checkDetails string) ([]NonCompliantObject, error) {", + "\tobjects := struct {", + "\t\tCompliant []testhelper.ReportObject `json:\"CompliantObjectsOut\"`", + "\t\tNonCompliant []testhelper.ReportObject `json:\"NonCompliantObjectsOut\"`", + "\t}{}", + "", + "\terr := json.Unmarshal([]byte(checkDetails), \u0026objects)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to decode checkDetails %s: %v\", checkDetails, err)", + "\t}", + "", + "\t// Now let's create a list of our NonCompliantObject-type items.", + "\tnonCompliantObjects := []NonCompliantObject{}", + "\tfor _, object := range objects.NonCompliant {", + "\t\toutputObject := NonCompliantObject{Type: object.ObjectType, Reason: object.ObjectFieldsValues[0]}", + "\t\tfor i := 1; i \u003c len(object.ObjectFieldsKeys); i++ {", + "\t\t\toutputObject.Spec.AddField(object.ObjectFieldsKeys[i], object.ObjectFieldsValues[i])", + "\t\t}", + "", + "\t\tnonCompliantObjects = append(nonCompliantObjects, outputObject)", + "\t}", + "", + "\treturn nonCompliantObjects, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (spec *ObjectSpec) AddField(key, value string) {", + "\tspec.Fields = append(spec.Fields, struct {", + "\t\tKey string", + "\t\tValue string", + "\t}{key, value})", + "}" + ] + }, + { + "name": "MarshalJSON", + "qualifiedName": "ObjectSpec.MarshalJSON", + "exported": true, + "receiver": "ObjectSpec", + "signature": "func()([]byte, error)", + "doc": "ObjectSpec.MarshalJSON 
Converts the ObjectSpec into JSON bytes\n\nThe method checks if there are any fields; if none, it returns an empty JSON\nobject. Otherwise, it builds a JSON string by iterating over each field and\nformatting key/value pairs as quoted strings separated by commas. The\nresulting byte slice is returned with no error.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/types.go:48", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (spec *ObjectSpec) MarshalJSON() ([]byte, error) {", + "\tif len(spec.Fields) == 0 {", + "\t\treturn []byte(\"{}\"), nil", + "\t}", + "", + "\tspecStr := \"{\"", + "\tfor i := range spec.Fields {", + "\t\tif i != 0 {", + "\t\t\tspecStr += \", \"", + "\t\t}", + "\t\tspecStr += fmt.Sprintf(\"%q:%q\", spec.Fields[i].Key, spec.Fields[i].Value)", + "\t}", + "", + "\tspecStr += \"}\"", + "", + "\treturn []byte(specStr), nil", + "}" + ] + }, + { + "name": "getFailedTestCasesByTestSuite", + "qualifiedName": "getFailedTestCasesByTestSuite", + "exported": false, + "signature": "func(map[string][]*claim.TestCaseResult, map[string]bool)([]FailedTestSuite)", + "doc": "getFailedTestCasesByTestSuite generates a list of failing test suites from parsed claim data\n\nThe function iterates over test suite results, filtering by the target suites\nif specified. For each failed test case it extracts details, attempts to\nparse non‑compliant objects, and records either the parsed objects or the\nraw failure reason on error. 
It returns a slice of structures that represent\nonly those test suites containing at least one failing test case.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:275", + "calls": [ + { + "name": "getNonCompliantObjectsFromFailureReason", + "kind": "function", + "source": [ + "func getNonCompliantObjectsFromFailureReason(checkDetails string) ([]NonCompliantObject, error) {", + "\tobjects := struct {", + "\t\tCompliant []testhelper.ReportObject `json:\"CompliantObjectsOut\"`", + "\t\tNonCompliant []testhelper.ReportObject `json:\"NonCompliantObjectsOut\"`", + "\t}{}", + "", + "\terr := json.Unmarshal([]byte(checkDetails), \u0026objects)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to decode checkDetails %s: %v\", checkDetails, err)", + "\t}", + "", + "\t// Now let's create a list of our NonCompliantObject-type items.", + "\tnonCompliantObjects := []NonCompliantObject{}", + "\tfor _, object := range objects.NonCompliant {", + "\t\toutputObject := NonCompliantObject{Type: object.ObjectType, Reason: object.ObjectFieldsValues[0]}", + "\t\tfor i := 1; i \u003c len(object.ObjectFieldsKeys); i++ {", + "\t\t\toutputObject.Spec.AddField(object.ObjectFieldsKeys[i], object.ObjectFieldsValues[i])", + "\t\t}", + "", + "\t\tnonCompliantObjects = append(nonCompliantObjects, outputObject)", + "\t}", + "", + "\treturn nonCompliantObjects, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "showFailures", + "kind": "function", + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + 
"", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites := getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat {", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getFailedTestCasesByTestSuite(claimResultsByTestSuite map[string][]*claim.TestCaseResult, targetTestSuites map[string]bool) []FailedTestSuite {", + "\ttestSuites := []FailedTestSuite{}", + "\tfor testSuite := range claimResultsByTestSuite {", + "\t\tif targetTestSuites != nil \u0026\u0026 !targetTestSuites[testSuite] {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfailedTcs := []FailedTestCase{}", + "\t\tfor _, tc := range claimResultsByTestSuite[testSuite] {", + "\t\t\tif tc.State != \"failed\" {", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tfailingTc := FailedTestCase{", + "\t\t\t\tTestCaseName: tc.TestID.ID,", + "\t\t\t\tTestCaseDescription: 
tc.CatalogInfo.Description,", + "\t\t\t}", + "", + "\t\t\tnonCompliantObjects, err := getNonCompliantObjectsFromFailureReason(tc.CheckDetails)", + "\t\t\tif err != nil {", + "\t\t\t\t// This means the test case doesn't use the report objects yet. Just use the raw failure reason instead.", + "\t\t\t\t// Also, send the error into stderr, so it can be filtered out with \"2\u003e/errors.txt\" or \"2\u003e/dev/null\".", + "\t\t\t\tfmt.Fprintf(os.Stderr, \"Failed to parse non compliant objects from test case %s (test suite %s): %v\", tc.TestID.ID, testSuite, err)", + "\t\t\t\tfailingTc.CheckDetails = tc.CheckDetails", + "\t\t\t} else {", + "\t\t\t\tfailingTc.NonCompliantObjects = nonCompliantObjects", + "\t\t\t}", + "", + "\t\t\tfailedTcs = append(failedTcs, failingTc)", + "\t\t}", + "", + "\t\tif len(failedTcs) \u003e 0 {", + "\t\t\ttestSuites = append(testSuites, FailedTestSuite{", + "\t\t\t\tTestSuiteName: testSuite,", + "\t\t\t\tFailingTestCases: failedTcs,", + "\t\t\t})", + "\t\t}", + "\t}", + "", + "\treturn testSuites", + "}" + ] + }, + { + "name": "getNonCompliantObjectsFromFailureReason", + "qualifiedName": "getNonCompliantObjectsFromFailureReason", + "exported": false, + "signature": "func(string)([]NonCompliantObject, error)", + "doc": "getNonCompliantObjectsFromFailureReason parses a test case failure payload into non‑compliant objects\n\nThe function receives the JSON string that represents a test case’s check\ndetails, decodes it to extract compliant and non‑compliant report objects,\nand then builds a slice of NonCompliantObject structures. It returns the\nconstructed list along with an error if the payload cannot be decoded. 
The\noutput includes each object's type, reason, and any additional specification\nfields.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:184", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "ObjectSpec.AddField", + "kind": "function", + "source": [ + "func (spec *ObjectSpec) AddField(key, value string) {", + "\tspec.Fields = append(spec.Fields, struct {", + "\t\tKey string", + "\t\tValue string", + "\t}{key, value})", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "getFailedTestCasesByTestSuite", + "kind": "function", + "source": [ + "func getFailedTestCasesByTestSuite(claimResultsByTestSuite map[string][]*claim.TestCaseResult, targetTestSuites map[string]bool) []FailedTestSuite {", + "\ttestSuites := []FailedTestSuite{}", + "\tfor testSuite := range claimResultsByTestSuite {", + "\t\tif targetTestSuites != nil \u0026\u0026 !targetTestSuites[testSuite] {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfailedTcs := []FailedTestCase{}", + "\t\tfor _, tc := range claimResultsByTestSuite[testSuite] {", + "\t\t\tif tc.State != \"failed\" {", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tfailingTc := FailedTestCase{", + "\t\t\t\tTestCaseName: tc.TestID.ID,", + "\t\t\t\tTestCaseDescription: tc.CatalogInfo.Description,", + "\t\t\t}", + "", + "\t\t\tnonCompliantObjects, err := getNonCompliantObjectsFromFailureReason(tc.CheckDetails)", + "\t\t\tif err != nil {", + "\t\t\t\t// This means the test case doesn't use the report objects yet. 
Just use the raw failure reason instead.", + "\t\t\t\t// Also, send the error into stderr, so it can be filtered out with \"2\u003e/errors.txt\" or \"2\u003e/dev/null\".", + "\t\t\t\tfmt.Fprintf(os.Stderr, \"Failed to parse non compliant objects from test case %s (test suite %s): %v\", tc.TestID.ID, testSuite, err)", + "\t\t\t\tfailingTc.CheckDetails = tc.CheckDetails", + "\t\t\t} else {", + "\t\t\t\tfailingTc.NonCompliantObjects = nonCompliantObjects", + "\t\t\t}", + "", + "\t\t\tfailedTcs = append(failedTcs, failingTc)", + "\t\t}", + "", + "\t\tif len(failedTcs) \u003e 0 {", + "\t\t\ttestSuites = append(testSuites, FailedTestSuite{", + "\t\t\t\tTestSuiteName: testSuite,", + "\t\t\t\tFailingTestCases: failedTcs,", + "\t\t\t})", + "\t\t}", + "\t}", + "", + "\treturn testSuites", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getNonCompliantObjectsFromFailureReason(checkDetails string) ([]NonCompliantObject, error) {", + "\tobjects := struct {", + "\t\tCompliant []testhelper.ReportObject `json:\"CompliantObjectsOut\"`", + "\t\tNonCompliant []testhelper.ReportObject `json:\"NonCompliantObjectsOut\"`", + "\t}{}", + "", + "\terr := json.Unmarshal([]byte(checkDetails), \u0026objects)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to decode checkDetails %s: %v\", checkDetails, err)", + "\t}", + "", + "\t// Now let's create a list of our NonCompliantObject-type items.", + "\tnonCompliantObjects := []NonCompliantObject{}", + "\tfor _, object := range objects.NonCompliant {", + "\t\toutputObject := NonCompliantObject{Type: object.ObjectType, Reason: object.ObjectFieldsValues[0]}", + "\t\tfor i := 1; i \u003c len(object.ObjectFieldsKeys); i++ {", + "\t\t\toutputObject.Spec.AddField(object.ObjectFieldsKeys[i], object.ObjectFieldsValues[i])", + "\t\t}", + "", + "\t\tnonCompliantObjects = append(nonCompliantObjects, outputObject)", + "\t}", + "", + "\treturn nonCompliantObjects, nil", + "}" + ] + }, + { + "name": 
"parseOutputFormatFlag", + "qualifiedName": "parseOutputFormatFlag", + "exported": false, + "signature": "func()(string, error)", + "doc": "parseOutputFormatFlag Validates the output format flag\n\nIt checks whether the user-specified format matches one of the supported\nformats listed in \"availableOutputFormats\". If a match is found, it returns\nthat format string with no error; otherwise it returns an empty string and an\nerror explaining the invalid value and listing the allowed options.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:166", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "showFailures", + "kind": "function", + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites 
:= getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat {", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func parseOutputFormatFlag() (string, error) {", + "\tfor _, outputFormat := range availableOutputFormats {", + "\t\tif outputFormat == outputFormatFlag {", + "\t\t\treturn outputFormat, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", fmt.Errorf(\"invalid output format flag %q - available formats: %v\", outputFormatFlag, availableOutputFormats)", + "}" + ] + }, + { + "name": "parseTargetTestSuitesFlag", + "qualifiedName": "parseTargetTestSuitesFlag", + "exported": false, + "signature": "func()(map[string]bool)", + "doc": "parseTargetTestSuitesFlag Creates a map of test suite names from the flag input\n\nThis function checks if the global test suites flag is empty; if so, it\nreturns nil. Otherwise, it splits the comma-separated string into individual\nsuite names, trims whitespace from each, and stores them as keys in a boolean\nmap set to true. 
The resulting map is used elsewhere to quickly determine\nwhether a given test suite should be processed.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:147", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "showFailures", + "kind": "function", + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites := getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat {", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + 
"usesGlobals": null, + "source": [ + "func parseTargetTestSuitesFlag() map[string]bool {", + "\tif testSuitesFlag == \"\" {", + "\t\treturn nil", + "\t}", + "", + "\ttargetTestSuites := map[string]bool{}", + "\tfor _, testSuite := range strings.Split(testSuitesFlag, \",\") {", + "\t\ttargetTestSuites[strings.TrimSpace(testSuite)] = true", + "\t}", + "", + "\treturn targetTestSuites", + "}" + ] + }, + { + "name": "printFailuresJSON", + "qualifiedName": "printFailuresJSON", + "exported": false, + "signature": "func([]FailedTestSuite)()", + "doc": "printFailuresJSON Outputs failures as indented JSON\n\nThe function receives a slice of failure objects, wraps them in a struct with\na field named \"testSuites\", marshals this structure to pretty‑printed JSON,\nand prints the result. If marshalling fails it logs a fatal error and exits.\nThe output is written to standard output as a single line containing the JSON\nstring.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:254", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "MarshalIndent", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "showFailures", + "kind": "function", + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = 
claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites := getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat {", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printFailuresJSON(testSuites []FailedTestSuite) {", + "\ttype ClaimFailures struct {", + "\t\tFailures []FailedTestSuite `json:\"testSuites\"`", + "\t}", + "", + "\tclaimFailures := ClaimFailures{Failures: testSuites}", + "\tbytes, err := json.MarshalIndent(claimFailures, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"failed to marshal failures: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"%s\\n\", string(bytes))", + "}" + ] + }, + { + "name": "printFailuresText", + "qualifiedName": "printFailuresText", + "exported": false, + "signature": "func([]FailedTestSuite)()", + "doc": "printFailuresText Prints a plain text summary of failed test suites and cases\n\nThe function iterates over each test suite, outputting its name and then\ndetails for every failing test case. For each case it shows the name,\ndescription, and either a single failure reason or a list of non‑compliant\nobjects with type, reason, and spec fields. 
The information is formatted\nusing printf statements to produce a readable report.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:216", + "calls": [ + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "showFailures", + "kind": "function", + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = 
append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites := getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat {", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printFailuresText(testSuites []FailedTestSuite) {", + "\tfor _, ts := range testSuites {", + "\t\tfmt.Printf(\"Test Suite: %s\\n\", ts.TestSuiteName)", + "\t\tfor _, tc := range ts.FailingTestCases {", + "\t\t\tfmt.Printf(\" Test Case: %s\\n\", tc.TestCaseName)", + "\t\t\tfmt.Printf(\" Description: %s\\n\", tc.TestCaseDescription)", + "", + "\t\t\t// In case this tc was not using report objects, just print the failure reason string.", + "\t\t\tif len(tc.NonCompliantObjects) == 0 {", + "\t\t\t\tfmt.Printf(\" Failure reason: %s\\n\", tc.CheckDetails)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tfmt.Printf(\" Failure reasons:\\n\")", + "\t\t\tfor i := range tc.NonCompliantObjects {", + "\t\t\t\tnonCompliantObject := tc.NonCompliantObjects[i]", + "\t\t\t\tfmt.Printf(\" %2d - Type: %s, Reason: %s\\n\", i+1, nonCompliantObject.Type, nonCompliantObject.Reason)", + "\t\t\t\tfmt.Printf(\" \")", + "\t\t\t\tfor i := range nonCompliantObject.Spec.Fields {", + "\t\t\t\t\tif i != 0 {", + "\t\t\t\t\t\tfmt.Printf(\", \")", + "\t\t\t\t\t}", + "\t\t\t\t\tfield := nonCompliantObject.Spec.Fields[i]", + "\t\t\t\t\tfmt.Printf(\"%s: %s\", field.Key, field.Value)", + "\t\t\t\t}", + "\t\t\t\tfmt.Printf(\"\\n\")", + "\t\t\t}", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "showFailures", + "qualifiedName": "showFailures", + "exported": false, + "signature": 
"func(*cobra.Command, []string)(error)", + "doc": "showFailures Displays failed test cases from a claim file\n\nThe function reads the claim file, validates its format version, groups\nresults by test suite, filters for failures, and outputs them either in JSON\nor plain text based on a flag. It returns an error if parsing or validation\nfails.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:323", + "calls": [ + { + "name": "parseOutputFormatFlag", + "kind": "function", + "source": [ + "func parseOutputFormatFlag() (string, error) {", + "\tfor _, outputFormat := range availableOutputFormats {", + "\t\tif outputFormat == outputFormatFlag {", + "\t\t\treturn outputFormat, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", fmt.Errorf(\"invalid output format flag %q - available formats: %v\", outputFormatFlag, availableOutputFormats)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "name": "Parse", + "kind": "function", + "source": [ + "func Parse(filePath string) (*Schema, error) {", + "\tfileBytes, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failure reading file: %v\", err)", + "\t}", + "", + "\tclaimFile := Schema{}", + "\terr = json.Unmarshal(fileBytes, \u0026claimFile)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to unmarshal file: %v\", err)", + "\t}", + "", + "\treturn \u0026claimFile, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "name": "CheckVersion", + "kind": "function", + "source": [ + "func CheckVersion(version string) error {", + "\tclaimSemVersion, err := semver.NewVersion(version)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"claim file version %q is not valid: %v\", version, err)", + "\t}", + "", + "\tsupportedSemVersion, err := 
semver.NewVersion(supportedClaimFormatVersion)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"supported claim file version v%v is not valid: v%v\", supportedClaimFormatVersion, err)", + "\t}", + "", + "\tif claimSemVersion.Compare(supportedSemVersion) != 0 {", + "\t\treturn fmt.Errorf(\"claim format version v%v is not supported. Supported version is v%v\",", + "\t\t\tclaimSemVersion, supportedSemVersion)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "parseTargetTestSuitesFlag", + "kind": "function", + "source": [ + "func parseTargetTestSuitesFlag() map[string]bool {", + "\tif testSuitesFlag == \"\" {", + "\t\treturn nil", + "\t}", + "", + "\ttargetTestSuites := map[string]bool{}", + "\tfor _, testSuite := range strings.Split(testSuitesFlag, \",\") {", + "\t\ttargetTestSuites[strings.TrimSpace(testSuite)] = true", + "\t}", + "", + "\treturn targetTestSuites", + "}" + ] + }, + { + "name": "getFailedTestCasesByTestSuite", + "kind": "function", + "source": [ + "func getFailedTestCasesByTestSuite(claimResultsByTestSuite map[string][]*claim.TestCaseResult, targetTestSuites map[string]bool) []FailedTestSuite {", + "\ttestSuites := []FailedTestSuite{}", + "\tfor testSuite := range claimResultsByTestSuite {", + "\t\tif targetTestSuites != nil \u0026\u0026 !targetTestSuites[testSuite] {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfailedTcs := []FailedTestCase{}", + "\t\tfor _, tc := range claimResultsByTestSuite[testSuite] {", + "\t\t\tif tc.State != \"failed\" {", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tfailingTc := FailedTestCase{", + "\t\t\t\tTestCaseName: tc.TestID.ID,", + "\t\t\t\tTestCaseDescription: tc.CatalogInfo.Description,", + "\t\t\t}", + "", + "\t\t\tnonCompliantObjects, err := getNonCompliantObjectsFromFailureReason(tc.CheckDetails)", + "\t\t\tif err != nil {", + "\t\t\t\t// This means the test case doesn't use the report objects yet. 
Just use the raw failure reason instead.", + "\t\t\t\t// Also, send the error into stderr, so it can be filtered out with \"2\u003e/errors.txt\" or \"2\u003e/dev/null\".", + "\t\t\t\tfmt.Fprintf(os.Stderr, \"Failed to parse non compliant objects from test case %s (test suite %s): %v\", tc.TestID.ID, testSuite, err)", + "\t\t\t\tfailingTc.CheckDetails = tc.CheckDetails", + "\t\t\t} else {", + "\t\t\t\tfailingTc.NonCompliantObjects = nonCompliantObjects", + "\t\t\t}", + "", + "\t\t\tfailedTcs = append(failedTcs, failingTc)", + "\t\t}", + "", + "\t\tif len(failedTcs) \u003e 0 {", + "\t\t\ttestSuites = append(testSuites, FailedTestSuite{", + "\t\t\t\tTestSuiteName: testSuite,", + "\t\t\t\tFailingTestCases: failedTcs,", + "\t\t\t})", + "\t\t}", + "\t}", + "", + "\treturn testSuites", + "}" + ] + }, + { + "name": "printFailuresJSON", + "kind": "function", + "source": [ + "func printFailuresJSON(testSuites []FailedTestSuite) {", + "\ttype ClaimFailures struct {", + "\t\tFailures []FailedTestSuite `json:\"testSuites\"`", + "\t}", + "", + "\tclaimFailures := ClaimFailures{Failures: testSuites}", + "\tbytes, err := json.MarshalIndent(claimFailures, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"failed to marshal failures: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"%s\\n\", string(bytes))", + "}" + ] + }, + { + "name": "printFailuresText", + "kind": "function", + "source": [ + "func printFailuresText(testSuites []FailedTestSuite) {", + "\tfor _, ts := range testSuites {", + "\t\tfmt.Printf(\"Test Suite: %s\\n\", ts.TestSuiteName)", + "\t\tfor _, tc := range ts.FailingTestCases {", + "\t\t\tfmt.Printf(\" Test Case: %s\\n\", tc.TestCaseName)", + "\t\t\tfmt.Printf(\" Description: %s\\n\", tc.TestCaseDescription)", + "", + "\t\t\t// In case this tc was not using report objects, just print the failure reason string.", + "\t\t\tif len(tc.NonCompliantObjects) == 0 {", + "\t\t\t\tfmt.Printf(\" Failure reason: %s\\n\", tc.CheckDetails)", + "\t\t\t\tcontinue", + 
"\t\t\t}", + "", + "\t\t\tfmt.Printf(\" Failure reasons:\\n\")", + "\t\t\tfor i := range tc.NonCompliantObjects {", + "\t\t\t\tnonCompliantObject := tc.NonCompliantObjects[i]", + "\t\t\t\tfmt.Printf(\" %2d - Type: %s, Reason: %s\\n\", i+1, nonCompliantObject.Type, nonCompliantObject.Reason)", + "\t\t\t\tfmt.Printf(\" \")", + "\t\t\t\tfor i := range nonCompliantObject.Spec.Fields {", + "\t\t\t\t\tif i != 0 {", + "\t\t\t\t\t\tfmt.Printf(\", \")", + "\t\t\t\t\t}", + "\t\t\t\t\tfield := nonCompliantObject.Spec.Fields[i]", + "\t\t\t\t\tfmt.Printf(\"%s: %s\", field.Key, field.Value)", + "\t\t\t\t}", + "\t\t\t\tfmt.Printf(\"\\n\")", + "\t\t\t}", + "\t\t}", + "\t}", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites := getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat 
{", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "globals": [ + { + "name": "availableOutputFormats", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:103" + }, + { + "name": "claimFilePathFlag", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:16" + }, + { + "name": "outputFormatFlag", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:18" + }, + { + "name": "showFailuresCommand", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:20" + }, + { + "name": "testSuitesFlag", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:17" + } + ], + "consts": [ + { + "name": "outputFarmatInvalid", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:100" + }, + { + "name": "outputFormatJSON", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:99" + }, + { + "name": "outputFormatText", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/claim/show/failures/failures.go:98" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate", + "name": "generate", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/feedback", + 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage", + "github.com/spf13/cobra" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Builds the generate CLI command with its subcommands\n\nThis function initializes a cobra.Command for the generate group and\nregisters several child commands—catalog, feedback, config, and QE coverage\nreporting—by calling their NewCommand functions. It then returns the fully\nconfigured parent command ready to be added to the main application root. The\nreturned value is a pointer to the cobra.Command instance.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/generate.go:25", + "calls": [ + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerateCmd.AddCommand(markdownGenerateCmd)", + "", + "\tgenerateCmd.AddCommand(markdownGenerateClassification)", + "\treturn generateCmd", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/feedback", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerateFeedbackJsFile.Flags().StringVarP(", + "\t\t\u0026feedbackJSONFilePath, \"feedback\", \"f\", \"\",", + "\t\t\"path to the feedback.json file\")", + "", + "\terr := generateFeedbackJsFile.MarkFlagRequired(\"feedback\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"failed to mark feedback flag as required: :%v\", err)", + "\t\treturn nil", + "\t}", + "\tgenerateFeedbackJsFile.Flags().StringVarP(", + "\t\t\u0026feedbackOutputPath, \"outputPath\", \"o\", \"\",", + 
"\t\t\"path to create on it the feedback.js file\")", + "", + "\terr = generateFeedbackJsFile.MarkFlagRequired(\"outputPath\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"failed to mark outputPath flag as required: :%v\", err)", + "\t\treturn nil", + "\t}", + "\treturn generateFeedbackJsFile", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\treturn generateConfigCmd", + "}" + ] + }, + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tqeCoverageReportCmd.PersistentFlags().String(\"suitename\", \"\", \"Displays the remaining tests not covered by QE for the specified suite name.\")", + "", + "\treturn qeCoverageReportCmd", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerate.AddCommand(catalog.NewCommand())", + 
"\tgenerate.AddCommand(feedback.NewCommand())", + "\tgenerate.AddCommand(config.NewCommand())", + "\tgenerate.AddCommand(qecoverage.NewCommand())", + "", + "\treturn generate", + "}" + ] + } + ], + "globals": [ + { + "name": "generate", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/generate.go:12" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "catalog", + "files": 1, + "imports": [ + "context", + "encoding/json", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "github.com/redhat-openshift-ecosystem/openshift-preflight/container", + "github.com/redhat-openshift-ecosystem/openshift-preflight/operator", + "github.com/spf13/cobra", + "os", + "sort", + "strings" + ], + "structs": [ + { + "name": "Entry", + "exported": true, + "doc": "Entry represents a test entry with its name and identifier\n\nThis struct holds the display name of a test and an associated identifier\nthat includes the test's URL and version information. It is used as the value\ntype in catalogs generated from lists of identifiers, grouping entries by\ntheir suite names. 
The fields are unexported except for the struct itself,\nkeeping the internal representation hidden while still allowing external\npackages to create and use Entry instances.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:70", + "fields": { + "identifier": "claim.Identifier", + "testName": "string" + }, + "methodNames": null, + "source": [ + "type Entry struct {", + "\ttestName string", + "\tidentifier claim.Identifier // {url and version}", + "}" + ] + }, + { + "name": "catalogSummary", + "exported": false, + "doc": "catalogSummary Collects test suite statistics for catalog generation\n\nThis structure aggregates counts of total tests, total suites, and\nper‑suite test numbers while also tracking optional versus mandatory tests\nfor each scenario category. The fields are populated during catalog creation\nand used to format markdown summaries.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:81", + "fields": { + "testPerScenario": "map[string]map[string]int", + "testsPerSuite": "map[string]int", + "totalSuites": "int", + "totalTests": "int" + }, + "methodNames": null, + "source": [ + "type catalogSummary struct {", + "\ttotalSuites int", + "\ttotalTests int", + "\ttestsPerSuite map[string]int", + "\ttestPerScenario map[string]map[string]int", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "CreatePrintableCatalogFromIdentifiers", + "qualifiedName": "CreatePrintableCatalogFromIdentifiers", + "exported": true, + "signature": "func([]claim.Identifier)(map[string][]Entry)", + "doc": "CreatePrintableCatalogFromIdentifiers organizes identifiers into a suite‑based map\n\nThe function receives a slice of identifier objects, extracts each\nidentifier’s suite name, and groups the identifiers by that suite. For\nevery entry it creates an Entry containing the test name and the original\nidentifier, appending it to the corresponding slice in the result map. 
The\nreturned map maps suite names to lists of these entries, ready for further\nprocessing or display.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:110", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen", + "\t// Adds Preflight tests to catalog", + "\taddPreflightTestsToCatalog()", + "", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"## Test Case list\\n\\n\" +", + "\t\t\"Test Cases are the specifications used to perform a meaningful test. \" +", + "\t\t\"Test cases may run once, or several times against several targets. The Red Hat Best Practices Test Suite for Kubernetes includes \" +", + "\t\t\"a number of normative and informative tests to ensure that workloads follow best practices. 
\" +", + "\t\t\"Here is the list of available Test Cases:\\n\"", + "", + "\tsummary.testPerScenario = make(map[string]map[string]int)", + "\tsummary.testsPerSuite = make(map[string]int)", + "\tsummary.totalSuites = len(suites)", + "\tfor _, suite := range suites {", + "\t\toutString += fmt.Sprintf(\"\\n### %s\\n\", suite)", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tsummary.testsPerSuite[suite]++", + "\t\t\tsummary.totalTests++", + "\t\t\t// Add the suite to the comma separate list of tags shown. The tags are also modified in the:", + "\t\t\t// GetTestIDAndLabels function.", + "\t\t\ttags := strings.ReplaceAll(identifiers.Catalog[k.identifier].Tags, \"\\n\", \" \") + \",\" + k.identifier.Suite", + "", + "\t\t\tkeys := make([]string, 0, len(identifiers.Catalog[k.identifier].CategoryClassification))", + "", + "\t\t\tfor scenario := range identifiers.Catalog[k.identifier].CategoryClassification {", + "\t\t\t\tkeys = append(keys, scenario)", + "\t\t\t\t_, ok := summary.testPerScenario[scenarioIDToText(scenario)]", + "\t\t\t\tif !ok {", + "\t\t\t\t\tchild := make(map[string]int)", + "\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)] = child", + "\t\t\t\t}", + "\t\t\t\tswitch scenario {", + "\t\t\t\tcase identifiers.NonTelco:", + "\t\t\t\t\ttag := identifiers.TagCommon", + "\t\t\t\t\tif identifiers.Catalog[k.identifier].Tags == tag {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\tdefault:", + "\t\t\t\t\ttag := strings.ToLower(scenario)", + "\t\t\t\t\tif strings.Contains(identifiers.Catalog[k.identifier].Tags, tag) {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tsort.Strings(keys)", + "\t\t\tclassificationString := \"|**Scenario**|**Optional/Mandatory**|\\n\"", + "\t\t\tfor _, j := 
range keys {", + "\t\t\t\tclassificationString += \"|\" + scenarioIDToText(j) + \"|\" + identifiers.Catalog[k.identifier].CategoryClassification[j] + \"|\\n\"", + "\t\t\t}", + "", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"\\n#### %s\\n\\n\", k.testName)", + "\t\t\toutString += \"|Property|Description|\\n\"", + "\t\t\toutString += \"|---|---|\\n\"", + "\t\t\toutString += fmt.Sprintf(\"|Unique ID|%s|\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"|Description|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Suggested Remediation|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Best Practice Reference|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Exception Process|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].ExceptionProcess, \"\\n\", \" \"))", + "", + "\t\t\t// Add impact statement if available - fail if missing", + "\t\t\tif impact, exists := identifiers.ImpactMap[k.identifier.Id]; exists {", + "\t\t\t\toutString += fmt.Sprintf(\"|Impact Statement|%s|\\n\", strings.ReplaceAll(impact, \"\\n\", \" \"))", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Test case %s is missing an impact statement in the ImpactMap\", k.identifier.Id)", + "\t\t\t\tfmt.Printf(\"ERROR: Test case %s is missing an impact statement in the ImpactMap\\n\", k.identifier.Id)", + "\t\t\t\tos.Exit(1)", + "\t\t\t}", + "", + "\t\t\toutString += fmt.Sprintf(\"|Tags|%s|\\n\", tags)", + "\t\t\toutString += classificationString", + "\t\t}", + "\t}", + "", + "\treturn outString, summary", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry {", + "\tcatalog 
:= make(map[string][]Entry)", + "\t// we need the list of suite's names", + "\tfor _, i := range keys {", + "\t\tcatalog[i.Suite] = append(catalog[i.Suite], Entry{", + "\t\t\ttestName: i.Id,", + "\t\t\tidentifier: i,", + "\t\t})", + "\t}", + "\treturn catalog", + "}" + ] + }, + { + "name": "GetSuitesFromIdentifiers", + "qualifiedName": "GetSuitesFromIdentifiers", + "exported": true, + "signature": "func([]claim.Identifier)([]string)", + "doc": "GetSuitesFromIdentifiers Creates a list of unique test suite names from identifiers\n\nThis function iterates over a slice of identifier objects, collecting each\nidentifier's Suite field into a temporary slice. It then removes duplicate\nsuite names by calling a helper that returns only unique values. The\nresulting slice of distinct suite names is returned.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:128", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "name": "Unique", + "kind": "function", + "source": [ + "func Unique(slice []string) []string {", + "\t// create a map with all the values as key", + "\tuniqMap := make(map[string]struct{})", + "\tfor _, v := range slice {", + "\t\tuniqMap[v] = struct{}{}", + "\t}", + "", + "\t// turn the map keys into a slice", + "\tuniqSlice := make([]string, 0, len(uniqMap))", + "\tfor v := range uniqMap {", + "\t\tuniqSlice = append(uniqSlice, v)", + "\t}", + "\treturn uniqSlice", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen", + "\t// Adds Preflight tests to catalog", + "\taddPreflightTestsToCatalog()", + "", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := 
make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"## Test Case list\\n\\n\" +", + "\t\t\"Test Cases are the specifications used to perform a meaningful test. \" +", + "\t\t\"Test cases may run once, or several times against several targets. The Red Hat Best Practices Test Suite for Kubernetes includes \" +", + "\t\t\"a number of normative and informative tests to ensure that workloads follow best practices. \" +", + "\t\t\"Here is the list of available Test Cases:\\n\"", + "", + "\tsummary.testPerScenario = make(map[string]map[string]int)", + "\tsummary.testsPerSuite = make(map[string]int)", + "\tsummary.totalSuites = len(suites)", + "\tfor _, suite := range suites {", + "\t\toutString += fmt.Sprintf(\"\\n### %s\\n\", suite)", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tsummary.testsPerSuite[suite]++", + "\t\t\tsummary.totalTests++", + "\t\t\t// Add the suite to the comma separate list of tags shown. 
The tags are also modified in the:", + "\t\t\t// GetTestIDAndLabels function.", + "\t\t\ttags := strings.ReplaceAll(identifiers.Catalog[k.identifier].Tags, \"\\n\", \" \") + \",\" + k.identifier.Suite", + "", + "\t\t\tkeys := make([]string, 0, len(identifiers.Catalog[k.identifier].CategoryClassification))", + "", + "\t\t\tfor scenario := range identifiers.Catalog[k.identifier].CategoryClassification {", + "\t\t\t\tkeys = append(keys, scenario)", + "\t\t\t\t_, ok := summary.testPerScenario[scenarioIDToText(scenario)]", + "\t\t\t\tif !ok {", + "\t\t\t\t\tchild := make(map[string]int)", + "\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)] = child", + "\t\t\t\t}", + "\t\t\t\tswitch scenario {", + "\t\t\t\tcase identifiers.NonTelco:", + "\t\t\t\t\ttag := identifiers.TagCommon", + "\t\t\t\t\tif identifiers.Catalog[k.identifier].Tags == tag {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\tdefault:", + "\t\t\t\t\ttag := strings.ToLower(scenario)", + "\t\t\t\t\tif strings.Contains(identifiers.Catalog[k.identifier].Tags, tag) {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tsort.Strings(keys)", + "\t\t\tclassificationString := \"|**Scenario**|**Optional/Mandatory**|\\n\"", + "\t\t\tfor _, j := range keys {", + "\t\t\t\tclassificationString += \"|\" + scenarioIDToText(j) + \"|\" + identifiers.Catalog[k.identifier].CategoryClassification[j] + \"|\\n\"", + "\t\t\t}", + "", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"\\n#### %s\\n\\n\", k.testName)", + "\t\t\toutString += \"|Property|Description|\\n\"", + "\t\t\toutString += \"|---|---|\\n\"", + "\t\t\toutString += fmt.Sprintf(\"|Unique ID|%s|\\n\", k.identifier.Id)", + "\t\t\toutString += 
fmt.Sprintf(\"|Description|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Suggested Remediation|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Best Practice Reference|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Exception Process|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].ExceptionProcess, \"\\n\", \" \"))", + "", + "\t\t\t// Add impact statement if available - fail if missing", + "\t\t\tif impact, exists := identifiers.ImpactMap[k.identifier.Id]; exists {", + "\t\t\t\toutString += fmt.Sprintf(\"|Impact Statement|%s|\\n\", strings.ReplaceAll(impact, \"\\n\", \" \"))", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Test case %s is missing an impact statement in the ImpactMap\", k.identifier.Id)", + "\t\t\t\tfmt.Printf(\"ERROR: Test case %s is missing an impact statement in the ImpactMap\\n\", k.identifier.Id)", + "\t\t\t\tos.Exit(1)", + "\t\t\t}", + "", + "\t\t\toutString += fmt.Sprintf(\"|Tags|%s|\\n\", tags)", + "\t\t\toutString += classificationString", + "\t\t}", + "\t}", + "", + "\treturn outString, summary", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetSuitesFromIdentifiers(keys []claim.Identifier) []string {", + "\tvar suites []string", + "\tfor _, i := range keys {", + "\t\tsuites = append(suites, i.Suite)", + "\t}", + "\treturn arrayhelper.Unique(suites)", + "}" + ] + }, + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a catalog generation command\n\nThis function builds a new command for the generate tool, adding\nsub‑commands that produce markdown documentation and classification files.\nIt returns the fully constructed command ready 
to be attached to the main CLI\ntree.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:493", + "calls": [ + { + "name": "AddCommand", + "kind": "function" + }, + { + "name": "AddCommand", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerate.AddCommand(catalog.NewCommand())", + "\tgenerate.AddCommand(feedback.NewCommand())", + "\tgenerate.AddCommand(config.NewCommand())", + "\tgenerate.AddCommand(qecoverage.NewCommand())", + "", + "\treturn generate", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerateCmd.AddCommand(markdownGenerateCmd)", + "", + "\tgenerateCmd.AddCommand(markdownGenerateClassification)", + "\treturn generateCmd", + "}" + ] + }, + { + "name": "addPreflightTestsToCatalog", + "qualifiedName": "addPreflightTestsToCatalog", + "exported": false, + "signature": "func()()", + "doc": "addPreflightTestsToCatalog Adds preflight test entries to the catalog\n\nThe function retrieves operator and container preflight tests via the\npreflight library, collects their metadata, and inserts each as a catalog\nentry with default remediation and classification values. It logs errors if\nartifact creation or list retrieval fails but continues processing remaining\ntests. 
Each test is added under the common preflight suite key, ensuring they\nappear in the generated test case documentation.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:166", + "calls": [ + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "name": "NewMapWriter", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "name": "ContextWithWriter", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/operator", + "name": "NewCheck", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/container", + "name": "NewCheck", + "kind": "function" + }, + { + "name": "List", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Help", + "kind": "function" + }, + { + "name": "Name", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": 
"AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "Name", + "kind": "function" + }, + { + "name": "Metadata", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen", + "\t// Adds Preflight tests to catalog", + "\taddPreflightTestsToCatalog()", + "", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + 
"\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"## Test Case list\\n\\n\" +", + "\t\t\"Test Cases are the specifications used to perform a meaningful test. \" +", + "\t\t\"Test cases may run once, or several times against several targets. The Red Hat Best Practices Test Suite for Kubernetes includes \" +", + "\t\t\"a number of normative and informative tests to ensure that workloads follow best practices. \" +", + "\t\t\"Here is the list of available Test Cases:\\n\"", + "", + "\tsummary.testPerScenario = make(map[string]map[string]int)", + "\tsummary.testsPerSuite = make(map[string]int)", + "\tsummary.totalSuites = len(suites)", + "\tfor _, suite := range suites {", + "\t\toutString += fmt.Sprintf(\"\\n### %s\\n\", suite)", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tsummary.testsPerSuite[suite]++", + "\t\t\tsummary.totalTests++", + "\t\t\t// Add the suite to the comma separate list of tags shown. The tags are also modified in the:", + "\t\t\t// GetTestIDAndLabels function.", + "\t\t\ttags := strings.ReplaceAll(identifiers.Catalog[k.identifier].Tags, \"\\n\", \" \") + \",\" + k.identifier.Suite", + "", + "\t\t\tkeys := make([]string, 0, len(identifiers.Catalog[k.identifier].CategoryClassification))", + "", + "\t\t\tfor scenario := range identifiers.Catalog[k.identifier].CategoryClassification {", + "\t\t\t\tkeys = append(keys, scenario)", + "\t\t\t\t_, ok := summary.testPerScenario[scenarioIDToText(scenario)]", + "\t\t\t\tif !ok {", + "\t\t\t\t\tchild := make(map[string]int)", + "\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)] = child", + "\t\t\t\t}", + "\t\t\t\tswitch scenario {", + "\t\t\t\tcase identifiers.NonTelco:", + "\t\t\t\t\ttag := identifiers.TagCommon", + "\t\t\t\t\tif identifiers.Catalog[k.identifier].Tags == tag {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + 
"\t\t\t\tdefault:", + "\t\t\t\t\ttag := strings.ToLower(scenario)", + "\t\t\t\t\tif strings.Contains(identifiers.Catalog[k.identifier].Tags, tag) {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tsort.Strings(keys)", + "\t\t\tclassificationString := \"|**Scenario**|**Optional/Mandatory**|\\n\"", + "\t\t\tfor _, j := range keys {", + "\t\t\t\tclassificationString += \"|\" + scenarioIDToText(j) + \"|\" + identifiers.Catalog[k.identifier].CategoryClassification[j] + \"|\\n\"", + "\t\t\t}", + "", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"\\n#### %s\\n\\n\", k.testName)", + "\t\t\toutString += \"|Property|Description|\\n\"", + "\t\t\toutString += \"|---|---|\\n\"", + "\t\t\toutString += fmt.Sprintf(\"|Unique ID|%s|\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"|Description|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Suggested Remediation|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Best Practice Reference|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Exception Process|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].ExceptionProcess, \"\\n\", \" \"))", + "", + "\t\t\t// Add impact statement if available - fail if missing", + "\t\t\tif impact, exists := identifiers.ImpactMap[k.identifier.Id]; exists {", + "\t\t\t\toutString += fmt.Sprintf(\"|Impact Statement|%s|\\n\", strings.ReplaceAll(impact, \"\\n\", \" \"))", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Test case %s is missing an impact statement in the ImpactMap\", k.identifier.Id)", + "\t\t\t\tfmt.Printf(\"ERROR: Test case %s 
is missing an impact statement in the ImpactMap\\n\", k.identifier.Id)", + "\t\t\t\tos.Exit(1)", + "\t\t\t}", + "", + "\t\t\toutString += fmt.Sprintf(\"|Tags|%s|\\n\", tags)", + "\t\t\toutString += classificationString", + "\t\t}", + "\t}", + "", + "\treturn outString, summary", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addPreflightTestsToCatalog() {", + "\tconst dummy = \"dummy\"", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\tlog.Error(\"Error creating artifact, failed to add preflight tests to catalog: %v\", err)", + "\t\treturn", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\toptsOperator := []plibOperator.Option{}", + "\toptsContainer := []plibContainer.Option{}", + "\tcheckOperator := plibOperator.NewCheck(dummy, dummy, []byte(\"\"), optsOperator...)", + "\tcheckContainer := plibContainer.NewCheck(dummy, optsContainer...)", + "\t_, checksOperator, err := checkOperator.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight operator tests: %v\", err)", + "\t}", + "\t_, checksContainer, err := checkContainer.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight container tests: %v\", err)", + "\t}", + "", + "\tallChecks := checksOperator", + "\tallChecks = append(allChecks, checksContainer...)", + "", + "\tfor _, c := range allChecks {", + "\t\tremediation := c.Help().Suggestion", + "", + "\t\t// Custom override for specific preflight test remediation", + "\t\tif c.Name() == \"FollowsRestrictedNetworkEnablementGuidelines\" {", + "\t\t\tremediation = \"If consumers of your operator may need to do so on a restricted network, implement the guidelines outlined in OCP documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/latest/html/disconnected_environments/olm-restricted-networks\"", + "\t\t}", + "", + "\t\t_ = 
identifiers.AddCatalogEntry(", + "\t\t\tc.Name(),", + "\t\t\tcommon.PreflightTestKey,", + "\t\t\tc.Metadata().Description,", + "\t\t\tremediation,", + "\t\t\tidentifiers.NoDocumentedProcess,", + "\t\t\tidentifiers.NoDocLink,", + "\t\t\ttrue,", + "\t\t\tmap[string]string{", + "\t\t\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\t\t\tidentifiers.Telco: identifiers.Optional,", + "\t\t\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\t\t\tidentifiers.Extended: identifiers.Optional,", + "\t\t\t},", + "\t\t\tidentifiers.TagCommon)", + "\t}", + "}" + ] + }, + { + "name": "emitTextFromFile", + "qualifiedName": "emitTextFromFile", + "exported": false, + "signature": "func(string)(error)", + "doc": "emitTextFromFile streams a file’s contents to standard output\n\nThis helper reads the entire content of the specified file into memory,\nconverts it to a string, and prints it directly to stdout. It returns any\nread error encountered; otherwise, it completes successfully with no value.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:93", + "calls": [ + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func emitTextFromFile(filename string) error {", + "\ttext, err := os.ReadFile(filename)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tfmt.Print(string(text))", + "\treturn nil", + "}" + ] + }, + { + "name": "generateJS", + "qualifiedName": "generateJS", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "generateJS Outputs classification data as formatted JSON\n\nThis routine triggers the generation of JavaScript-friendly output by\ninvoking a helper that marshals classification identifiers into indented\nJSON. 
It captures any marshalling errors, logs them if they occur, and prints\nthe resulting string to standard output. The function returns nil on success\nor propagates an error when one is encountered.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:387", + "calls": [ + { + "name": "outputJS", + "kind": "function", + "source": [ + "func outputJS() {", + "\tout, err := json.MarshalIndent(identifiers.Classification, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Error(\"could not Marshall classification, err=%s\", err)", + "\t\treturn", + "\t}", + "\tfmt.Printf(\"classification= %s \", out)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func generateJS(_ *cobra.Command, _ []string) error {", + "\t// process the test cases", + "\toutputJS()", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "outputIntro", + "qualifiedName": "outputIntro", + "exported": false, + "signature": "func()(string)", + "doc": "outputIntro Generates introductory markdown for the catalog\n\nThis function builds a Markdown header that introduces the Red Hat Best\nPractices Test Suite for Kubernetes catalog, including a title and\ndescriptive paragraph. It concatenates static strings containing HTML\ncomments to disable specific linting rules, the main heading, and a paragraph\nexplaining the test areas, mandatory tests, and workload scenarios. 
The\nresulting string is returned for inclusion at the top of generated\ndocumentation.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:403", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "runGenerateMarkdownCmd", + "kind": "function", + "source": [ + "func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error {", + "\t// prints intro", + "\tintro := outputIntro()", + "\t// process the test cases", + "\ttcs, summaryRaw := outputTestCases()", + "\t// create summary", + "\tsummary := summaryToMD(summaryRaw)", + "", + "\tsccCategories := outputSccCategories()", + "\tfmt.Fprintf(os.Stdout, \"%s\", intro+summary+tcs+sccCategories)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func outputIntro() (out string) {", + "\theaderStr :=", + "\t\t\"\u003c!-- markdownlint-disable line-length no-bare-urls blanks-around-lists ul-indent blanks-around-headings no-trailing-spaces --\u003e\\n\" +", + "\t\t\t\"# Red Hat Best Practices Test Suite for Kubernetes catalog\\n\\n\"", + "\tintroStr :=", + "\t\t\"The catalog for the Red Hat Best Practices Test Suite for Kubernetes contains a list of test cases \" +", + "\t\t\t\"aiming at testing best practices in various areas. Test suites are defined in 10 areas : `platform-alteration`, `access-control`, `affiliated-certification`, \" +", + "\t\t\t\"`lifecycle`, `manageability`,`networking`, `observability`, `operator`, and `performance.`\" +", + "\t\t\t\"\\n\\nDepending on the workload type, not all tests are required to pass to satisfy best practice requirements. The scenario section\" +", + "\t\t\t\" indicates which tests are mandatory or optional depending on the scenario. 
The following workload types / scenarios are defined: `Telco`, `Non-Telco`, `Far-Edge`, `Extended`.\\n\\n\"", + "", + "\treturn headerStr + introStr", + "}" + ] + }, + { + "name": "outputJS", + "qualifiedName": "outputJS", + "exported": false, + "signature": "func()()", + "doc": "outputJS Prints the classification data as formatted JSON\n\nThe function marshals a global classification structure into pretty-printed\nJSON. If marshalling fails, it logs an error and exits early. Otherwise, it\nwrites the resulting string to standard output.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:371", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "MarshalIndent", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "generateJS", + "kind": "function", + "source": [ + "func generateJS(_ *cobra.Command, _ []string) error {", + "\t// process the test cases", + "\toutputJS()", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func outputJS() {", + "\tout, err := json.MarshalIndent(identifiers.Classification, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Error(\"could not Marshall classification, err=%s\", err)", + "\t\treturn", + "\t}", + "\tfmt.Printf(\"classification= %s \", out)", + "}" + ] + }, + { + "name": "outputSccCategories", + "qualifiedName": "outputSccCategories", + "exported": false, + "signature": "func()(string)", + "doc": "outputSccCategories Provides a Markdown section describing security context categories\n\nThe function 
builds a string containing a header, an introductory note, and\nfour subsections that explain different SCC scenarios for Kubernetes\nworkloads. Each subsection lists the expected capabilities or restrictions\nassociated with that category. The resulting text is returned as a single\nstring.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:424", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "runGenerateMarkdownCmd", + "kind": "function", + "source": [ + "func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error {", + "\t// prints intro", + "\tintro := outputIntro()", + "\t// process the test cases", + "\ttcs, summaryRaw := outputTestCases()", + "\t// create summary", + "\tsummary := summaryToMD(summaryRaw)", + "", + "\tsccCategories := outputSccCategories()", + "\tfmt.Fprintf(os.Stdout, \"%s\", intro+summary+tcs+sccCategories)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func outputSccCategories() (sccCategories string) {", + "\tsccCategories = \"\\n## Security Context Categories\\n\"", + "", + "\tintro := \"\\nSecurity context categories referred here are applicable to the [access control test case](#access-control-security-context).\\n\\n\"", + "", + "\tfirstCat := \"### 1st Category\\n\" +", + "\t\t\"Default SCC for all users if namespace does not use service mesh.\\n\\n\" +", + "\t\t\"Workloads under this category should: \\n\" +", + "\t\t\" - Use default CNI (OVN) network interface\\n\" +", + "\t\t\" - Not request NET_ADMIN or NET_RAW for advanced networking functions\\n\\n\"", + "", + "\tsecondCat := \"### 2nd Category\\n\" +", + "\t\t\"For workloads which utilize Service Mesh sidecars for mTLS or load balancing. These workloads must utilize an alternative SCC “restricted-no-uid0” to workaround a service mesh UID limitation. 
\" +", + "\t\t\"Workloads under this category should not run as root (UID0).\\n\\n\"", + "", + "\tthirdCat := \"### 3rd Category\\n\" +", + "\t\t\"For workloads with advanced networking functions/requirements (e.g. CAP_NET_RAW, CAP_NET_ADMIN, may run as root).\\n\\n\" +", + "\t\t\"For example:\\n\" +", + "\t\t\" - Manipulate the low-level protocol flags, such as the 802.1p priority, VLAN tag, DSCP value, etc.\\n\" +", + "\t\t\" - Manipulate the interface IP addresses or the routing table or the firewall rules on-the-fly.\\n\" +", + "\t\t\" - Process Ethernet packets\\n\" +", + "\t\t\"Workloads under this category may\\n\" +", + "\t\t\" - Use Macvlan interface to sending and receiving Ethernet packets\\n\" +", + "\t\t\" - Request CAP_NET_RAW for creating raw sockets\\n\" +", + "\t\t\" - Request CAP_NET_ADMIN for\\n\" +", + "\t\t\" - Modify the interface IP address on-the-fly\\n\" +", + "\t\t\" - Manipulating the routing table on-the-fly\\n\" +", + "\t\t\" - Manipulating firewall rules on-the-fly\\n\" +", + "\t\t\" - Setting packet DSCP value\\n\\n\"", + "", + "\tfourthCat := \"### 4th Category\\n\" +", + "\t\t\"For workloads handling user plane traffic or latency-sensitive payloads at line rate, such as load balancing, routing, deep packet inspection etc. \" +", + "\t\t\"Workloads under this category may also need to process the packets at a lower level.\\n\\n\" +", + "\t\t\"These workloads shall \\n\" +", + "\t\t\" - Use SR-IOV interfaces \\n\" +", + "\t\t\" - Fully or partially bypassing kernel networking stack with userspace networking technologies,\" +", + "\t\t\"such as DPDK, F-stack, VPP, OpenFastPath, etc. A userspace networking stack not only improves\" +", + "\t\t\"the performance but also reduces the need for CAP_NET_ADMIN and CAP_NET_RAW.\\n\" +", + "\t\t\"CAP_IPC_LOCK is mandatory for allocating hugepage memory, hence shall be granted to DPDK applications. 
If the workload is latency-sensitive and needs a real-time kernel, CAP_SYS_NICE would be required.\\n\"", + "", + "\treturn sccCategories + intro + firstCat + secondCat + thirdCat + fourthCat", + "}" + ] + }, + { + "name": "outputTestCases", + "qualifiedName": "outputTestCases", + "exported": false, + "signature": "func()(string, catalogSummary)", + "doc": "outputTestCases generates Markdown for test case catalog\n\nIt compiles all test cases, sorts them by ID and suite, builds a table of\nproperties and impact statements, and returns the formatted string along with\nstatistics about tests per scenario and suite.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:222", + "calls": [ + { + "name": "addPreflightTestsToCatalog", + "kind": "function", + "source": [ + "func addPreflightTestsToCatalog() {", + "\tconst dummy = \"dummy\"", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\tlog.Error(\"Error creating artifact, failed to add preflight tests to catalog: %v\", err)", + "\t\treturn", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\toptsOperator := []plibOperator.Option{}", + "\toptsContainer := []plibContainer.Option{}", + "\tcheckOperator := plibOperator.NewCheck(dummy, dummy, []byte(\"\"), optsOperator...)", + "\tcheckContainer := plibContainer.NewCheck(dummy, optsContainer...)", + "\t_, checksOperator, err := checkOperator.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight operator tests: %v\", err)", + "\t}", + "\t_, checksContainer, err := checkContainer.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight container tests: %v\", err)", + "\t}", + "", + "\tallChecks := checksOperator", + "\tallChecks = append(allChecks, checksContainer...)", + "", + "\tfor _, c := range allChecks {", + "\t\tremediation := c.Help().Suggestion", + "", + "\t\t// Custom override for 
specific preflight test remediation", + "\t\tif c.Name() == \"FollowsRestrictedNetworkEnablementGuidelines\" {", + "\t\t\tremediation = \"If consumers of your operator may need to do so on a restricted network, implement the guidelines outlined in OCP documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/latest/html/disconnected_environments/olm-restricted-networks\"", + "\t\t}", + "", + "\t\t_ = identifiers.AddCatalogEntry(", + "\t\t\tc.Name(),", + "\t\t\tcommon.PreflightTestKey,", + "\t\t\tc.Metadata().Description,", + "\t\t\tremediation,", + "\t\t\tidentifiers.NoDocumentedProcess,", + "\t\t\tidentifiers.NoDocLink,", + "\t\t\ttrue,", + "\t\t\tmap[string]string{", + "\t\t\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\t\t\tidentifiers.Telco: identifiers.Optional,", + "\t\t\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\t\t\tidentifiers.Extended: identifiers.Optional,", + "\t\t\t},", + "\t\t\tidentifiers.TagCommon)", + "\t}", + "}" + ] + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Slice", + "kind": "function" + }, + { + "name": "CreatePrintableCatalogFromIdentifiers", + "kind": "function", + "source": [ + "func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry {", + "\tcatalog := make(map[string][]Entry)", + "\t// we need the list of suite's names", + "\tfor _, i := range keys {", + "\t\tcatalog[i.Suite] = append(catalog[i.Suite], Entry{", + "\t\t\ttestName: i.Id,", + "\t\t\tidentifier: i,", + "\t\t})", + "\t}", + "\treturn catalog", + "}" + ] + }, + { + "name": "GetSuitesFromIdentifiers", + "kind": "function", + "source": [ + "func GetSuitesFromIdentifiers(keys []claim.Identifier) []string {", + "\tvar suites []string", + "\tfor _, i := range keys {", + "\t\tsuites = append(suites, i.Suite)", + "\t}", + "\treturn arrayhelper.Unique(suites)", + "}" + 
] + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "scenarioIDToText", + "kind": "function", + "source": [ + "func scenarioIDToText(id string) (text string) {", + "\tswitch id {", + "\tcase identifiers.FarEdge:", + "\t\ttext = \"Far-Edge\"", + "\tcase identifiers.Telco:", + "\t\ttext = \"Telco\"", + "\tcase identifiers.NonTelco:", + "\t\ttext = \"Non-Telco\"", + "\tcase identifiers.Extended:", + "\t\ttext = \"Extended\"", + "\tdefault:", + "\t\ttext = \"Unknown Scenario\"", + "\t}", + "\treturn text", + "}" + ] + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "scenarioIDToText", + "kind": "function", + "source": [ + "func scenarioIDToText(id string) (text string) {", + "\tswitch id {", + "\tcase identifiers.FarEdge:", + "\t\ttext = \"Far-Edge\"", + "\tcase identifiers.Telco:", + "\t\ttext = \"Telco\"", + "\tcase identifiers.NonTelco:", + "\t\ttext = \"Non-Telco\"", + "\tcase identifiers.Extended:", + "\t\ttext = \"Extended\"", + "\tdefault:", + "\t\ttext = \"Unknown Scenario\"", + "\t}", + "\treturn text", + "}" + ] + }, + { + "name": "scenarioIDToText", + "kind": "function", + "source": [ + "func scenarioIDToText(id string) (text string) {", + "\tswitch id {", + "\tcase identifiers.FarEdge:", + "\t\ttext = \"Far-Edge\"", + "\tcase identifiers.Telco:", + "\t\ttext = \"Telco\"", + "\tcase identifiers.NonTelco:", + "\t\ttext = \"Non-Telco\"", + "\tcase identifiers.Extended:", + "\t\ttext = \"Extended\"", + "\tdefault:", + "\t\ttext = \"Unknown Scenario\"", + "\t}", + "\treturn 
text", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "scenarioIDToText", + "kind": "function", + "source": [ + "func scenarioIDToText(id string) (text string) {", + "\tswitch id {", + "\tcase identifiers.FarEdge:", + "\t\ttext = \"Far-Edge\"", + "\tcase identifiers.Telco:", + "\t\ttext = \"Telco\"", + "\tcase identifiers.NonTelco:", + "\t\ttext = \"Non-Telco\"", + "\tcase identifiers.Extended:", + "\t\ttext = \"Extended\"", + "\tdefault:", + "\t\ttext = \"Unknown Scenario\"", + "\t}", + "\treturn text", + "}" + ] + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "name": "scenarioIDToText", + "kind": "function", + "source": [ + "func scenarioIDToText(id string) (text string) {", + "\tswitch id {", + "\tcase identifiers.FarEdge:", + "\t\ttext = \"Far-Edge\"", + "\tcase identifiers.Telco:", + "\t\ttext = \"Telco\"", + "\tcase identifiers.NonTelco:", + "\t\ttext = \"Non-Telco\"", + "\tcase identifiers.Extended:", + "\t\ttext = \"Extended\"", + "\tdefault:", + "\t\ttext = \"Unknown Scenario\"", + "\t}", + "\treturn text", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + 
"kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "runGenerateMarkdownCmd", + "kind": "function", + "source": [ + "func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error {", + "\t// prints intro", + "\tintro := outputIntro()", + "\t// process the test cases", + "\ttcs, summaryRaw := outputTestCases()", + "\t// create summary", + "\tsummary := summaryToMD(summaryRaw)", + "", + "\tsccCategories := outputSccCategories()", + "\tfmt.Fprintf(os.Stdout, \"%s\", intro+summary+tcs+sccCategories)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen", + "\t// Adds Preflight tests to catalog", + "\taddPreflightTestsToCatalog()", + "", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := 
GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"## Test Case list\\n\\n\" +", + "\t\t\"Test Cases are the specifications used to perform a meaningful test. \" +", + "\t\t\"Test cases may run once, or several times against several targets. The Red Hat Best Practices Test Suite for Kubernetes includes \" +", + "\t\t\"a number of normative and informative tests to ensure that workloads follow best practices. \" +", + "\t\t\"Here is the list of available Test Cases:\\n\"", + "", + "\tsummary.testPerScenario = make(map[string]map[string]int)", + "\tsummary.testsPerSuite = make(map[string]int)", + "\tsummary.totalSuites = len(suites)", + "\tfor _, suite := range suites {", + "\t\toutString += fmt.Sprintf(\"\\n### %s\\n\", suite)", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tsummary.testsPerSuite[suite]++", + "\t\t\tsummary.totalTests++", + "\t\t\t// Add the suite to the comma separate list of tags shown. 
The tags are also modified in the:", + "\t\t\t// GetTestIDAndLabels function.", + "\t\t\ttags := strings.ReplaceAll(identifiers.Catalog[k.identifier].Tags, \"\\n\", \" \") + \",\" + k.identifier.Suite", + "", + "\t\t\tkeys := make([]string, 0, len(identifiers.Catalog[k.identifier].CategoryClassification))", + "", + "\t\t\tfor scenario := range identifiers.Catalog[k.identifier].CategoryClassification {", + "\t\t\t\tkeys = append(keys, scenario)", + "\t\t\t\t_, ok := summary.testPerScenario[scenarioIDToText(scenario)]", + "\t\t\t\tif !ok {", + "\t\t\t\t\tchild := make(map[string]int)", + "\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)] = child", + "\t\t\t\t}", + "\t\t\t\tswitch scenario {", + "\t\t\t\tcase identifiers.NonTelco:", + "\t\t\t\t\ttag := identifiers.TagCommon", + "\t\t\t\t\tif identifiers.Catalog[k.identifier].Tags == tag {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\tdefault:", + "\t\t\t\t\ttag := strings.ToLower(scenario)", + "\t\t\t\t\tif strings.Contains(identifiers.Catalog[k.identifier].Tags, tag) {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tsort.Strings(keys)", + "\t\t\tclassificationString := \"|**Scenario**|**Optional/Mandatory**|\\n\"", + "\t\t\tfor _, j := range keys {", + "\t\t\t\tclassificationString += \"|\" + scenarioIDToText(j) + \"|\" + identifiers.Catalog[k.identifier].CategoryClassification[j] + \"|\\n\"", + "\t\t\t}", + "", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"\\n#### %s\\n\\n\", k.testName)", + "\t\t\toutString += \"|Property|Description|\\n\"", + "\t\t\toutString += \"|---|---|\\n\"", + "\t\t\toutString += fmt.Sprintf(\"|Unique ID|%s|\\n\", k.identifier.Id)", + "\t\t\toutString += 
fmt.Sprintf(\"|Description|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Suggested Remediation|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Best Practice Reference|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Exception Process|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].ExceptionProcess, \"\\n\", \" \"))", + "", + "\t\t\t// Add impact statement if available - fail if missing", + "\t\t\tif impact, exists := identifiers.ImpactMap[k.identifier.Id]; exists {", + "\t\t\t\toutString += fmt.Sprintf(\"|Impact Statement|%s|\\n\", strings.ReplaceAll(impact, \"\\n\", \" \"))", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Test case %s is missing an impact statement in the ImpactMap\", k.identifier.Id)", + "\t\t\t\tfmt.Printf(\"ERROR: Test case %s is missing an impact statement in the ImpactMap\\n\", k.identifier.Id)", + "\t\t\t\tos.Exit(1)", + "\t\t\t}", + "", + "\t\t\toutString += fmt.Sprintf(\"|Tags|%s|\\n\", tags)", + "\t\t\toutString += classificationString", + "\t\t}", + "\t}", + "", + "\treturn outString, summary", + "}" + ] + }, + { + "name": "runGenerateMarkdownCmd", + "qualifiedName": "runGenerateMarkdownCmd", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "runGenerateMarkdownCmd Produces a markdown catalog of test cases\n\nIt gathers introductory text, formats each test case with metadata and impact\nstatements, builds a summary table, appends security context categories, then\nwrites the combined output to standard output. 
The function returns no error\nunless writing fails.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:473", + "calls": [ + { + "name": "outputIntro", + "kind": "function", + "source": [ + "func outputIntro() (out string) {", + "\theaderStr :=", + "\t\t\"\u003c!-- markdownlint-disable line-length no-bare-urls blanks-around-lists ul-indent blanks-around-headings no-trailing-spaces --\u003e\\n\" +", + "\t\t\t\"# Red Hat Best Practices Test Suite for Kubernetes catalog\\n\\n\"", + "\tintroStr :=", + "\t\t\"The catalog for the Red Hat Best Practices Test Suite for Kubernetes contains a list of test cases \" +", + "\t\t\t\"aiming at testing best practices in various areas. Test suites are defined in 10 areas : `platform-alteration`, `access-control`, `affiliated-certification`, \" +", + "\t\t\t\"`lifecycle`, `manageability`,`networking`, `observability`, `operator`, and `performance.`\" +", + "\t\t\t\"\\n\\nDepending on the workload type, not all tests are required to pass to satisfy best practice requirements. The scenario section\" +", + "\t\t\t\" indicates which tests are mandatory or optional depending on the scenario. 
The following workload types / scenarios are defined: `Telco`, `Non-Telco`, `Far-Edge`, `Extended`.\\n\\n\"", + "", + "\treturn headerStr + introStr", + "}" + ] + }, + { + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen", + "\t// Adds Preflight tests to catalog", + "\taddPreflightTestsToCatalog()", + "", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"## Test Case list\\n\\n\" +", + "\t\t\"Test Cases are the specifications used to perform a meaningful test. \" +", + "\t\t\"Test cases may run once, or several times against several targets. The Red Hat Best Practices Test Suite for Kubernetes includes \" +", + "\t\t\"a number of normative and informative tests to ensure that workloads follow best practices. 
\" +", + "\t\t\"Here is the list of available Test Cases:\\n\"", + "", + "\tsummary.testPerScenario = make(map[string]map[string]int)", + "\tsummary.testsPerSuite = make(map[string]int)", + "\tsummary.totalSuites = len(suites)", + "\tfor _, suite := range suites {", + "\t\toutString += fmt.Sprintf(\"\\n### %s\\n\", suite)", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tsummary.testsPerSuite[suite]++", + "\t\t\tsummary.totalTests++", + "\t\t\t// Add the suite to the comma separate list of tags shown. The tags are also modified in the:", + "\t\t\t// GetTestIDAndLabels function.", + "\t\t\ttags := strings.ReplaceAll(identifiers.Catalog[k.identifier].Tags, \"\\n\", \" \") + \",\" + k.identifier.Suite", + "", + "\t\t\tkeys := make([]string, 0, len(identifiers.Catalog[k.identifier].CategoryClassification))", + "", + "\t\t\tfor scenario := range identifiers.Catalog[k.identifier].CategoryClassification {", + "\t\t\t\tkeys = append(keys, scenario)", + "\t\t\t\t_, ok := summary.testPerScenario[scenarioIDToText(scenario)]", + "\t\t\t\tif !ok {", + "\t\t\t\t\tchild := make(map[string]int)", + "\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)] = child", + "\t\t\t\t}", + "\t\t\t\tswitch scenario {", + "\t\t\t\tcase identifiers.NonTelco:", + "\t\t\t\t\ttag := identifiers.TagCommon", + "\t\t\t\t\tif identifiers.Catalog[k.identifier].Tags == tag {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\tdefault:", + "\t\t\t\t\ttag := strings.ToLower(scenario)", + "\t\t\t\t\tif strings.Contains(identifiers.Catalog[k.identifier].Tags, tag) {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tsort.Strings(keys)", + "\t\t\tclassificationString := \"|**Scenario**|**Optional/Mandatory**|\\n\"", + "\t\t\tfor _, j := 
range keys {", + "\t\t\t\tclassificationString += \"|\" + scenarioIDToText(j) + \"|\" + identifiers.Catalog[k.identifier].CategoryClassification[j] + \"|\\n\"", + "\t\t\t}", + "", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"\\n#### %s\\n\\n\", k.testName)", + "\t\t\toutString += \"|Property|Description|\\n\"", + "\t\t\toutString += \"|---|---|\\n\"", + "\t\t\toutString += fmt.Sprintf(\"|Unique ID|%s|\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"|Description|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Suggested Remediation|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Best Practice Reference|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Exception Process|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].ExceptionProcess, \"\\n\", \" \"))", + "", + "\t\t\t// Add impact statement if available - fail if missing", + "\t\t\tif impact, exists := identifiers.ImpactMap[k.identifier.Id]; exists {", + "\t\t\t\toutString += fmt.Sprintf(\"|Impact Statement|%s|\\n\", strings.ReplaceAll(impact, \"\\n\", \" \"))", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Test case %s is missing an impact statement in the ImpactMap\", k.identifier.Id)", + "\t\t\t\tfmt.Printf(\"ERROR: Test case %s is missing an impact statement in the ImpactMap\\n\", k.identifier.Id)", + "\t\t\t\tos.Exit(1)", + "\t\t\t}", + "", + "\t\t\toutString += fmt.Sprintf(\"|Tags|%s|\\n\", tags)", + "\t\t\toutString += classificationString", + "\t\t}", + "\t}", + "", + "\treturn outString, summary", + "}" + ] + }, + { + "name": "summaryToMD", + "kind": "function", + "source": [ + "func summaryToMD(aSummary catalogSummary) (out string) {", + "\tconst tableHeader = 
\"|---|---|---|\\n\"", + "\tout += \"## Test cases summary\\n\\n\"", + "\tout += fmt.Sprintf(\"### Total test cases: %d\\n\\n\", aSummary.totalTests)", + "\tout += fmt.Sprintf(\"### Total suites: %d\\n\\n\", aSummary.totalSuites)", + "\tout += \"|Suite|Tests per suite|Link|\\n\"", + "\tout += tableHeader", + "", + "\tkeys := make([]string, 0, len(aSummary.testsPerSuite))", + "", + "\tfor j := range aSummary.testsPerSuite {", + "\t\tkeys = append(keys, j)", + "\t}", + "\tsort.Strings(keys)", + "\tfor _, suite := range keys {", + "\t\tout += fmt.Sprintf(\"|%s|%d|[%s](#%s)|\\n\", suite, aSummary.testsPerSuite[suite], suite, suite)", + "\t}", + "\tout += \"\\n\"", + "", + "\tkeys = make([]string, 0, len(aSummary.testPerScenario))", + "", + "\tfor j := range aSummary.testPerScenario {", + "\t\tkeys = append(keys, j)", + "\t}", + "", + "\tsort.Strings(keys)", + "", + "\tfor _, scenario := range keys {", + "\t\tout += fmt.Sprintf(\"### %s specific tests only: %d\\n\\n\", scenario, aSummary.testPerScenario[scenario][identifiers.Mandatory]+aSummary.testPerScenario[scenario][identifiers.Optional])", + "\t\tout += \"|Mandatory|Optional|\\n\"", + "\t\tout += tableHeader", + "\t\tout += fmt.Sprintf(\"|%d|%d|\\n\", aSummary.testPerScenario[scenario][identifiers.Mandatory], aSummary.testPerScenario[scenario][identifiers.Optional])", + "\t\tout += \"\\n\"", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "outputSccCategories", + "kind": "function", + "source": [ + "func outputSccCategories() (sccCategories string) {", + "\tsccCategories = \"\\n## Security Context Categories\\n\"", + "", + "\tintro := \"\\nSecurity context categories referred here are applicable to the [access control test case](#access-control-security-context).\\n\\n\"", + "", + "\tfirstCat := \"### 1st Category\\n\" +", + "\t\t\"Default SCC for all users if namespace does not use service mesh.\\n\\n\" +", + "\t\t\"Workloads under this category should: \\n\" +", + "\t\t\" - Use default CNI (OVN) network 
interface\\n\" +", + "\t\t\" - Not request NET_ADMIN or NET_RAW for advanced networking functions\\n\\n\"", + "", + "\tsecondCat := \"### 2nd Category\\n\" +", + "\t\t\"For workloads which utilize Service Mesh sidecars for mTLS or load balancing. These workloads must utilize an alternative SCC “restricted-no-uid0” to workaround a service mesh UID limitation. \" +", + "\t\t\"Workloads under this category should not run as root (UID0).\\n\\n\"", + "", + "\tthirdCat := \"### 3rd Category\\n\" +", + "\t\t\"For workloads with advanced networking functions/requirements (e.g. CAP_NET_RAW, CAP_NET_ADMIN, may run as root).\\n\\n\" +", + "\t\t\"For example:\\n\" +", + "\t\t\" - Manipulate the low-level protocol flags, such as the 802.1p priority, VLAN tag, DSCP value, etc.\\n\" +", + "\t\t\" - Manipulate the interface IP addresses or the routing table or the firewall rules on-the-fly.\\n\" +", + "\t\t\" - Process Ethernet packets\\n\" +", + "\t\t\"Workloads under this category may\\n\" +", + "\t\t\" - Use Macvlan interface to sending and receiving Ethernet packets\\n\" +", + "\t\t\" - Request CAP_NET_RAW for creating raw sockets\\n\" +", + "\t\t\" - Request CAP_NET_ADMIN for\\n\" +", + "\t\t\" - Modify the interface IP address on-the-fly\\n\" +", + "\t\t\" - Manipulating the routing table on-the-fly\\n\" +", + "\t\t\" - Manipulating firewall rules on-the-fly\\n\" +", + "\t\t\" - Setting packet DSCP value\\n\\n\"", + "", + "\tfourthCat := \"### 4th Category\\n\" +", + "\t\t\"For workloads handling user plane traffic or latency-sensitive payloads at line rate, such as load balancing, routing, deep packet inspection etc. 
\" +", + "\t\t\"Workloads under this category may also need to process the packets at a lower level.\\n\\n\" +", + "\t\t\"These workloads shall \\n\" +", + "\t\t\" - Use SR-IOV interfaces \\n\" +", + "\t\t\" - Fully or partially bypassing kernel networking stack with userspace networking technologies,\" +", + "\t\t\"such as DPDK, F-stack, VPP, OpenFastPath, etc. A userspace networking stack not only improves\" +", + "\t\t\"the performance but also reduces the need for CAP_NET_ADMIN and CAP_NET_RAW.\\n\" +", + "\t\t\"CAP_IPC_LOCK is mandatory for allocating hugepage memory, hence shall be granted to DPDK applications. If the workload is latency-sensitive and needs a real-time kernel, CAP_SYS_NICE would be required.\\n\"", + "", + "\treturn sccCategories + intro + firstCat + secondCat + thirdCat + fourthCat", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error {", + "\t// prints intro", + "\tintro := outputIntro()", + "\t// process the test cases", + "\ttcs, summaryRaw := outputTestCases()", + "\t// create summary", + "\tsummary := summaryToMD(summaryRaw)", + "", + "\tsccCategories := outputSccCategories()", + "\tfmt.Fprintf(os.Stdout, \"%s\", intro+summary+tcs+sccCategories)", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "scenarioIDToText", + "qualifiedName": "scenarioIDToText", + "exported": false, + "signature": "func(string)(string)", + "doc": "scenarioIDToText Converts scenario identifiers to readable text\n\nThe function maps a string identifier to a human‑friendly scenario name\nusing predefined constants. If the identifier does not match any known case,\nit returns \"Unknown Scenario\". 
The returned value is used throughout catalog\ngeneration for display and labeling.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:142", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen", + "\t// Adds Preflight tests to catalog", + "\taddPreflightTestsToCatalog()", + "", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"## Test Case list\\n\\n\" +", + "\t\t\"Test Cases are the specifications used to perform a meaningful test. \" +", + "\t\t\"Test cases may run once, or several times against several targets. The Red Hat Best Practices Test Suite for Kubernetes includes \" +", + "\t\t\"a number of normative and informative tests to ensure that workloads follow best practices. 
\" +", + "\t\t\"Here is the list of available Test Cases:\\n\"", + "", + "\tsummary.testPerScenario = make(map[string]map[string]int)", + "\tsummary.testsPerSuite = make(map[string]int)", + "\tsummary.totalSuites = len(suites)", + "\tfor _, suite := range suites {", + "\t\toutString += fmt.Sprintf(\"\\n### %s\\n\", suite)", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tsummary.testsPerSuite[suite]++", + "\t\t\tsummary.totalTests++", + "\t\t\t// Add the suite to the comma separate list of tags shown. The tags are also modified in the:", + "\t\t\t// GetTestIDAndLabels function.", + "\t\t\ttags := strings.ReplaceAll(identifiers.Catalog[k.identifier].Tags, \"\\n\", \" \") + \",\" + k.identifier.Suite", + "", + "\t\t\tkeys := make([]string, 0, len(identifiers.Catalog[k.identifier].CategoryClassification))", + "", + "\t\t\tfor scenario := range identifiers.Catalog[k.identifier].CategoryClassification {", + "\t\t\t\tkeys = append(keys, scenario)", + "\t\t\t\t_, ok := summary.testPerScenario[scenarioIDToText(scenario)]", + "\t\t\t\tif !ok {", + "\t\t\t\t\tchild := make(map[string]int)", + "\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)] = child", + "\t\t\t\t}", + "\t\t\t\tswitch scenario {", + "\t\t\t\tcase identifiers.NonTelco:", + "\t\t\t\t\ttag := identifiers.TagCommon", + "\t\t\t\t\tif identifiers.Catalog[k.identifier].Tags == tag {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\tdefault:", + "\t\t\t\t\ttag := strings.ToLower(scenario)", + "\t\t\t\t\tif strings.Contains(identifiers.Catalog[k.identifier].Tags, tag) {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tsort.Strings(keys)", + "\t\t\tclassificationString := \"|**Scenario**|**Optional/Mandatory**|\\n\"", + "\t\t\tfor _, j := 
range keys {", + "\t\t\t\tclassificationString += \"|\" + scenarioIDToText(j) + \"|\" + identifiers.Catalog[k.identifier].CategoryClassification[j] + \"|\\n\"", + "\t\t\t}", + "", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"\\n#### %s\\n\\n\", k.testName)", + "\t\t\toutString += \"|Property|Description|\\n\"", + "\t\t\toutString += \"|---|---|\\n\"", + "\t\t\toutString += fmt.Sprintf(\"|Unique ID|%s|\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"|Description|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Suggested Remediation|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Best Practice Reference|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Exception Process|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].ExceptionProcess, \"\\n\", \" \"))", + "", + "\t\t\t// Add impact statement if available - fail if missing", + "\t\t\tif impact, exists := identifiers.ImpactMap[k.identifier.Id]; exists {", + "\t\t\t\toutString += fmt.Sprintf(\"|Impact Statement|%s|\\n\", strings.ReplaceAll(impact, \"\\n\", \" \"))", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Test case %s is missing an impact statement in the ImpactMap\", k.identifier.Id)", + "\t\t\t\tfmt.Printf(\"ERROR: Test case %s is missing an impact statement in the ImpactMap\\n\", k.identifier.Id)", + "\t\t\t\tos.Exit(1)", + "\t\t\t}", + "", + "\t\t\toutString += fmt.Sprintf(\"|Tags|%s|\\n\", tags)", + "\t\t\toutString += classificationString", + "\t\t}", + "\t}", + "", + "\treturn outString, summary", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func scenarioIDToText(id string) (text string) {", + "\tswitch id {", + "\tcase identifiers.FarEdge:", + 
"\t\ttext = \"Far-Edge\"", + "\tcase identifiers.Telco:", + "\t\ttext = \"Telco\"", + "\tcase identifiers.NonTelco:", + "\t\ttext = \"Non-Telco\"", + "\tcase identifiers.Extended:", + "\t\ttext = \"Extended\"", + "\tdefault:", + "\t\ttext = \"Unknown Scenario\"", + "\t}", + "\treturn text", + "}" + ] + }, + { + "name": "summaryToMD", + "qualifiedName": "summaryToMD", + "exported": false, + "signature": "func(catalogSummary)(string)", + "doc": "summaryToMD Generates a markdown formatted test case summary\n\nThe function accepts a catalogSummary structure containing totals and\nper-suite/per-scenario counts. It builds a string with headings, total\nnumbers, tables of suites, and separate sections for each scenario’s\nmandatory and optional tests, using sorted keys to ensure consistent\nordering.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:329", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "runGenerateMarkdownCmd", + "kind": "function", + "source": [ + "func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error {", + "\t// prints intro", + 
"\tintro := outputIntro()", + "\t// process the test cases", + "\ttcs, summaryRaw := outputTestCases()", + "\t// create summary", + "\tsummary := summaryToMD(summaryRaw)", + "", + "\tsccCategories := outputSccCategories()", + "\tfmt.Fprintf(os.Stdout, \"%s\", intro+summary+tcs+sccCategories)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func summaryToMD(aSummary catalogSummary) (out string) {", + "\tconst tableHeader = \"|---|---|---|\\n\"", + "\tout += \"## Test cases summary\\n\\n\"", + "\tout += fmt.Sprintf(\"### Total test cases: %d\\n\\n\", aSummary.totalTests)", + "\tout += fmt.Sprintf(\"### Total suites: %d\\n\\n\", aSummary.totalSuites)", + "\tout += \"|Suite|Tests per suite|Link|\\n\"", + "\tout += tableHeader", + "", + "\tkeys := make([]string, 0, len(aSummary.testsPerSuite))", + "", + "\tfor j := range aSummary.testsPerSuite {", + "\t\tkeys = append(keys, j)", + "\t}", + "\tsort.Strings(keys)", + "\tfor _, suite := range keys {", + "\t\tout += fmt.Sprintf(\"|%s|%d|[%s](#%s)|\\n\", suite, aSummary.testsPerSuite[suite], suite, suite)", + "\t}", + "\tout += \"\\n\"", + "", + "\tkeys = make([]string, 0, len(aSummary.testPerScenario))", + "", + "\tfor j := range aSummary.testPerScenario {", + "\t\tkeys = append(keys, j)", + "\t}", + "", + "\tsort.Strings(keys)", + "", + "\tfor _, scenario := range keys {", + "\t\tout += fmt.Sprintf(\"### %s specific tests only: %d\\n\\n\", scenario, aSummary.testPerScenario[scenario][identifiers.Mandatory]+aSummary.testPerScenario[scenario][identifiers.Optional])", + "\t\tout += \"|Mandatory|Optional|\\n\"", + "\t\tout += tableHeader", + "\t\tout += fmt.Sprintf(\"|%d|%d|\\n\", aSummary.testPerScenario[scenario][identifiers.Mandatory], aSummary.testPerScenario[scenario][identifiers.Optional])", + "\t\tout += \"\\n\"", + "\t}", + "\treturn out", + "}" + ] + } + ], + "globals": [ + { + "name": "generateCmd", + "exported": false, + "type": "", + "position": 
"/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:43" + }, + { + "name": "markdownGenerateClassification", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:48" + }, + { + "name": "markdownGenerateCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/catalog/catalog.go:55" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "config", + "files": 2, + "imports": [ + "bufio", + "fmt", + "github.com/fatih/color", + "github.com/manifoldco/promptui", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/spf13/cobra", + "gopkg.in/yaml.v3", + "log", + "os", + "strconv", + "strings" + ], + "structs": [ + { + "name": "configOption", + "exported": false, + "doc": "configOption Represents a configuration setting with its description\n\nThis structure holds two text fields: one that specifies the name of an\noption, and another that provides explanatory help for that option. It is\nused internally to map command-line flags or configuration keys to\nuser-facing descriptions.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:24", + "fields": { + "Help": "string", + "Option": "string" + }, + "methodNames": null, + "source": [ + "type configOption struct {", + "\tOption string", + "\tHelp string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates the configuration subcommand\n\nThis function returns a preconfigured cobra.Command that provides options for\ngenerating or managing configuration files within the application. 
It does\nnot take any arguments and simply returns the command instance that has been\nset up elsewhere in the package.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:35", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerate.AddCommand(catalog.NewCommand())", + "\tgenerate.AddCommand(feedback.NewCommand())", + "\tgenerate.AddCommand(config.NewCommand())", + "\tgenerate.AddCommand(qecoverage.NewCommand())", + "", + "\treturn generate", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\treturn generateConfigCmd", + "}" + ] + }, + { + "name": "createCertSuiteResourcesConfiguration", + "qualifiedName": "createCertSuiteResourcesConfiguration", + "exported": false, + "signature": "func()()", + "doc": "createCertSuiteResourcesConfiguration Presents an interactive menu to configure resource selections\n\nThe function displays a list of configuration options such as namespaces, pod\nlabels, operator labels, CRD filters, and managed deployments. Users can\nselect each option to provide input via prompts, which is then parsed and\nstored in the global configuration. 
Selecting \"previousMenu\" exits back to\nthe higher‑level menu.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:207", + "calls": [ + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Run", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "loadNamespaces", + "kind": "function", + "source": [ + "func loadNamespaces(namespaces []string) {", + "\tcertsuiteConfig.TargetNameSpaces = nil", + "\tfor _, namespace := range namespaces {", + "\t\tcertsuiteNamespace := configuration.Namespace{Name: namespace}", + "\t\tcertsuiteConfig.TargetNameSpaces = append(certsuiteConfig.TargetNameSpaces, certsuiteNamespace)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadPodLabels", + 
"kind": "function", + "source": [ + "func loadPodLabels(podLabels []string) {", + "\tcertsuiteConfig.PodsUnderTestLabels = nil", + "\tcertsuiteConfig.PodsUnderTestLabels = podLabels", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadOperatorLabels", + "kind": "function", + "source": [ + "func loadOperatorLabels(operatorLabels []string) {", + "\tcertsuiteConfig.OperatorsUnderTestLabels = nil", + "\tcertsuiteConfig.OperatorsUnderTestLabels = operatorLabels", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV 
string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadCRDfilters", + "kind": "function", + "source": [ + "func loadCRDfilters(crdFilters []string) {", + "\tcertsuiteConfig.CrdFilters = nil", + "\tfor _, crdFilterStr := range crdFilters {", + "\t\tcrdFilter := strings.Split(crdFilterStr, \"/\")", + "\t\tcrdFilterName := crdFilter[0]", + "\t\tcrdFilterScalable, err := strconv.ParseBool(crdFilter[1])", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"could not parse CRD filter, err: %v\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tcertsuiteCrdFilter := configuration.CrdFilter{NameSuffix: crdFilterName, Scalable: crdFilterScalable}", + "\t\tcertsuiteConfig.CrdFilters = append(certsuiteConfig.CrdFilters, certsuiteCrdFilter)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadManagedDeployments", + "kind": "function", + "source": [ + "func loadManagedDeployments(deployments []string) {", + 
"\tcertsuiteConfig.ManagedDeployments = nil", + "\tfor _, deployment := range deployments {", + "\t\tcertsuiteManagedDeployment := configuration.ManagedDeploymentsStatefulsets{Name: deployment}", + "\t\tcertsuiteConfig.ManagedDeployments = append(certsuiteConfig.ManagedDeployments, certsuiteManagedDeployment)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadManagedStatefulSets", + "kind": "function", + "source": [ + "func loadManagedStatefulSets(statefulSets []string) {", + "\tcertsuiteConfig.ManagedStatefulsets = nil", + "\tfor _, statefulSet := range statefulSets {", + "\t\tcertsuiteManagedStatefulSet := configuration.ManagedDeploymentsStatefulsets{Name: statefulSet}", + "\t\tcertsuiteConfig.ManagedStatefulsets = append(certsuiteConfig.ManagedStatefulsets, certsuiteManagedStatefulSet)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + 
"\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createConfiguration", + "kind": "function", + "source": [ + "func createConfiguration() {", + "\tcreateMenu := []configOption{", + "\t\t{Option: certSuiteResources, Help: certSuiteResourcesHelp},", + "\t\t{Option: exceptions, Help: exceptionsdHelp},", + "\t\t// {Option: collector, Help: collectordHelp},", + "\t\t{Option: settings, Help: settingsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "", + "\tcreatePrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: createMenu,", + "\t\tTemplates: templates,", + "\t\tSize: 5,", + "\t\tHideSelected: true,", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := createPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch createMenu[i].Option {", + "\t\tcase certSuiteResources:", + "\t\t\tcreateCertSuiteResourcesConfiguration()", + "\t\tcase exceptions:", + "\t\t\tcreateExceptionsConfiguration()", + "\t\t// case collector:", + "\t\t// \tcreateCollectorConfiguration()", + "\t\tcase settings:", + "\t\t\tcreateSettingsConfiguration()", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": 
null, + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, podsExample))", + "\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase 
managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "createCollectorConfiguration", + "qualifiedName": "createCollectorConfiguration", + "exported": false, + "signature": "func()()", + "doc": "createCollectorConfiguration prompts the user to select a collector configuration option\n\nThe function presents an interactive menu of configuration options such as\nendpoint, executor identity, partner name, password, and an exit choice. It\nuses a searcher that filters options by matching input text ignoring case and\nspaces. When the user selects an item, the corresponding action is handled in\na switch; currently only the exit option terminates the loop while other\ncases are placeholders for future implementation.\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:327", + "calls": [ + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Run", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createCollectorConfiguration() {", + "\tcollectorOptions := []configOption{", + "\t\t{Option: appEndPoint, Help: \"\"},", + "\t\t{Option: executedBy, Help: \"\"},", + "\t\t{Option: partnerName, Help: \"\"},", + "\t\t{Option: appPassword, Help: \"\"},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcollectorSearcher := func(input string, index int) 
bool {", + "\t\tcollectorOption := collectorOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(collectorOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcollectorPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: collectorOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 5,", + "\t\tSearcher: collectorSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := collectorPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch collectorOptions[i].Option {", + "\t\tcase appEndPoint:", + "\t\t\t// TODO: to be implemented", + "\t\tcase executedBy:", + "\t\t\t// TODO: to be implemented", + "\t\tcase partnerName:", + "\t\t\t// TODO: to be implemented", + "\t\tcase appPassword:", + "\t\t\t// TODO: to be implemented", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "createConfiguration", + "qualifiedName": "createConfiguration", + "exported": false, + "signature": "func()()", + "doc": "createConfiguration Starts the interactive configuration menu\n\nThe function presents a list of configuration categories such as resources,\nexceptions, settings, and an option to return to the previous menu. It uses a\nprompt loop that displays the choices and handles user selection by invoking\ndedicated sub‑configuration functions. 
Errors during the prompt are logged\nand cause an early exit from the routine.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:110", + "calls": [ + { + "name": "Run", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, podsExample))", + "\t\tcase operators:", + 
"\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + "\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch 
exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + "\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "createSettingsConfiguration", + "kind": "function", + "source": [ + "func createSettingsConfiguration() {", + "\tsettingsOptions := []configOption{", + "\t\t{Option: probeDaemonSet, Help: probeDaemonSetHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tsettingsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: settingsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 2,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := settingsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch settingsOptions[i].Option {", + "\t\tcase probeDaemonSet:", + "\t\t\tloadProbeDaemonSetNamespace(getAnswer(probeDaemonSetPrompt, probeDaemonSetSyntax, probeDaemonSetExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "generateConfig", + "kind": "function", + "source": [ + "func generateConfig() {", + "\tmainMenu := []configOption{", + "\t\t{Option: create, Help: createConfigHelp},", + "\t\t{Option: show, Help: showConfigHelp},", + "\t\t{Option: save, Help: saveConfigHelp},", + "\t\t{Option: quit, Help: exitHelp},", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\tmainPrompt := promptui.Select{", + "\t\t\tLabel: \"\",", + "\t\t\tItems: mainMenu,", + "\t\t\tTemplates: templates,", + "\t\t\tSize: 4,", + "\t\t\tHideSelected: true,", + "\t\t}", + "", + "\t\topt, _, err := mainPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch mainMenu[opt].Option {", + "\t\tcase create:", + "\t\t\tcreateConfiguration()", + "\t\tcase show:", + "\t\t\tshowConfiguration(\u0026certsuiteConfig)", + "\t\tcase save:", + "\t\t\tsaveConfiguration(\u0026certsuiteConfig)", + "\t\tcase quit:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createConfiguration() {", + "\tcreateMenu := []configOption{", + "\t\t{Option: certSuiteResources, Help: certSuiteResourcesHelp},", + "\t\t{Option: exceptions, Help: exceptionsdHelp},", + "\t\t// {Option: collector, Help: collectordHelp},", + "\t\t{Option: settings, Help: settingsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "", + "\tcreatePrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: createMenu,", + "\t\tTemplates: templates,", + "\t\tSize: 5,", + "\t\tHideSelected: true,", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := createPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch createMenu[i].Option {", + "\t\tcase certSuiteResources:", + 
"\t\t\tcreateCertSuiteResourcesConfiguration()", + "\t\tcase exceptions:", + "\t\t\tcreateExceptionsConfiguration()", + "\t\t// case collector:", + "\t\t// \tcreateCollectorConfiguration()", + "\t\tcase settings:", + "\t\t\tcreateSettingsConfiguration()", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "createExceptionsConfiguration", + "qualifiedName": "createExceptionsConfiguration", + "exported": false, + "signature": "func()()", + "doc": "createExceptionsConfiguration Presents an interactive menu to configure exception lists\n\nThe routine builds a selection list of exception categories such as kernel\ntaints, Helm charts, protocol names, services, and non‑scalable\ndeployments. It uses promptui to allow the user to search and choose an\noption; upon selection it calls helper functions that read comma‑separated\ninput from the terminal and populate global configuration slices. The process\nrepeats until the user chooses to return to the previous menu.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:266", + "calls": [ + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Run", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "loadAcceptedKernelTaints", + "kind": "function", + "source": [ + "func loadAcceptedKernelTaints(taints []string) {", + "\tcertsuiteConfig.AcceptedKernelTaints = nil", + "\tfor _, taint := range taints {", + "\t\tcertsuiteKernelTaint := configuration.AcceptedKernelTaintsInfo{Module: taint}", + "\t\tcertsuiteConfig.AcceptedKernelTaints = 
append(certsuiteConfig.AcceptedKernelTaints, certsuiteKernelTaint)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadHelmCharts", + "kind": "function", + "source": [ + "func loadHelmCharts(helmCharts []string) {", + "\tcertsuiteConfig.SkipHelmChartList = nil", + "\tfor _, chart := range helmCharts {", + "\t\tcertsuiteHelmChart := configuration.SkipHelmChartList{Name: chart}", + "\t\tcertsuiteConfig.SkipHelmChartList = append(certsuiteConfig.SkipHelmChartList, certsuiteHelmChart)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + 
"\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadProtocolNames", + "kind": "function", + "source": [ + "func loadProtocolNames(protocolNames []string) {", + "\tcertsuiteConfig.ValidProtocolNames = nil", + "\tcertsuiteConfig.ValidProtocolNames = protocolNames", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadServices", + "kind": "function", + "source": [ + "func loadServices(services []string) {", + "\tcertsuiteConfig.ServicesIgnoreList = nil", + "\tcertsuiteConfig.ServicesIgnoreList = services", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + 
color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadNonScalableDeployments", + "kind": "function", + "source": [ + "func loadNonScalableDeployments(nonScalableDeployments []string) {", + "\tcertsuiteConfig.SkipScalingTestDeployments = nil", + "\tfor _, nonScalableDeploymentStr := range nonScalableDeployments {", + "\t\tnonScalableDeployment := strings.Split(nonScalableDeploymentStr, \"/\")", + "\t\tconst nonScalableDeploymentsFields = 2", + "\t\tif len(nonScalableDeployment) != nonScalableDeploymentsFields {", + "\t\t\tlog.Println(\"could not parse Non-scalable Deployment\")", + "\t\t\treturn", + "\t\t}", + "\t\tnonScalableDeploymentName := nonScalableDeployment[0]", + "\t\tnonScalableDeploymentNamespace := nonScalableDeployment[1]", + "\t\tcertsuiteNonScalableDeployment := configuration.SkipScalingTestDeploymentsInfo{Name: nonScalableDeploymentName,", + "\t\t\tNamespace: nonScalableDeploymentNamespace}", + "\t\tcertsuiteConfig.SkipScalingTestDeployments = append(certsuiteConfig.SkipScalingTestDeployments, certsuiteNonScalableDeployment)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + 
"", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadNonScalableStatefulSets", + "kind": "function", + "source": [ + "func loadNonScalableStatefulSets(nonScalableStatefulSets []string) {", + "\tcertsuiteConfig.SkipScalingTestStatefulSets = nil", + "\tfor _, nonScalableStatefulSetStr := range nonScalableStatefulSets {", + "\t\tnonScalableStatefulSet := strings.Split(nonScalableStatefulSetStr, \"/\")", + "\t\tconst nonScalableStatefulSetFields = 2", + "\t\tif len(nonScalableStatefulSet) != nonScalableStatefulSetFields {", + "\t\t\tlog.Println(\"could not parse Non-scalable StatefulSet\")", + "\t\t\treturn", + "\t\t}", + "\t\tnonScalableStatefulSetName := nonScalableStatefulSet[0]", + "\t\tnonScalableStatefulSetNamespace := nonScalableStatefulSet[1]", + "\t\tcertsuiteNonScalableStatefulSet := configuration.SkipScalingTestStatefulSetsInfo{Name: nonScalableStatefulSetName,", + "\t\t\tNamespace: nonScalableStatefulSetNamespace}", + "\t\tcertsuiteConfig.SkipScalingTestStatefulSets = append(certsuiteConfig.SkipScalingTestStatefulSets, certsuiteNonScalableStatefulSet)", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := 
bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createConfiguration", + "kind": "function", + "source": [ + "func createConfiguration() {", + "\tcreateMenu := []configOption{", + "\t\t{Option: certSuiteResources, Help: certSuiteResourcesHelp},", + "\t\t{Option: exceptions, Help: exceptionsdHelp},", + "\t\t// {Option: collector, Help: collectordHelp},", + "\t\t{Option: settings, Help: settingsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "", + "\tcreatePrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: createMenu,", + "\t\tTemplates: templates,", + "\t\tSize: 5,", + "\t\tHideSelected: true,", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := createPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch createMenu[i].Option {", + "\t\tcase certSuiteResources:", + "\t\t\tcreateCertSuiteResourcesConfiguration()", + "\t\tcase exceptions:", + "\t\t\tcreateExceptionsConfiguration()", + "\t\t// case collector:", + "\t\t// \tcreateCollectorConfiguration()", + "\t\tcase settings:", + "\t\t\tcreateSettingsConfiguration()", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + 
"\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + "\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, 
nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "createSettingsConfiguration", + "qualifiedName": "createSettingsConfiguration", + "exported": false, + "signature": "func()()", + "doc": "createSettingsConfiguration Prompts user to configure Probe DaemonSet namespace\n\nThe function presents a menu with an option to set the Probe DaemonSet\nnamespace or return to the previous menu. When selected, it asks the user for\na comma‑separated list of namespaces, parses the input, and assigns the\nfirst value to the global configuration. The loop continues until the user\nchooses to exit.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:379", + "calls": [ + { + "name": "Run", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "loadProbeDaemonSetNamespace", + "kind": "function", + "source": [ + "func loadProbeDaemonSetNamespace(namespace []string) {", + "\tcertsuiteConfig.ProbeDaemonSetNamespace = namespace[0]", + "}" + ] + }, + { + "name": "getAnswer", + "kind": "function", + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = strings.TrimSpace(field)", + "\t}", + "", + 
"\treturn fields", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createConfiguration", + "kind": "function", + "source": [ + "func createConfiguration() {", + "\tcreateMenu := []configOption{", + "\t\t{Option: certSuiteResources, Help: certSuiteResourcesHelp},", + "\t\t{Option: exceptions, Help: exceptionsdHelp},", + "\t\t// {Option: collector, Help: collectordHelp},", + "\t\t{Option: settings, Help: settingsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "", + "\tcreatePrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: createMenu,", + "\t\tTemplates: templates,", + "\t\tSize: 5,", + "\t\tHideSelected: true,", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := createPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch createMenu[i].Option {", + "\t\tcase certSuiteResources:", + "\t\t\tcreateCertSuiteResourcesConfiguration()", + "\t\tcase exceptions:", + "\t\t\tcreateExceptionsConfiguration()", + "\t\t// case collector:", + "\t\t// \tcreateCollectorConfiguration()", + "\t\tcase settings:", + "\t\t\tcreateSettingsConfiguration()", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createSettingsConfiguration() {", + "\tsettingsOptions := []configOption{", + "\t\t{Option: probeDaemonSet, Help: probeDaemonSetHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tsettingsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: settingsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 2,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := settingsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", 
+ "\t\t}", + "\t\tswitch settingsOptions[i].Option {", + "\t\tcase probeDaemonSet:", + "\t\t\tloadProbeDaemonSetNamespace(getAnswer(probeDaemonSetPrompt, probeDaemonSetSyntax, probeDaemonSetExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "generateConfig", + "qualifiedName": "generateConfig", + "exported": false, + "signature": "func()()", + "doc": "generateConfig Launches an interactive menu for managing configuration\n\nWhen invoked, this routine displays a prompt with options to create, view,\nsave, or exit the configuration workflow. It loops until the user selects\nquit, calling helper functions to handle each action. Errors during prompt\nexecution are logged and cause an early return.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:67", + "calls": [ + { + "name": "Run", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "createConfiguration", + "kind": "function", + "source": [ + "func createConfiguration() {", + "\tcreateMenu := []configOption{", + "\t\t{Option: certSuiteResources, Help: certSuiteResourcesHelp},", + "\t\t{Option: exceptions, Help: exceptionsdHelp},", + "\t\t// {Option: collector, Help: collectordHelp},", + "\t\t{Option: settings, Help: settingsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "", + "\tcreatePrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: createMenu,", + "\t\tTemplates: templates,", + "\t\tSize: 5,", + "\t\tHideSelected: true,", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := createPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch createMenu[i].Option {", + "\t\tcase certSuiteResources:", + "\t\t\tcreateCertSuiteResourcesConfiguration()", + "\t\tcase exceptions:", + "\t\t\tcreateExceptionsConfiguration()", + "\t\t// case 
collector:", + "\t\t// \tcreateCollectorConfiguration()", + "\t\tcase settings:", + "\t\t\tcreateSettingsConfiguration()", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "showConfiguration", + "kind": "function", + "source": [ + "func showConfiguration(config *configuration.TestConfiguration) {", + "\tconfigYaml, err := yaml.Marshal(config)", + "\tif err != nil {", + "\t\tlog.Printf(\"could not marshal the YAML file, err: %v\", err)", + "\t\treturn", + "\t}", + "\tfmt.Println(\"================= Cert Suite CONFIGURATION =================\")", + "\tfmt.Println(string(configYaml))", + "\tfmt.Println(\"=====================================================\")", + "}" + ] + }, + { + "name": "saveConfiguration", + "kind": "function", + "source": [ + "func saveConfiguration(config *configuration.TestConfiguration) {", + "\tconfigYaml, err := yaml.Marshal(config)", + "\tif err != nil {", + "\t\tlog.Printf(\"could not marshal the YAML file, err: %v\", err)", + "\t\treturn", + "\t}", + "", + "\tsaveConfigPrompt := promptui.Prompt{", + "\t\tLabel: \"Cert Suite config file\",", + "\t\tDefault: defaultConfigFileName,", + "\t}", + "", + "\tconfigFileName, err := saveConfigPrompt.Run()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read config file name, err: %v\\n\", err)", + "\t\treturn", + "\t}", + "", + "\terr = os.WriteFile(configFileName, configYaml, defaultConfigFilePermissions)", + "\tif err != nil {", + "\t\tlog.Printf(\"could not write file, err: %v\", err)", + "\t\treturn", + "\t}", + "", + "\tfmt.Println(color.GreenString(\"Configuration saved\"))", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func generateConfig() {", + "\tmainMenu := []configOption{", + "\t\t{Option: create, Help: createConfigHelp},", + "\t\t{Option: show, Help: showConfigHelp},", + "\t\t{Option: save, Help: saveConfigHelp},", + "\t\t{Option: quit, Help: exitHelp},", + "\t}", + "", + 
"\tvar exit bool", + "\tfor !exit {", + "\t\tmainPrompt := promptui.Select{", + "\t\t\tLabel: \"\",", + "\t\t\tItems: mainMenu,", + "\t\t\tTemplates: templates,", + "\t\t\tSize: 4,", + "\t\t\tHideSelected: true,", + "\t\t}", + "", + "\t\topt, _, err := mainPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch mainMenu[opt].Option {", + "\t\tcase create:", + "\t\t\tcreateConfiguration()", + "\t\tcase show:", + "\t\t\tshowConfiguration(\u0026certsuiteConfig)", + "\t\tcase save:", + "\t\t\tsaveConfiguration(\u0026certsuiteConfig)", + "\t\tcase quit:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "getAnswer", + "qualifiedName": "getAnswer", + "exported": false, + "signature": "func(string, string, string)([]string)", + "doc": "getAnswer Collects a comma‑separated list of items from the user\n\nThe function displays a prompt with syntax and example guidance, then reads a\nsingle line of text from standard input. It splits the entered string on\ncommas, trims surrounding whitespace from each element, and returns the\nresulting slice of strings. 
If reading fails, it logs an error and returns\nnil.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:414", + "calls": [ + { + "pkgPath": "github.com/fatih/color", + "name": "HiCyanString", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "CyanString", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "WhiteString", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "CyanString", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "WhiteString", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "HiCyanString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + }, + { + "pkgPath": "bufio", + "name": "NewScanner", + "kind": "function" + }, + { + "name": "Scan", + "kind": "function" + }, + { + "name": "Err", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "Text", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input 
string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, podsExample))", + "\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + 
"\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + "\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, 
nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createSettingsConfiguration", + "kind": "function", + "source": [ + "func createSettingsConfiguration() {", + "\tsettingsOptions := []configOption{", + "\t\t{Option: probeDaemonSet, Help: probeDaemonSetHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tsettingsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: settingsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 2,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := settingsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch settingsOptions[i].Option {", + "\t\tcase probeDaemonSet:", + "\t\t\tloadProbeDaemonSetNamespace(getAnswer(probeDaemonSetPrompt, probeDaemonSetSyntax, probeDaemonSetExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAnswer(prompt, syntax, example string) []string {", + "\tfullPrompt := color.HiCyanString(\"%s\\n\", prompt) +", + "\t\tcolor.CyanString(\"Syntax: \") + color.WhiteString(\"%s\\n\", syntax) +", + "\t\tcolor.CyanString(\"Example: \") + color.WhiteString(\"%s\\n\", example) + color.HiCyanString(\"\u003e \")", + "\tfmt.Print(fullPrompt)", + "", + "\tscanner := bufio.NewScanner(os.Stdin)", + "\tscanner.Scan()", + "\terr := scanner.Err()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read user input, err: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\t// Split CSV string by ',' and remove any whitespace", + "\tfields := strings.Split(scanner.Text(), \",\")", + "\tfor i, field := range fields {", + "\t\tfields[i] = 
strings.TrimSpace(field)", + "\t}", + "", + "\treturn fields", + "}" + ] + }, + { + "name": "loadAcceptedKernelTaints", + "qualifiedName": "loadAcceptedKernelTaints", + "exported": false, + "signature": "func([]string)()", + "doc": "loadAcceptedKernelTaints stores a list of accepted kernel taints in the configuration\n\nThe function clears any previously stored taint entries, then iterates over\nthe supplied slice. For each taint string it creates a new struct containing\nthe module name and appends it to the global configuration slice. The\nresulting list is used by the tool when evaluating cluster readiness.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:533", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + "\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + 
"\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + "\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadAcceptedKernelTaints(taints []string) {", + "\tcertsuiteConfig.AcceptedKernelTaints = nil", + "\tfor _, taint := range taints {", + "\t\tcertsuiteKernelTaint := configuration.AcceptedKernelTaintsInfo{Module: taint}", + "\t\tcertsuiteConfig.AcceptedKernelTaints = append(certsuiteConfig.AcceptedKernelTaints, certsuiteKernelTaint)", + "\t}", + "}" + ] + }, + { + "name": "loadCRDfilters", + "qualifiedName": "loadCRDfilters", + "exported": false, + "signature": "func([]string)()", + "doc": "loadCRDfilters parses CRD filter strings into configuration objects\n\nThe function clears the existing list of CRD filters, then iterates over each\nsupplied string. 
Each string is split on a slash to extract a name suffix and\na boolean flag indicating scalability; it converts the second part to a bool,\nlogs an error if conversion fails, and appends a new filter structure to the\nglobal configuration.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:481", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "ParseBool", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := 
certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, podsExample))", + "\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadCRDfilters(crdFilters []string) {", + "\tcertsuiteConfig.CrdFilters = nil", + "\tfor _, crdFilterStr := range crdFilters {", + "\t\tcrdFilter := strings.Split(crdFilterStr, \"/\")", + "\t\tcrdFilterName := crdFilter[0]", + "\t\tcrdFilterScalable, err := strconv.ParseBool(crdFilter[1])", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"could not parse CRD filter, err: %v\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tcertsuiteCrdFilter := configuration.CrdFilter{NameSuffix: crdFilterName, Scalable: crdFilterScalable}", + "\t\tcertsuiteConfig.CrdFilters = append(certsuiteConfig.CrdFilters, certsuiteCrdFilter)", + "\t}", + "}" + ] + }, + { + "name": "loadHelmCharts", + "qualifiedName": "loadHelmCharts", + "exported": false, + "signature": "func([]string)()", + "doc": "loadHelmCharts Stores specified Helm chart names to skip during configuration\n\nThe function receives a 
slice of chart identifiers and resets the global skip\nlist before adding each entry as a new configuration object. Each name is\nwrapped in a struct that represents an item to be excluded from processing.\nThe resulting list is used elsewhere to avoid handling those Helm charts.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:547", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + "\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + 
"\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + "\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadHelmCharts(helmCharts []string) {", + "\tcertsuiteConfig.SkipHelmChartList = nil", + "\tfor _, chart := range helmCharts {", + "\t\tcertsuiteHelmChart := configuration.SkipHelmChartList{Name: chart}", + "\t\tcertsuiteConfig.SkipHelmChartList = append(certsuiteConfig.SkipHelmChartList, certsuiteHelmChart)", + "\t}", + "}" + ] + }, + { + "name": "loadManagedDeployments", + "qualifiedName": "loadManagedDeployments", + "exported": false, + "signature": "func([]string)()", + "doc": "loadManagedDeployments Populates the list of deployments to be managed\n\nThe function receives a slice of deployment names, clears any previously\nstored deployments in the global configuration, and then iterates over each\nname. For every entry it creates a new ManagedDeploymentsStatefulsets object\nwith the name field set, appending this object to the configuration’s\nManagedDeployments list. 
This prepares the configuration for subsequent\nresource generation.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:504", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, 
podsExample))", + "\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadManagedDeployments(deployments []string) {", + "\tcertsuiteConfig.ManagedDeployments = nil", + "\tfor _, deployment := range deployments {", + "\t\tcertsuiteManagedDeployment := configuration.ManagedDeploymentsStatefulsets{Name: deployment}", + "\t\tcertsuiteConfig.ManagedDeployments = append(certsuiteConfig.ManagedDeployments, certsuiteManagedDeployment)", + "\t}", + "}" + ] + }, + { + "name": "loadManagedStatefulSets", + "qualifiedName": "loadManagedStatefulSets", + "exported": false, + "signature": "func([]string)()", + "doc": "loadManagedStatefulSets Stores user-selected StatefulSet names for later configuration\n\nThis routine clears any previously stored StatefulSet entries in the global\nconfiguration, then iterates over each supplied name. For every name it\ncreates a lightweight structure containing that name and appends it to the\nlist of managed StatefulSets maintained by the application. 
The function has\nno return value but updates shared state used by subsequent setup steps.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:519", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, 
podsSyntax, podsExample))", + "\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadManagedStatefulSets(statefulSets []string) {", + "\tcertsuiteConfig.ManagedStatefulsets = nil", + "\tfor _, statefulSet := range statefulSets {", + "\t\tcertsuiteManagedStatefulSet := configuration.ManagedDeploymentsStatefulsets{Name: statefulSet}", + "\t\tcertsuiteConfig.ManagedStatefulsets = append(certsuiteConfig.ManagedStatefulsets, certsuiteManagedStatefulSet)", + "\t}", + "}" + ] + }, + { + "name": "loadNamespaces", + "qualifiedName": "loadNamespaces", + "exported": false, + "signature": "func([]string)()", + "doc": "loadNamespaces Stores selected namespaces in the configuration\n\nThis routine receives a slice of namespace names, clears any previously\nstored target namespaces, and then appends each provided name as a Namespace\nstruct to the global configuration list. 
It modifies the config in place\nwithout returning a value.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:443", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, podsExample))", + 
"\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadNamespaces(namespaces []string) {", + "\tcertsuiteConfig.TargetNameSpaces = nil", + "\tfor _, namespace := range namespaces {", + "\t\tcertsuiteNamespace := configuration.Namespace{Name: namespace}", + "\t\tcertsuiteConfig.TargetNameSpaces = append(certsuiteConfig.TargetNameSpaces, certsuiteNamespace)", + "\t}", + "}" + ] + }, + { + "name": "loadNonScalableDeployments", + "qualifiedName": "loadNonScalableDeployments", + "exported": false, + "signature": "func([]string)()", + "doc": "loadNonScalableDeployments parses a list of non-scalable deployments to skip scaling tests\n\nThe function receives an array of strings where each entry contains a\ndeployment name and namespace separated by a slash. It clears any previously\nstored entries, then splits each string into its two parts; if the format is\ninvalid it logs an error and aborts. 
Valid pairs are converted into\nconfiguration objects that are appended to the global skip list.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:585", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Println", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + "\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", 
+ "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + "\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadNonScalableDeployments(nonScalableDeployments []string) {", + "\tcertsuiteConfig.SkipScalingTestDeployments = nil", + "\tfor _, nonScalableDeploymentStr := range nonScalableDeployments {", + "\t\tnonScalableDeployment := strings.Split(nonScalableDeploymentStr, \"/\")", + "\t\tconst nonScalableDeploymentsFields = 2", + "\t\tif len(nonScalableDeployment) != nonScalableDeploymentsFields {", + "\t\t\tlog.Println(\"could not parse Non-scalable Deployment\")", + "\t\t\treturn", + "\t\t}", + "\t\tnonScalableDeploymentName := nonScalableDeployment[0]", + "\t\tnonScalableDeploymentNamespace := nonScalableDeployment[1]", + "\t\tcertsuiteNonScalableDeployment := configuration.SkipScalingTestDeploymentsInfo{Name: nonScalableDeploymentName,", + "\t\t\tNamespace: nonScalableDeploymentNamespace}", + "\t\tcertsuiteConfig.SkipScalingTestDeployments = append(certsuiteConfig.SkipScalingTestDeployments, certsuiteNonScalableDeployment)", + "\t}", + "}" + ] + }, + { + "name": "loadNonScalableStatefulSets", + 
"qualifiedName": "loadNonScalableStatefulSets", + "exported": false, + "signature": "func([]string)()", + "doc": "loadNonScalableStatefulSets Parses a list of non-scalable StatefulSet identifiers to skip scaling tests\n\nThe function takes an array of strings, each expected in the form\n\"name/namespace\", splits them into name and namespace components, validates\nthe format, and appends the parsed information to a global configuration\nslice. If any string does not contain exactly two parts separated by a slash,\nit logs an error and aborts further processing.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:609", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Println", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + "\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt 
:= promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + "\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadNonScalableStatefulSets(nonScalableStatefulSets []string) {", + "\tcertsuiteConfig.SkipScalingTestStatefulSets = nil", + "\tfor _, nonScalableStatefulSetStr := range nonScalableStatefulSets {", + "\t\tnonScalableStatefulSet := strings.Split(nonScalableStatefulSetStr, \"/\")", + "\t\tconst nonScalableStatefulSetFields = 2", + "\t\tif len(nonScalableStatefulSet) != nonScalableStatefulSetFields {", + "\t\t\tlog.Println(\"could not parse Non-scalable StatefulSet\")", + "\t\t\treturn", + "\t\t}", + "\t\tnonScalableStatefulSetName := 
nonScalableStatefulSet[0]", + "\t\tnonScalableStatefulSetNamespace := nonScalableStatefulSet[1]", + "\t\tcertsuiteNonScalableStatefulSet := configuration.SkipScalingTestStatefulSetsInfo{Name: nonScalableStatefulSetName,", + "\t\t\tNamespace: nonScalableStatefulSetNamespace}", + "\t\tcertsuiteConfig.SkipScalingTestStatefulSets = append(certsuiteConfig.SkipScalingTestStatefulSets, certsuiteNonScalableStatefulSet)", + "\t}", + "}" + ] + }, + { + "name": "loadOperatorLabels", + "qualifiedName": "loadOperatorLabels", + "exported": false, + "signature": "func([]string)()", + "doc": "loadOperatorLabels Updates the configuration with new operator labels\n\nThis function replaces any previously stored operator labels in the global\nconfiguration with a fresh list provided as input. It first resets the\ncurrent label collection to an empty state and then assigns the supplied\nslice, ensuring that subsequent operations use only the latest set of labels.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:469", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + 
"\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, podsExample))", + "\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadOperatorLabels(operatorLabels []string) {", + "\tcertsuiteConfig.OperatorsUnderTestLabels = nil", + "\tcertsuiteConfig.OperatorsUnderTestLabels = operatorLabels", + "}" + ] + }, + { + "name": "loadPodLabels", + "qualifiedName": "loadPodLabels", + "exported": false, + "signature": "func([]string)()", + "doc": "loadPodLabels Stores user-specified pod labels for later configuration\n\nThis routine clears any existing pod label settings 
and then assigns the\nsupplied slice of strings to the global configuration structure. It is\ninvoked after the user selects pod labels from an interactive prompt,\nensuring that only the chosen labels are retained. No value is returned; the\neffect is visible through the updated configuration state.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:458", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createCertSuiteResourcesConfiguration", + "kind": "function", + "source": [ + "func createCertSuiteResourcesConfiguration() {", + "\tcertSuiteResourcesOptions := []configOption{", + "\t\t{Option: namespaces, Help: namespacesHelp},", + "\t\t{Option: pods, Help: podLabelsHelp},", + "\t\t{Option: operators, Help: operatorLabelsHelp},", + "\t\t{Option: crdFilters, Help: crdFiltersHelp},", + "\t\t{Option: managedDeployments, Help: managedDeploymentsHelp},", + "\t\t{Option: managedStatefulSets, Help: managedStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tcertSuiteResourcesSearcher := func(input string, index int) bool {", + "\t\tbasicOption := certSuiteResourcesOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(basicOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\tcertSuiteResourcesPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: certSuiteResourcesOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: certSuiteResourcesSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := certSuiteResourcesPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch certSuiteResourcesOptions[i].Option {", + "\t\tcase 
namespaces:", + "\t\t\tloadNamespaces(getAnswer(namespacePrompt, namespaceSyntax, namespaceExample))", + "\t\tcase pods:", + "\t\t\tloadPodLabels(getAnswer(podsPrompt, podsSyntax, podsExample))", + "\t\tcase operators:", + "\t\t\tloadOperatorLabels(getAnswer(operatorsPrompt, operatorsSyntax, operatorsExample))", + "\t\tcase crdFilters:", + "\t\t\tloadCRDfilters(getAnswer(crdFiltersPrompt, crdFiltersSyntax, crdFiltersExample))", + "\t\tcase managedDeployments:", + "\t\t\tloadManagedDeployments(getAnswer(managedDeploymentsPrompt, managedDeploymentsSyntax, managedDeploymentsExample))", + "\t\tcase managedStatefulSets:", + "\t\t\tloadManagedStatefulSets(getAnswer(managedStatefulSetsPrompt, managedStatefulSetsSyntax, managedStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadPodLabels(podLabels []string) {", + "\tcertsuiteConfig.PodsUnderTestLabels = nil", + "\tcertsuiteConfig.PodsUnderTestLabels = podLabels", + "}" + ] + }, + { + "name": "loadProbeDaemonSetNamespace", + "qualifiedName": "loadProbeDaemonSetNamespace", + "exported": false, + "signature": "func([]string)()", + "doc": "loadProbeDaemonSetNamespace Sets the Probe DaemonSet namespace in the configuration\n\nThe function receives a list of strings and assigns the first element to the\nProbeDaemonSetNamespace field of the shared configuration object. 
It assumes\nthat the slice contains at least one entry and uses it directly without\nvalidation or conversion.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:632", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createSettingsConfiguration", + "kind": "function", + "source": [ + "func createSettingsConfiguration() {", + "\tsettingsOptions := []configOption{", + "\t\t{Option: probeDaemonSet, Help: probeDaemonSetHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\tsettingsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: settingsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 2,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := settingsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch settingsOptions[i].Option {", + "\t\tcase probeDaemonSet:", + "\t\t\tloadProbeDaemonSetNamespace(getAnswer(probeDaemonSetPrompt, probeDaemonSetSyntax, probeDaemonSetExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadProbeDaemonSetNamespace(namespace []string) {", + "\tcertsuiteConfig.ProbeDaemonSetNamespace = namespace[0]", + "}" + ] + }, + { + "name": "loadProtocolNames", + "qualifiedName": "loadProtocolNames", + "exported": false, + "signature": "func([]string)()", + "doc": "loadProtocolNames stores a list of acceptable protocol names\n\nThis function replaces the current collection of valid protocol identifiers\nin the configuration with a new slice supplied by the caller. It first clears\nany previously stored values to avoid residual data, then assigns the\nprovided slice directly to the global configuration variable. 
No return value\nis produced.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:562", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + "\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + 
"\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadProtocolNames(protocolNames []string) {", + "\tcertsuiteConfig.ValidProtocolNames = nil", + "\tcertsuiteConfig.ValidProtocolNames = protocolNames", + "}" + ] + }, + { + "name": "loadServices", + "qualifiedName": "loadServices", + "exported": false, + "signature": "func([]string)()", + "doc": "loadServices sets the list of services to ignore\n\nThe function replaces any existing ignored service entries with a new slice\nprovided as input. It first clears the current configuration's ignore list,\nthen assigns the supplied list directly. 
The resulting configuration is used\nelsewhere to skip checks for these services.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:573", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "createExceptionsConfiguration", + "kind": "function", + "source": [ + "func createExceptionsConfiguration() {", + "\texceptionsOptions := []configOption{", + "\t\t{Option: kernelTaints, Help: kernelTaintsHelp},", + "\t\t{Option: helmCharts, Help: helmChartsHelp},", + "\t\t{Option: protocolNames, Help: protocolNamesHelp},", + "\t\t{Option: services, Help: servicesHelp},", + "\t\t{Option: nonScalableDeployments, Help: nonScalableDeploymentsHelp},", + "\t\t{Option: nonScalableStatefulSets, Help: nonScalableStatefulSetsHelp},", + "\t\t{Option: previousMenu, Help: backHelp},", + "\t}", + "\texceptionsSearcher := func(input string, index int) bool {", + "\t\texceptionOption := exceptionsOptions[index]", + "\t\tname := strings.ReplaceAll(strings.ToLower(exceptionOption.Option), \" \", \"\")", + "\t\tinput = strings.ReplaceAll(strings.ToLower(input), \" \", \"\")", + "", + "\t\treturn strings.Contains(name, input)", + "\t}", + "\texceptionsPrompt := promptui.Select{", + "\t\tLabel: \"\",", + "\t\tItems: exceptionsOptions,", + "\t\tTemplates: templates,", + "\t\tSize: 7,", + "\t\tSearcher: exceptionsSearcher,", + "\t\tHideSelected: true,", + "\t}", + "\tvar exit bool", + "\tfor !exit {", + "\t\ti, _, err := exceptionsPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch exceptionsOptions[i].Option {", + "\t\tcase kernelTaints:", + "\t\t\tloadAcceptedKernelTaints(getAnswer(kernelTaintsPrompt, kernelTaintsSyntax, kernelTaintsExample))", + "\t\tcase helmCharts:", + "\t\t\tloadHelmCharts(getAnswer(helmChartsPrompt, helmChartsSyntax, helmChartsExample))", + "\t\tcase protocolNames:", + 
"\t\t\tloadProtocolNames(getAnswer(protocolNamesPrompt, protocolNamesSyntax, protocolNamesExample))", + "\t\tcase services:", + "\t\t\tloadServices(getAnswer(servicesPrompt, servicesSyntax, servicesExample))", + "\t\tcase nonScalableDeployments:", + "\t\t\tloadNonScalableDeployments(getAnswer(nonScalableDeploymentsPrompt, nonScalableDeploymentsSyntax, nonScalableDeploymentsExample))", + "\t\tcase nonScalableStatefulSets:", + "\t\t\tloadNonScalableStatefulSets(getAnswer(nonScalableStatefulSetsPrompt, nonScalableStatefulSetsSyxtax, nonScalableStatefulSetsExample))", + "\t\tcase previousMenu:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func loadServices(services []string) {", + "\tcertsuiteConfig.ServicesIgnoreList = nil", + "\tcertsuiteConfig.ServicesIgnoreList = services", + "}" + ] + }, + { + "name": "saveConfiguration", + "qualifiedName": "saveConfiguration", + "exported": false, + "signature": "func(*configuration.TestConfiguration)()", + "doc": "saveConfiguration Saves the current configuration to a YAML file\n\nThe function converts a TestConfiguration struct into YAML, prompts the user\nfor a filename with a default suggestion, writes the data to that file with\nappropriate permissions, and prints a success message. 
If any step\nfails—marshalling, prompting, or writing—it logs an error and aborts\nwithout returning a value.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:173", + "calls": [ + { + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "Run", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "WriteFile", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "github.com/fatih/color", + "name": "GreenString", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "generateConfig", + "kind": "function", + "source": [ + "func generateConfig() {", + "\tmainMenu := []configOption{", + "\t\t{Option: create, Help: createConfigHelp},", + "\t\t{Option: show, Help: showConfigHelp},", + "\t\t{Option: save, Help: saveConfigHelp},", + "\t\t{Option: quit, Help: exitHelp},", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\tmainPrompt := promptui.Select{", + "\t\t\tLabel: \"\",", + "\t\t\tItems: mainMenu,", + "\t\t\tTemplates: templates,", + "\t\t\tSize: 4,", + "\t\t\tHideSelected: true,", + "\t\t}", + "", + "\t\topt, _, err := mainPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch mainMenu[opt].Option {", + "\t\tcase create:", + "\t\t\tcreateConfiguration()", + "\t\tcase show:", + "\t\t\tshowConfiguration(\u0026certsuiteConfig)", + "\t\tcase save:", + "\t\t\tsaveConfiguration(\u0026certsuiteConfig)", + "\t\tcase quit:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
saveConfiguration(config *configuration.TestConfiguration) {", + "\tconfigYaml, err := yaml.Marshal(config)", + "\tif err != nil {", + "\t\tlog.Printf(\"could not marshal the YAML file, err: %v\", err)", + "\t\treturn", + "\t}", + "", + "\tsaveConfigPrompt := promptui.Prompt{", + "\t\tLabel: \"Cert Suite config file\",", + "\t\tDefault: defaultConfigFileName,", + "\t}", + "", + "\tconfigFileName, err := saveConfigPrompt.Run()", + "\tif err != nil {", + "\t\tlog.Printf(\"could not read config file name, err: %v\\n\", err)", + "\t\treturn", + "\t}", + "", + "\terr = os.WriteFile(configFileName, configYaml, defaultConfigFilePermissions)", + "\tif err != nil {", + "\t\tlog.Printf(\"could not write file, err: %v\", err)", + "\t\treturn", + "\t}", + "", + "\tfmt.Println(color.GreenString(\"Configuration saved\"))", + "}" + ] + }, + { + "name": "showConfiguration", + "qualifiedName": "showConfiguration", + "exported": false, + "signature": "func(*configuration.TestConfiguration)()", + "doc": "showConfiguration Displays the current configuration in YAML format\n\nThe function serializes a TestConfiguration object into YAML and prints it to\nstandard output, surrounded by header and footer lines for readability. 
If\nmarshaling fails, it logs an error message and exits without printing\nanything.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:155", + "calls": [ + { + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config", + "name": "generateConfig", + "kind": "function", + "source": [ + "func generateConfig() {", + "\tmainMenu := []configOption{", + "\t\t{Option: create, Help: createConfigHelp},", + "\t\t{Option: show, Help: showConfigHelp},", + "\t\t{Option: save, Help: saveConfigHelp},", + "\t\t{Option: quit, Help: exitHelp},", + "\t}", + "", + "\tvar exit bool", + "\tfor !exit {", + "\t\tmainPrompt := promptui.Select{", + "\t\t\tLabel: \"\",", + "\t\t\tItems: mainMenu,", + "\t\t\tTemplates: templates,", + "\t\t\tSize: 4,", + "\t\t\tHideSelected: true,", + "\t\t}", + "", + "\t\topt, _, err := mainPrompt.Run()", + "\t\tif err != nil {", + "\t\t\tlog.Printf(\"Prompt failed %v\\n\", err)", + "\t\t\treturn", + "\t\t}", + "\t\tswitch mainMenu[opt].Option {", + "\t\tcase create:", + "\t\t\tcreateConfiguration()", + "\t\tcase show:", + "\t\t\tshowConfiguration(\u0026certsuiteConfig)", + "\t\tcase save:", + "\t\t\tsaveConfiguration(\u0026certsuiteConfig)", + "\t\tcase quit:", + "\t\t\texit = true", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func showConfiguration(config *configuration.TestConfiguration) {", + "\tconfigYaml, err := yaml.Marshal(config)", + "\tif err != nil {", + "\t\tlog.Printf(\"could not marshal the YAML file, err: %v\", err)", + 
"\t\treturn", + "\t}", + "\tfmt.Println(\"================= Cert Suite CONFIGURATION =================\")", + "\tfmt.Println(string(configYaml))", + "\tfmt.Println(\"=====================================================\")", + "}" + ] + } + ], + "globals": [ + { + "name": "certsuiteConfig", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:50" + }, + { + "name": "generateConfigCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:41" + }, + { + "name": "templates", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/config.go:52" + } + ], + "consts": [ + { + "name": "appEndPoint", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:33" + }, + { + "name": "appPassword", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:36" + }, + { + "name": "backHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:48" + }, + { + "name": "certSuiteResources", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:13" + }, + { + "name": "certSuiteResourcesHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:50" + }, + { + "name": "collector", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:15" + }, + { + "name": "collectordHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:57" + 
}, + { + "name": "crdFilters", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:22" + }, + { + "name": "crdFiltersExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:135" + }, + { + "name": "crdFiltersHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:74" + }, + { + "name": "crdFiltersPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:132" + }, + { + "name": "crdFiltersSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:134" + }, + { + "name": "create", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:8" + }, + { + "name": "createConfigHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:44" + }, + { + "name": "defaultConfigFileName", + "exported": false, + "doc": "Internal constants", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:170" + }, + { + "name": "defaultConfigFilePermissions", + "exported": false, + "doc": "Internal constants", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:171" + }, + { + "name": "exceptions", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:14" + }, + { + "name": "exceptionsdHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:53" + }, + { + 
"name": "executedBy", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:34" + }, + { + "name": "exitHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:47" + }, + { + "name": "helmCharts", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:27" + }, + { + "name": "helmChartsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:148" + }, + { + "name": "helmChartsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:91" + }, + { + "name": "helmChartsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:146" + }, + { + "name": "helmChartsSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:147" + }, + { + "name": "kernelTaints", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:26" + }, + { + "name": "kernelTaintsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:145" + }, + { + "name": "kernelTaintsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:88" + }, + { + "name": "kernelTaintsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:143" + }, + { + "name": 
"kernelTaintsSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:144" + }, + { + "name": "managedDeployments", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:23" + }, + { + "name": "managedDeploymentsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:138" + }, + { + "name": "managedDeploymentsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:77" + }, + { + "name": "managedDeploymentsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:136" + }, + { + "name": "managedDeploymentsSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:137" + }, + { + "name": "managedStatefulSets", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:24" + }, + { + "name": "managedStatefulSetsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:141" + }, + { + "name": "managedStatefulSetsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:82" + }, + { + "name": "managedStatefulSetsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:139" + }, + { + "name": "managedStatefulSetsSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + 
"position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:140" + }, + { + "name": "namespaceExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:125" + }, + { + "name": "namespacePrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:123" + }, + { + "name": "namespaceSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:124" + }, + { + "name": "namespaces", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:19" + }, + { + "name": "namespacesHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:60" + }, + { + "name": "nonScalableDeployments", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:30" + }, + { + "name": "nonScalableDeploymentsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:157" + }, + { + "name": "nonScalableDeploymentsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:104" + }, + { + "name": "nonScalableDeploymentsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:155" + }, + { + "name": "nonScalableDeploymentsSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:156" + }, + { + "name": 
"nonScalableStatefulSets", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:31" + }, + { + "name": "nonScalableStatefulSetsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:160" + }, + { + "name": "nonScalableStatefulSetsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:107" + }, + { + "name": "nonScalableStatefulSetsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:158" + }, + { + "name": "nonScalableStatefulSetsSyxtax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:159" + }, + { + "name": "operatorLabelsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:70" + }, + { + "name": "operators", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:21" + }, + { + "name": "operatorsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:131" + }, + { + "name": "operatorsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:129" + }, + { + "name": "operatorsSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:130" + }, + { + "name": "partnerName", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": 
"/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:35" + }, + { + "name": "podLabelsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:62" + }, + { + "name": "pods", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:20" + }, + { + "name": "podsExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:128" + }, + { + "name": "podsPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:126" + }, + { + "name": "podsSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:127" + }, + { + "name": "previousMenu", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:17" + }, + { + "name": "probeDaemonSet", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:38" + }, + { + "name": "probeDaemonSetExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:165" + }, + { + "name": "probeDaemonSetHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:112" + }, + { + "name": "probeDaemonSetPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:163" + }, + { + "name": "probeDaemonSetSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + 
"position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:164" + }, + { + "name": "protocolNames", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:28" + }, + { + "name": "protocolNamesExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:151" + }, + { + "name": "protocolNamesHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:95" + }, + { + "name": "protocolNamesPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:149" + }, + { + "name": "protocolNamesSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:150" + }, + { + "name": "quit", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:11" + }, + { + "name": "save", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:10" + }, + { + "name": "saveConfigHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:46" + }, + { + "name": "services", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:29" + }, + { + "name": "servicesExample", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:154" + }, + { + "name": "servicesHelp", + "exported": false, + "doc": "Menu help", + "position": 
"/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:100" + }, + { + "name": "servicesPrompt", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:152" + }, + { + "name": "servicesSyntax", + "exported": false, + "doc": "Prompts, syxtax, examples", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:153" + }, + { + "name": "settings", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:16" + }, + { + "name": "settingsHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:58" + }, + { + "name": "show", + "exported": false, + "doc": "Menu names\n\nnolint:unused", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:9" + }, + { + "name": "showConfigHelp", + "exported": false, + "doc": "Menu help", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/config/const.go:45" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/feedback", + "name": "feedback", + "files": 1, + "imports": [ + "encoding/json", + "fmt", + "github.com/spf13/cobra", + "log", + "os", + "path/filepath" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a command to generate feedback.js from a JSON file\n\nIt defines flags for the input JSON path and output directory, marking both\nas required. If flag validation fails, it logs a fatal error. 
The function\nreturns the configured cobra.Command instance.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/feedback/feedback.go:84", + "calls": [ + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerate.AddCommand(catalog.NewCommand())", + "\tgenerate.AddCommand(feedback.NewCommand())", + "\tgenerate.AddCommand(config.NewCommand())", + "\tgenerate.AddCommand(qecoverage.NewCommand())", + "", + "\treturn generate", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerateFeedbackJsFile.Flags().StringVarP(", + "\t\t\u0026feedbackJSONFilePath, \"feedback\", \"f\", \"\",", + "\t\t\"path to the feedback.json file\")", + "", + "\terr := generateFeedbackJsFile.MarkFlagRequired(\"feedback\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"failed to mark feedback flag as required: :%v\", err)", + "\t\treturn nil", + "\t}", + "\tgenerateFeedbackJsFile.Flags().StringVarP(", + "\t\t\u0026feedbackOutputPath, \"outputPath\", \"o\", \"\",", + "\t\t\"path to create on it the feedback.js file\")", + "", + "\terr = generateFeedbackJsFile.MarkFlagRequired(\"outputPath\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"failed to mark outputPath flag as required: :%v\", err)", + "\t\treturn nil", + "\t}", + "\treturn generateFeedbackJsFile", + "}" + ] + }, + { + "name": 
"runGenerateFeedbackJsFile", + "qualifiedName": "runGenerateFeedbackJsFile", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "runGenerateFeedbackJsFile Creates a JavaScript file containing feedback data\n\nThe function reads a JSON file with feedback information, parses it into a\nmap, formats the data with indentation, and writes it to a new JavaScript\nfile prefixed by 'feedback=' in the specified output directory. It logs the\nresulting string to standard output and returns any errors encountered during\nreading, unmarshalling, or writing.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/feedback/feedback.go:48", + "calls": [ + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "MarshalIndent", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Create", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runGenerateFeedbackJsFile(_ *cobra.Command, _ []string) error {", + "\tdat, err := os.ReadFile(feedbackJSONFilePath)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to read json feedback file: %v\", err)", + "\t}", + "\tvar obj 
map[string]interface{}", + "\terr = json.Unmarshal(dat, \u0026obj)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to unmarshal json feedback file %s: %v\", feedbackJSONFilePath, err)", + "\t}", + "", + "\t// Print the JSON content", + "\tjsonBytes, err := json.MarshalIndent(obj, \"\", \" \")", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to marshal feedback js content: %v\", err)", + "\t}", + "\tfeedbackJsFilePath := filepath.Join(feedbackOutputPath, \"feedback.js\")", + "\tfile, err := os.Create(feedbackJsFilePath)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create javascript feedback file: %v\", err)", + "\t}", + "\tfeedbackjs := \"feedback=\"", + "\t_, err = file.WriteString(feedbackjs + string(jsonBytes))", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to write javascript feedback file: %v\", err)", + "\t}", + "", + "\tfmt.Println(feedbackjs + string(jsonBytes))", + "\treturn nil", + "}" + ] + } + ], + "globals": [ + { + "name": "feedbackJSONFilePath", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/feedback/feedback.go:30" + }, + { + "name": "feedbackOutputPath", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/feedback/feedback.go:31" + }, + { + "name": "generateFeedbackJsFile", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/feedback/feedback.go:34" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage", + "name": "qecoverage", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/spf13/cobra", + "sort", + "strings" + ], + "structs": [ + { + "name": "TestCoverageSummaryReport", + "exported": true, + "doc": 
"TestCoverageSummaryReport Provides a snapshot of QE coverage across test suites\n\nThis struct holds overall statistics such as total test cases, those covered\nby QE, and the percentage of coverage. It also maps each suite name to its\nown TestSuiteQeCoverage record for detailed per-suite information. The data\nis used by reporting functions to display coverage metrics.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:23", + "fields": { + "CoverageByTestSuite": "map[string]TestSuiteQeCoverage", + "TestCasesTotal": "int", + "TestCasesWithQe": "int", + "TotalCoveragePercentage": "float32" + }, + "methodNames": null, + "source": [ + "type TestCoverageSummaryReport struct {", + "\tCoverageByTestSuite map[string]TestSuiteQeCoverage", + "\tTotalCoveragePercentage float32", + "\tTestCasesTotal int", + "\tTestCasesWithQe int", + "}" + ] + }, + { + "name": "TestSuiteQeCoverage", + "exported": true, + "doc": "TestSuiteQeCoverage Represents coverage statistics for a test suite\n\nThis structure holds counts of total test cases, how many include QE-specific\ntests, and the calculated percentage coverage. It also tracks any test cases\nthat are not yet implemented. 
The data can be used to assess overall quality\nand identify gaps in QE integration.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:36", + "fields": { + "Coverage": "float32", + "NotImplementedTestCases": "[]string", + "TestCases": "int", + "TestCasesWithQe": "int" + }, + "methodNames": null, + "source": [ + "type TestSuiteQeCoverage struct {", + "\tTestCases int", + "\tTestCasesWithQe int", + "\tCoverage float32", + "\tNotImplementedTestCases []string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "GetQeCoverage", + "qualifiedName": "GetQeCoverage", + "exported": true, + "signature": "func(map[claim.Identifier]claim.TestCaseDescription)(TestCoverageSummaryReport)", + "doc": "GetQeCoverage Calculates overall and per-suite QE coverage statistics\n\nThe function iterates over a catalog of test case descriptions, counting\ntotal cases, those marked for QE, and noting which are not implemented. It\naggregates these counts by test suite, computing a percentage coverage for\neach suite using a multiplier factor. 
Finally, it returns a summary report\ncontaining per-suite data, overall coverage, and total counts.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:109", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "float32", + "kind": "function" + }, + { + "name": "float32", + "kind": "function" + }, + { + "name": "float32", + "kind": "function" + }, + { + "name": "float32", + "kind": "function" + }, + { + "name": "float32", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage", + "name": "showQeCoverageSummaryReport", + "kind": "function", + "source": [ + "func showQeCoverageSummaryReport() {", + "\tqeCoverage := GetQeCoverage(identifiers.Catalog)", + "", + "\t// Order test suite names so the report is in ascending test suite name order.", + "\ttestSuites := []string{}", + "\tfor suite := range qeCoverage.CoverageByTestSuite {", + "\t\ttestSuites = append(testSuites, suite)", + "\t}", + "\tsort.Strings(testSuites)", + "", + "\t// QE Coverage details", + "\tfmt.Printf(\"Total QE Coverage: %.f%%\\n\\n\", qeCoverage.TotalCoveragePercentage)", + "\tfmt.Printf(\"Total Test Cases: %d, Total QE Test Cases: %d\\n\\n\", qeCoverage.TestCasesTotal, qeCoverage.TestCasesWithQe)", + "", + "\t// Per test suite QE coverage", + "\tfmt.Printf(\"%-30s\\t%-20s\\t%-20s\\t%s\\n\", \"Test Suite Name\", \"QE Coverage\", \"Total Test Cases\", \"Not Covered Test Count\")", + "\tfor _, suite := range testSuites {", + "\t\ttsCoverage := qeCoverage.CoverageByTestSuite[suite]", + "\t\tfmt.Printf(\"%-30s\\t%.0f%%\\t%30d\\t%10d\\n\", suite, tsCoverage.Coverage, tsCoverage.TestCases, tsCoverage.TestCases-tsCoverage.TestCasesWithQe)", + "\t}", + "", + "\tfmt.Println()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetQeCoverage(catalog map[claim.Identifier]claim.TestCaseDescription) 
TestCoverageSummaryReport {", + "\ttotalTcs := 0", + "\ttotalTcsWithQe := 0", + "", + "\tqeCoverageByTestSuite := map[string]TestSuiteQeCoverage{}", + "", + "\tfor claimID := range catalog {", + "\t\ttotalTcs++", + "", + "\t\ttcDescription := catalog[claimID]", + "", + "\t\ttsName := tcDescription.Identifier.Suite", + "", + "\t\ttsQeCoverage, exists := qeCoverageByTestSuite[tsName]", + "\t\tif !exists {", + "\t\t\ttsQeCoverage = TestSuiteQeCoverage{}", + "\t\t}", + "", + "\t\ttsQeCoverage.TestCases++", + "\t\tif tcDescription.Qe {", + "\t\t\ttsQeCoverage.TestCasesWithQe++", + "\t\t\ttotalTcsWithQe++", + "\t\t} else {", + "\t\t\ttsQeCoverage.NotImplementedTestCases = append(tsQeCoverage.NotImplementedTestCases, tcDescription.Identifier.Id)", + "\t\t}", + "", + "\t\t// Update this test suite's coverage percentage", + "\t\ttsQeCoverage.Coverage = multiplier * (float32(tsQeCoverage.TestCasesWithQe) / float32(tsQeCoverage.TestCases))", + "", + "\t\tqeCoverageByTestSuite[tsName] = tsQeCoverage", + "\t}", + "", + "\ttotalCoverage := float32(0)", + "\tif totalTcs \u003e 0 {", + "\t\ttotalCoverage = multiplier * (float32(totalTcsWithQe) / float32(totalTcs))", + "\t}", + "", + "\treturn TestCoverageSummaryReport{", + "\t\tCoverageByTestSuite: qeCoverageByTestSuite,", + "\t\tTotalCoveragePercentage: totalCoverage,", + "\t\tTestCasesTotal: totalTcs,", + "\t\tTestCasesWithQe: totalTcsWithQe,", + "\t}", + "}" + ] + }, + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a command to report QE test coverage\n\nThe function builds a new command instance that includes a persistent string\nflag named \"suitename\" for filtering coverage output by suite name. 
It\nreturns this configured command so it can be added to the parent generate\ncommand hierarchy.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:49", + "calls": [ + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tgenerate.AddCommand(catalog.NewCommand())", + "\tgenerate.AddCommand(feedback.NewCommand())", + "\tgenerate.AddCommand(config.NewCommand())", + "\tgenerate.AddCommand(qecoverage.NewCommand())", + "", + "\treturn generate", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tqeCoverageReportCmd.PersistentFlags().String(\"suitename\", \"\", \"Displays the remaining tests not covered by QE for the specified suite name.\")", + "", + "\treturn qeCoverageReportCmd", + "}" + ] + }, + { + "name": "showQeCoverageForTestCaseName", + "qualifiedName": "showQeCoverageForTestCaseName", + "exported": false, + "signature": "func(string, TestCoverageSummaryReport)()", + "doc": "showQeCoverageForTestCaseName Displays QE coverage statistics for a specified test suite\n\nThe function prints the name of the test suite, total number of test cases,\noverall coverage percentage, and how many are not covered by QE. 
It then\nreports whether all tests have QE coverage or lists any unimplemented test\ncases in detail.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:85", + "calls": [ + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func showQeCoverageForTestCaseName(suiteName string, qeCoverage TestCoverageSummaryReport) {", + "\ttsCoverage := qeCoverage.CoverageByTestSuite[suiteName]", + "", + "\tfmt.Println(\"Suite Name : \", suiteName)", + "\tfmt.Printf(\"Total Test Cases : %d, QE Coverage: %.f%%, Unimplemented Test Cases : %d\\n\",", + "\t\ttsCoverage.TestCases, tsCoverage.Coverage, tsCoverage.TestCases-tsCoverage.TestCasesWithQe)", + "", + "\tif len(tsCoverage.NotImplementedTestCases) == 0 {", + "\t\tfmt.Println(\"Congrats! All tests are QE test covered\")", + "\t} else {", + "\t\tvar testCases = strings.Join(tsCoverage.NotImplementedTestCases, \"\\n\")", + "\t\tfmt.Printf(\"\\nUnimplemented Test Cases are the following: \\n\\n%s\", testCases)", + "\t}", + "", + "\tfmt.Println()", + "}" + ] + }, + { + "name": "showQeCoverageSummaryReport", + "qualifiedName": "showQeCoverageSummaryReport", + "exported": false, + "signature": "func()()", + "doc": "showQeCoverageSummaryReport Displays a formatted report of QE coverage statistics\n\nThis routine calculates overall and per-test-suite coverage by calling\nGetQeCoverage, then sorts the suite names alphabetically. 
It prints total\npercentages and counts, followed by a table showing each suite’s name, its\ncoverage percentage, total test cases, and how many are not covered. The\noutput is formatted for console readability.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:161", + "calls": [ + { + "name": "GetQeCoverage", + "kind": "function", + "source": [ + "func GetQeCoverage(catalog map[claim.Identifier]claim.TestCaseDescription) TestCoverageSummaryReport {", + "\ttotalTcs := 0", + "\ttotalTcsWithQe := 0", + "", + "\tqeCoverageByTestSuite := map[string]TestSuiteQeCoverage{}", + "", + "\tfor claimID := range catalog {", + "\t\ttotalTcs++", + "", + "\t\ttcDescription := catalog[claimID]", + "", + "\t\ttsName := tcDescription.Identifier.Suite", + "", + "\t\ttsQeCoverage, exists := qeCoverageByTestSuite[tsName]", + "\t\tif !exists {", + "\t\t\ttsQeCoverage = TestSuiteQeCoverage{}", + "\t\t}", + "", + "\t\ttsQeCoverage.TestCases++", + "\t\tif tcDescription.Qe {", + "\t\t\ttsQeCoverage.TestCasesWithQe++", + "\t\t\ttotalTcsWithQe++", + "\t\t} else {", + "\t\t\ttsQeCoverage.NotImplementedTestCases = append(tsQeCoverage.NotImplementedTestCases, tcDescription.Identifier.Id)", + "\t\t}", + "", + "\t\t// Update this test suite's coverage percentage", + "\t\ttsQeCoverage.Coverage = multiplier * (float32(tsQeCoverage.TestCasesWithQe) / float32(tsQeCoverage.TestCases))", + "", + "\t\tqeCoverageByTestSuite[tsName] = tsQeCoverage", + "\t}", + "", + "\ttotalCoverage := float32(0)", + "\tif totalTcs \u003e 0 {", + "\t\ttotalCoverage = multiplier * (float32(totalTcsWithQe) / float32(totalTcs))", + "\t}", + "", + "\treturn TestCoverageSummaryReport{", + "\t\tCoverageByTestSuite: qeCoverageByTestSuite,", + "\t\tTotalCoveragePercentage: totalCoverage,", + "\t\tTestCasesTotal: totalTcs,", + "\t\tTestCasesWithQe: totalTcsWithQe,", + "\t}", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + 
"kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func showQeCoverageSummaryReport() {", + "\tqeCoverage := GetQeCoverage(identifiers.Catalog)", + "", + "\t// Order test suite names so the report is in ascending test suite name order.", + "\ttestSuites := []string{}", + "\tfor suite := range qeCoverage.CoverageByTestSuite {", + "\t\ttestSuites = append(testSuites, suite)", + "\t}", + "\tsort.Strings(testSuites)", + "", + "\t// QE Coverage details", + "\tfmt.Printf(\"Total QE Coverage: %.f%%\\n\\n\", qeCoverage.TotalCoveragePercentage)", + "\tfmt.Printf(\"Total Test Cases: %d, Total QE Test Cases: %d\\n\\n\", qeCoverage.TestCasesTotal, qeCoverage.TestCasesWithQe)", + "", + "\t// Per test suite QE coverage", + "\tfmt.Printf(\"%-30s\\t%-20s\\t%-20s\\t%s\\n\", \"Test Suite Name\", \"QE Coverage\", \"Total Test Cases\", \"Not Covered Test Count\")", + "\tfor _, suite := range testSuites {", + "\t\ttsCoverage := qeCoverage.CoverageByTestSuite[suite]", + "\t\tfmt.Printf(\"%-30s\\t%.0f%%\\t%30d\\t%10d\\n\", suite, tsCoverage.Coverage, tsCoverage.TestCases, tsCoverage.TestCases-tsCoverage.TestCasesWithQe)", + "\t}", + "", + "\tfmt.Println()", + "}" + ] + } + ], + "globals": [ + { + "name": "qeCoverageReportCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:57" + } + ], + "consts": [ + { + "name": "multiplier", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/generate/qe_coverage/qe_coverage.go:14" + } + ] + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "info", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/spf13/cobra", + "golang.org/x/term", + "os", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates the info subcommand with a required test-label flag\n\nThe function configures an information command for the CLI by adding\npersistent string and boolean flags that filter and display test case data.\nIt marks the test-label flag as mandatory, printing an error to standard\nerror if this fails, and then returns the configured command object.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:75", + "calls": [ + { + "name": "StringP", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "BoolP", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "MarkPersistentFlagRequired", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + 
"\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tinfoCmd.PersistentFlags().StringP(\"test-label\", \"t\", \"\", \"The test label filter to select the test cases to show information about\")", + "\tinfoCmd.PersistentFlags().BoolP(\"list\", \"l\", false, \"Show only the names of the test cases for a given test label\")", + "\terr := infoCmd.MarkPersistentFlagRequired(\"test-label\")", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not mark persistent flag \\\"test-case\\\" as required, err: %v\", err)", + "\t\treturn nil", + "\t}", + "\treturn infoCmd", + "}" + ] + }, + { + "name": "adjustLineMaxWidth", + "qualifiedName": "adjustLineMaxWidth", + "exported": false, + "signature": "func()()", + "doc": "adjustLineMaxWidth Adjusts the maximum line width for output\n\nThe function checks if standard input is a terminal, then retrieves the\nterminal's width. If the width is smaller than the current maximum plus\npadding, it reduces the maximum line width accordingly to fit the display. 
No\nvalue is returned; the global variable is updated in place.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:198", + "calls": [ + { + "pkgPath": "golang.org/x/term", + "name": "IsTerminal", + "kind": "function" + }, + { + "pkgPath": "golang.org/x/term", + "name": "GetSize", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "showInfo", + "kind": "function", + "source": [ + "func showInfo(cmd *cobra.Command, _ []string) error {", + "\ttestCaseFlag, _ := cmd.Flags().GetString(\"test-label\")", + "\tlistFlag, _ := cmd.Flags().GetBool(\"list\")", + "", + "\t// Get a list of matching test cases names", + "\ttestIDs, err := getMatchingTestIDs(testCaseFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the matching test case list, err: %v\", err)", + "\t}", + "", + "\t// Print the list and leave if only listing is required", + "\tif listFlag {", + "\t\tprintTestList(testIDs)", + "\t\treturn nil", + "\t}", + "", + "\t// Get a list of test descriptions with detail info per test case", + "\ttestCases := getTestDescriptionsFromTestIDs(testIDs)", + "\tif len(testCases) == 0 {", + "\t\treturn fmt.Errorf(\"no test case found matching name %q\", testCaseFlag)", + "\t}", + "", + "\t// Adjust text box line width", + "\tadjustLineMaxWidth()", + "", + "\t// Print test case info box", + "\tfor i := range testCases {", + "\t\tprintTestCaseInfoBox(\u0026testCases[i])", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func adjustLineMaxWidth() {", + "\tif term.IsTerminal(0) {", + "\t\twidth, _, err := term.GetSize(0)", + "\t\tif err != nil {", + "\t\t\treturn", + "\t\t}", + "\t\tif width \u003c lineMaxWidth+linePadding {", + "\t\t\tlineMaxWidth = width - linePadding", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "getMatchingTestIDs", + "qualifiedName": "getMatchingTestIDs", + 
"exported": false, + "signature": "func(string)([]string, error)", + "doc": "getMatchingTestIDs retrieves test case identifiers that match a label expression\n\nThe function initializes a label evaluator with the provided expression,\nloads all internal check definitions, then filters those checks to return\nonly IDs whose labels satisfy the evaluator. It returns a slice of matching\nIDs or an error if initialization or filtering fails.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:158", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "InitLabelsExprEvaluator", + "kind": "function", + "source": [ + "func InitLabelsExprEvaluator(labelsFilter string) error {", + "\t// Expand the abstract \"all\" label into actual existing labels", + "\tif labelsFilter == \"all\" {", + "\t\tallTags := []string{identifiers.TagCommon, identifiers.TagExtended,", + "\t\t\tidentifiers.TagFarEdge, identifiers.TagTelco}", + "\t\tlabelsFilter = strings.Join(allTags, \",\")", + "\t}", + "", + "\teval, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not create a label evaluator, err: %v\", err)", + "\t}", + "", + "\tlabelsExprEvaluator = eval", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "FilterCheckIDs", + 
"kind": "function", + "source": [ + "func FilterCheckIDs() ([]string, error) {", + "\tfilteredCheckIDs := []string{}", + "\tfor _, group := range dbByGroup {", + "\t\tfor _, check := range group.checks {", + "\t\t\tif labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\t\tfilteredCheckIDs = append(filteredCheckIDs, check.ID)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn filteredCheckIDs, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "showInfo", + "kind": "function", + "source": [ + "func showInfo(cmd *cobra.Command, _ []string) error {", + "\ttestCaseFlag, _ := cmd.Flags().GetString(\"test-label\")", + "\tlistFlag, _ := cmd.Flags().GetBool(\"list\")", + "", + "\t// Get a list of matching test cases names", + "\ttestIDs, err := getMatchingTestIDs(testCaseFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the matching test case list, err: %v\", err)", + "\t}", + "", + "\t// Print the list and leave if only listing is required", + "\tif listFlag {", + "\t\tprintTestList(testIDs)", + "\t\treturn nil", + "\t}", + "", + "\t// Get a list of test descriptions with detail info per test case", + "\ttestCases := getTestDescriptionsFromTestIDs(testIDs)", + "\tif len(testCases) == 0 {", + "\t\treturn fmt.Errorf(\"no test case found matching name %q\", testCaseFlag)", + "\t}", + "", + "\t// Adjust text box line width", + "\tadjustLineMaxWidth()", + "", + "\t// Print test case info box", + "\tfor i := range testCases {", + "\t\tprintTestCaseInfoBox(\u0026testCases[i])", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getMatchingTestIDs(labelExpr string) ([]string, error) {", + "\tif err := checksdb.InitLabelsExprEvaluator(labelExpr); err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to initialize a test case label 
evaluator, err: %v\", err)", + "\t}", + "\tcertsuite.LoadInternalChecksDB()", + "\ttestIDs, err := checksdb.FilterCheckIDs()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not list test cases, err: %v\", err)", + "\t}", + "", + "\treturn testIDs, nil", + "}" + ] + }, + { + "name": "getTestDescriptionsFromTestIDs", + "qualifiedName": "getTestDescriptionsFromTestIDs", + "exported": false, + "signature": "func([]string)([]claim.TestCaseDescription)", + "doc": "getTestDescriptionsFromTestIDs Retrieves test case descriptions for given IDs\n\nThe function receives a slice of test ID strings, iterates over each ID, and\nsearches a catalog map for matching entries by comparing the identifier\nfield. When a match is found, the corresponding test case description is\nappended to a result slice. After processing all input IDs, it returns the\nslice containing all matched descriptions, which may be empty if no IDs were\nfound.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:179", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "showInfo", + "kind": "function", + "source": [ + "func showInfo(cmd *cobra.Command, _ []string) error {", + "\ttestCaseFlag, _ := cmd.Flags().GetString(\"test-label\")", + "\tlistFlag, _ := cmd.Flags().GetBool(\"list\")", + "", + "\t// Get a list of matching test cases names", + "\ttestIDs, err := getMatchingTestIDs(testCaseFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the matching test case list, err: %v\", err)", + "\t}", + "", + "\t// Print the list and leave if only listing is required", + "\tif listFlag {", + "\t\tprintTestList(testIDs)", + "\t\treturn nil", + "\t}", + "", + "\t// Get a list of test descriptions with detail info per test case", + "\ttestCases := getTestDescriptionsFromTestIDs(testIDs)", + "\tif len(testCases) == 0 {", + "\t\treturn 
fmt.Errorf(\"no test case found matching name %q\", testCaseFlag)", + "\t}", + "", + "\t// Adjust text box line width", + "\tadjustLineMaxWidth()", + "", + "\t// Print test case info box", + "\tfor i := range testCases {", + "\t\tprintTestCaseInfoBox(\u0026testCases[i])", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getTestDescriptionsFromTestIDs(testIDs []string) []claim.TestCaseDescription {", + "\tvar testCases []claim.TestCaseDescription", + "\tfor _, test := range testIDs {", + "\t\tfor id := range identifiers.Catalog {", + "\t\t\tif id.Id == test {", + "\t\t\t\ttestCases = append(testCases, identifiers.Catalog[id])", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn testCases", + "}" + ] + }, + { + "name": "printTestCaseInfoBox", + "qualifiedName": "printTestCaseInfoBox", + "exported": false, + "signature": "func(*claim.TestCaseDescription)()", + "doc": "printTestCaseInfoBox Displays a formatted information box for a test case\n\nThe function builds a bordered text block that shows the test case ID,\ndescription, remediation steps, exceptions, and best‑practice references.\nIt uses helper functions to center or left‑align lines, color headers, and\nwrap long paragraphs to fit within the terminal width. 
Each section is\nseparated by horizontal borders made of dashes.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:93", + "calls": [ + { + "pkgPath": "strings", + "name": "Repeat", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineColor", + "kind": "function", + "source": [ + "func LineColor(s, color string) string {", + "\treturn color + s + Reset", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignCenter", + "kind": "function", + "source": [ + "func LineAlignCenter(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, fmt.Sprintf(\"%[1]*s\", (w+len(s))/2, s)) //nolint:mnd // magic number", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Repeat", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineColor", + "kind": "function", + "source": [ + "func LineColor(s, color string) string {", + "\treturn color + s + Reset", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignCenter", + "kind": "function", + "source": [ + "func LineAlignCenter(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, fmt.Sprintf(\"%[1]*s\", (w+len(s))/2, s)) //nolint:mnd // magic number", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "WrapLines", + "kind": "function", + "source": [ + "func WrapLines(text string, maxWidth int) []string {", + "\tlines 
:= strings.Split(text, \"\\n\")", + "\twrappedLines := make([]string, 0, len(lines))", + "\tfor _, line := range lines {", + "\t\tif len(line) \u003c= maxWidth {", + "\t\t\twrappedLines = append(wrappedLines, line)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Break lines longer than maxWidth", + "\t\twords := strings.Fields(line)", + "\t\tcurrentLine := words[0]", + "\t\tfor _, word := range words[1:] {", + "\t\t\tif len(currentLine)+len(word)+1 \u003c= maxWidth {", + "\t\t\t\tcurrentLine += \" \" + word", + "\t\t\t} else {", + "\t\t\t\twrappedLines = append(wrappedLines, currentLine)", + "\t\t\t\tcurrentLine = word", + "\t\t\t}", + "\t\t}", + "", + "\t\twrappedLines = append(wrappedLines, currentLine)", + "\t}", + "", + "\treturn wrappedLines", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignLeft", + "kind": "function", + "source": [ + "func LineAlignLeft(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, s)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineColor", + "kind": "function", + "source": [ + "func LineColor(s, color string) string {", + "\treturn color + s + Reset", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignCenter", + "kind": "function", + "source": [ + "func LineAlignCenter(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, fmt.Sprintf(\"%[1]*s\", (w+len(s))/2, s)) //nolint:mnd // magic number", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "WrapLines", + "kind": 
"function", + "source": [ + "func WrapLines(text string, maxWidth int) []string {", + "\tlines := strings.Split(text, \"\\n\")", + "\twrappedLines := make([]string, 0, len(lines))", + "\tfor _, line := range lines {", + "\t\tif len(line) \u003c= maxWidth {", + "\t\t\twrappedLines = append(wrappedLines, line)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Break lines longer than maxWidth", + "\t\twords := strings.Fields(line)", + "\t\tcurrentLine := words[0]", + "\t\tfor _, word := range words[1:] {", + "\t\t\tif len(currentLine)+len(word)+1 \u003c= maxWidth {", + "\t\t\t\tcurrentLine += \" \" + word", + "\t\t\t} else {", + "\t\t\t\twrappedLines = append(wrappedLines, currentLine)", + "\t\t\t\tcurrentLine = word", + "\t\t\t}", + "\t\t}", + "", + "\t\twrappedLines = append(wrappedLines, currentLine)", + "\t}", + "", + "\treturn wrappedLines", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignLeft", + "kind": "function", + "source": [ + "func LineAlignLeft(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, s)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineColor", + "kind": "function", + "source": [ + "func LineColor(s, color string) string {", + "\treturn color + s + Reset", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignCenter", + "kind": "function", + "source": [ + "func LineAlignCenter(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, fmt.Sprintf(\"%[1]*s\", (w+len(s))/2, s)) //nolint:mnd // magic number", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "WrapLines", + "kind": "function", + "source": [ + "func WrapLines(text string, maxWidth int) []string {", + "\tlines := strings.Split(text, \"\\n\")", + "\twrappedLines := make([]string, 0, len(lines))", + "\tfor _, line := range lines {", + "\t\tif len(line) \u003c= maxWidth {", + "\t\t\twrappedLines = append(wrappedLines, line)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Break lines longer than maxWidth", + "\t\twords := strings.Fields(line)", + "\t\tcurrentLine := words[0]", + "\t\tfor _, word := range words[1:] {", + "\t\t\tif len(currentLine)+len(word)+1 \u003c= maxWidth {", + "\t\t\t\tcurrentLine += \" \" + word", + "\t\t\t} else {", + "\t\t\t\twrappedLines = append(wrappedLines, currentLine)", + "\t\t\t\tcurrentLine = word", + "\t\t\t}", + "\t\t}", + "", + "\t\twrappedLines = append(wrappedLines, currentLine)", + "\t}", + "", + "\treturn wrappedLines", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignLeft", + "kind": "function", + "source": [ + "func LineAlignLeft(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, s)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineColor", + "kind": "function", + "source": [ + "func LineColor(s, color string) string {", + "\treturn color + s + Reset", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignCenter", + "kind": "function", + "source": [ + "func LineAlignCenter(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, fmt.Sprintf(\"%[1]*s\", (w+len(s))/2, s)) //nolint:mnd // magic number", + "}" + ] + 
}, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "WrapLines", + "kind": "function", + "source": [ + "func WrapLines(text string, maxWidth int) []string {", + "\tlines := strings.Split(text, \"\\n\")", + "\twrappedLines := make([]string, 0, len(lines))", + "\tfor _, line := range lines {", + "\t\tif len(line) \u003c= maxWidth {", + "\t\t\twrappedLines = append(wrappedLines, line)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Break lines longer than maxWidth", + "\t\twords := strings.Fields(line)", + "\t\tcurrentLine := words[0]", + "\t\tfor _, word := range words[1:] {", + "\t\t\tif len(currentLine)+len(word)+1 \u003c= maxWidth {", + "\t\t\t\tcurrentLine += \" \" + word", + "\t\t\t} else {", + "\t\t\t\twrappedLines = append(wrappedLines, currentLine)", + "\t\t\t\tcurrentLine = word", + "\t\t\t}", + "\t\t}", + "", + "\t\twrappedLines = append(wrappedLines, currentLine)", + "\t}", + "", + "\treturn wrappedLines", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "LineAlignLeft", + "kind": "function", + "source": [ + "func LineAlignLeft(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, s)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "showInfo", + "kind": "function", + "source": [ + "func showInfo(cmd *cobra.Command, _ []string) error {", + "\ttestCaseFlag, _ := cmd.Flags().GetString(\"test-label\")", + "\tlistFlag, _ := cmd.Flags().GetBool(\"list\")", + "", + "\t// Get a list of matching test cases names", + "\ttestIDs, err := getMatchingTestIDs(testCaseFlag)", + "\tif err != 
nil {", + "\t\treturn fmt.Errorf(\"could not get the matching test case list, err: %v\", err)", + "\t}", + "", + "\t// Print the list and leave if only listing is required", + "\tif listFlag {", + "\t\tprintTestList(testIDs)", + "\t\treturn nil", + "\t}", + "", + "\t// Get a list of test descriptions with detail info per test case", + "\ttestCases := getTestDescriptionsFromTestIDs(testIDs)", + "\tif len(testCases) == 0 {", + "\t\treturn fmt.Errorf(\"no test case found matching name %q\", testCaseFlag)", + "\t}", + "", + "\t// Adjust text box line width", + "\tadjustLineMaxWidth()", + "", + "\t// Print test case info box", + "\tfor i := range testCases {", + "\t\tprintTestCaseInfoBox(\u0026testCases[i])", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printTestCaseInfoBox(testCase *claim.TestCaseDescription) {", + "\t// Test case identifier", + "\tborder := strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(testCase.Identifier.Id, lineMaxWidth), cli.Cyan))", + "", + "\t// Description", + "\tborder = strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"DESCRIPTION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Description, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Remediation", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"REMEDIATION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Remediation, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Exceptions", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", 
cli.LineColor(cli.LineAlignCenter(\"EXCEPTIONS\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.ExceptionProcess, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Best Practices reference", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"BEST PRACTICES REFERENCE\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.BestPracticeReference, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "\tfmt.Println(border)", + "\tfmt.Printf(\"\\n\\n\")", + "}" + ] + }, + { + "name": "printTestList", + "qualifiedName": "printTestList", + "exported": false, + "signature": "func([]string)()", + "doc": "printTestList Displays a formatted list of test case identifiers\n\nThe function receives a slice of strings representing test IDs, then prints a\nheader, each ID within a bordered box, and a footer to visually separate the\nlist. It uses fixed-width formatting so that all entries align consistently\nin the terminal output. 
No value is returned; the output is directed to\nstandard output via fmt functions.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:142", + "calls": [ + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "showInfo", + "kind": "function", + "source": [ + "func showInfo(cmd *cobra.Command, _ []string) error {", + "\ttestCaseFlag, _ := cmd.Flags().GetString(\"test-label\")", + "\tlistFlag, _ := cmd.Flags().GetBool(\"list\")", + "", + "\t// Get a list of matching test cases names", + "\ttestIDs, err := getMatchingTestIDs(testCaseFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the matching test case list, err: %v\", err)", + "\t}", + "", + "\t// Print the list and leave if only listing is required", + "\tif listFlag {", + "\t\tprintTestList(testIDs)", + "\t\treturn nil", + "\t}", + "", + "\t// Get a list of test descriptions with detail info per test case", + "\ttestCases := getTestDescriptionsFromTestIDs(testIDs)", + "\tif len(testCases) == 0 {", + "\t\treturn fmt.Errorf(\"no test case found matching name %q\", testCaseFlag)", + "\t}", + "", + "\t// Adjust text box line width", + "\tadjustLineMaxWidth()", + "", + "\t// Print test case info box", + "\tfor i := range testCases {", + "\t\tprintTestCaseInfoBox(\u0026testCases[i])", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printTestList(testIDs []string) {", + "\tfmt.Println(\"------------------------------------------------------------\")", + "\tfmt.Println(\"| TEST CASE SELECTION 
|\")", + "\tfmt.Println(\"------------------------------------------------------------\")", + "\tfor _, testID := range testIDs {", + "\t\tfmt.Printf(\"| %-56s |\\n\", testID)", + "\t}", + "\tfmt.Println(\"------------------------------------------------------------\")", + "}" + ] + }, + { + "name": "showInfo", + "qualifiedName": "showInfo", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "showInfo Displays detailed information about selected test cases\n\nThe function retrieves a list of test case identifiers based on a label\nexpression, optionally listing them if the --list flag is set. If not\nlisting, it fetches full descriptions for each matching test case and prints\na formatted box containing identifier, description, remediation, exceptions,\nand best practice references. Errors are returned if no matches or retrieval\nfails.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:36", + "calls": [ + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "getMatchingTestIDs", + "kind": "function", + "source": [ + "func getMatchingTestIDs(labelExpr string) ([]string, error) {", + "\tif err := checksdb.InitLabelsExprEvaluator(labelExpr); err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to initialize a test case label evaluator, err: %v\", err)", + "\t}", + "\tcertsuite.LoadInternalChecksDB()", + "\ttestIDs, err := checksdb.FilterCheckIDs()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not list test cases, err: %v\", err)", + "\t}", + "", + "\treturn testIDs, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "printTestList", + "kind": "function", + "source": [ + "func printTestList(testIDs []string) {", + 
"\tfmt.Println(\"------------------------------------------------------------\")", + "\tfmt.Println(\"| TEST CASE SELECTION |\")", + "\tfmt.Println(\"------------------------------------------------------------\")", + "\tfor _, testID := range testIDs {", + "\t\tfmt.Printf(\"| %-56s |\\n\", testID)", + "\t}", + "\tfmt.Println(\"------------------------------------------------------------\")", + "}" + ] + }, + { + "name": "getTestDescriptionsFromTestIDs", + "kind": "function", + "source": [ + "func getTestDescriptionsFromTestIDs(testIDs []string) []claim.TestCaseDescription {", + "\tvar testCases []claim.TestCaseDescription", + "\tfor _, test := range testIDs {", + "\t\tfor id := range identifiers.Catalog {", + "\t\t\tif id.Id == test {", + "\t\t\t\ttestCases = append(testCases, identifiers.Catalog[id])", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn testCases", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "adjustLineMaxWidth", + "kind": "function", + "source": [ + "func adjustLineMaxWidth() {", + "\tif term.IsTerminal(0) {", + "\t\twidth, _, err := term.GetSize(0)", + "\t\tif err != nil {", + "\t\t\treturn", + "\t\t}", + "\t\tif width \u003c lineMaxWidth+linePadding {", + "\t\t\tlineMaxWidth = width - linePadding", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "printTestCaseInfoBox", + "kind": "function", + "source": [ + "func printTestCaseInfoBox(testCase *claim.TestCaseDescription) {", + "\t// Test case identifier", + "\tborder := strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(testCase.Identifier.Id, lineMaxWidth), cli.Cyan))", + "", + "\t// Description", + "\tborder = strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"DESCRIPTION\", lineMaxWidth), 
cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Description, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Remediation", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"REMEDIATION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Remediation, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Exceptions", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"EXCEPTIONS\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.ExceptionProcess, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Best Practices reference", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"BEST PRACTICES REFERENCE\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.BestPracticeReference, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "\tfmt.Println(border)", + "\tfmt.Printf(\"\\n\\n\")", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func showInfo(cmd *cobra.Command, _ []string) error {", + "\ttestCaseFlag, _ := cmd.Flags().GetString(\"test-label\")", + "\tlistFlag, _ := cmd.Flags().GetBool(\"list\")", + "", + "\t// Get a list of matching test cases names", + "\ttestIDs, err := getMatchingTestIDs(testCaseFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not get the matching test case list, err: %v\", err)", + "\t}", + "", + "\t// Print the list and leave if only listing is required", + "\tif listFlag {", + "\t\tprintTestList(testIDs)", + 
"\t\treturn nil", + "\t}", + "", + "\t// Get a list of test descriptions with detail info per test case", + "\ttestCases := getTestDescriptionsFromTestIDs(testIDs)", + "\tif len(testCases) == 0 {", + "\t\treturn fmt.Errorf(\"no test case found matching name %q\", testCaseFlag)", + "\t}", + "", + "\t// Adjust text box line width", + "\tadjustLineMaxWidth()", + "", + "\t// Print test case info box", + "\tfor i := range testCases {", + "\t\tprintTestCaseInfoBox(\u0026testCases[i])", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "globals": [ + { + "name": "infoCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:20" + }, + { + "name": "lineMaxWidth", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:25" + } + ], + "consts": [ + { + "name": "linePadding", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/info/info.go:17" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim", + "name": "claim", + "files": 1, + "imports": [ + "encoding/json", + "fmt", + "github.com/Masterminds/semver/v3", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "os" + ], + "structs": [ + { + "name": "Configurations", + "exported": true, + "doc": "Configurations Holds test configuration data\n\nThis structure stores the overall configuration for a claim test, including\nany custom settings, a list of abnormal events to be monitored, and a\ncollection of operators that should run during the test. 
Each field is\ndesigned to be marshalled to or from JSON, allowing easy integration with\nexternal tools or configuration files.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:110", + "fields": { + "AbnormalEvents": "[]interface{}", + "Config": "interface{}", + "TestOperators": "[]TestOperator" + }, + "methodNames": null, + "source": [ + "type Configurations struct {", + "\tConfig interface{} `json:\"Config\"`", + "\tAbnormalEvents []interface{} `json:\"AbnormalEvents\"`", + "\tTestOperators []TestOperator `json:\"testOperators\"`", + "}" + ] + }, + { + "name": "Nodes", + "exported": true, + "doc": "Nodes represents information about nodes in a cluster\n\nThis struct holds aggregated data for the nodes, including their hardware\ndetails, network plugin configuration, CSI driver status, and an overall\nsummary of node health or capabilities. Each field is defined as an interface\nto allow flexible JSON unmarshalling from various sources.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:84", + "fields": { + "CniNetworks": "interface{}", + "CsiDriver": "interface{}", + "NodesHwInfo": "interface{}", + "NodesSummary": "interface{}" + }, + "methodNames": null, + "source": [ + "type Nodes struct {", + "\tNodesSummary interface{} `json:\"nodeSummary\"`", + "\tCniNetworks interface{} `json:\"cniPlugins\"`", + "\tNodesHwInfo interface{} `json:\"nodesHwInfo\"`", + "\tCsiDriver interface{} `json:\"csiDriver\"`", + "}" + ] + }, + { + "name": "Schema", + "exported": true, + "doc": "Schema Encapsulates an entire claim record\n\nThe structure holds the top‑level claim object which includes configuration\nsettings, node information, test suite outcomes, and schema versioning data.\nEach field maps directly to a JSON key in the claim file, allowing easy\nserialization and deserialization of the claim contents.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:122", + "fields": { + "Claim": 
"struct{Configurations; Nodes Nodes; Results TestSuiteResults; Versions officialClaimScheme.Versions}" + }, + "methodNames": null, + "source": [ + "type Schema struct {", + "\tClaim struct {", + "\t\tConfigurations `json:\"configurations\"`", + "", + "\t\tNodes Nodes `json:\"nodes\"`", + "", + "\t\tResults TestSuiteResults `json:\"results\"`", + "\t\tVersions officialClaimScheme.Versions `json:\"versions\"`", + "\t} `json:\"claim\"`", + "}" + ] + }, + { + "name": "TestCaseID", + "exported": true, + "doc": "TestCaseID represents a unique identifier for a test case\n\nThis struct holds the ID, suite name, and tags of a test case as strings. The\nfields are exported and annotated for JSON serialization with keys \"id\",\n\"suite\", and \"tags\". It is used to track and reference individual test cases\nwithin the claim package.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:38", + "fields": { + "ID": "string", + "Suite": "string", + "Tags": "string" + }, + "methodNames": null, + "source": [ + "type TestCaseID struct {", + "\tID string `json:\"id\"`", + "\tSuite string `json:\"suite\"`", + "\tTags string `json:\"tags\"`", + "}" + ] + }, + { + "name": "TestCaseRawResult", + "exported": true, + "doc": "TestCaseRawResult Represents the outcome of a test case\n\nThis structure holds the name of a test case along with its status, such as\npassed or failed. The fields are tagged for JSON serialization but omitted\nfrom output. 
It is used to aggregate results before further processing.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:27", + "fields": { + "Name": "string", + "Status": "string" + }, + "methodNames": null, + "source": [ + "type TestCaseRawResult struct {", + "\tName string `json:\"-name\"`", + "\tStatus string `json:\"-status\"`", + "}" + ] + }, + { + "name": "TestCaseResult", + "exported": true, + "doc": "TestCaseResult Stores the outcome of an individual test case\n\nThis structure captures metadata about a single test execution, including its\nidentifier, timing, state, and any failure details. It also holds catalog\ninformation such as best practice references, descriptions, exception\nhandling notes, and remediation steps. The fields are organized to support\nserialization for reporting and analysis of test results.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:51", + "fields": { + "CapturedTestOutput": "string", + "CatalogInfo": "struct{BestPracticeReference string; Description string; ExceptionProcess string; Remediation string}", + "CategoryClassification": "map[string]string", + "CheckDetails": "string", + "Duration": "int", + "EndTime": "string", + "FailureLineContent": "string", + "FailureLocation": "string", + "SkipReason": "string", + "StartTime": "string", + "State": "string", + "TestID": "struct{ID string; Suite string; Tags string}" + }, + "methodNames": null, + "source": [ + "type TestCaseResult struct {", + "\tCapturedTestOutput string `json:\"capturedTestOutput\"`", + "\tCatalogInfo struct {", + "\t\tBestPracticeReference string `json:\"bestPracticeReference\"`", + "\t\tDescription string `json:\"description\"`", + "\t\tExceptionProcess string `json:\"exceptionProcess\"`", + "\t\tRemediation string `json:\"remediation\"`", + "\t} `json:\"catalogInfo\"`", + "\tCategoryClassification map[string]string `json:\"categoryClassification\"`", + "\tDuration int `json:\"duration\"`", + "\tEndTime 
string `json:\"endTime\"`", + "\tFailureLineContent string `json:\"failureLineContent\"`", + "\tFailureLocation string `json:\"failureLocation\"`", + "\tSkipReason string `json:\"skipReason\"`", + "\tCheckDetails string `json:\"checkDetails\"`", + "\tStartTime string `json:\"startTime\"`", + "\tState string `json:\"state\"`", + "\tTestID struct {", + "\t\tID string `json:\"id\"`", + "\t\tSuite string `json:\"suite\"`", + "\t\tTags string `json:\"tags\"`", + "\t} `json:\"testID\"`", + "}" + ] + }, + { + "name": "TestOperator", + "exported": true, + "doc": "TestOperator Describes a Kubernetes operator to be tested\n\nThis struct holds the basic identifying information for an operator,\nincluding its name, the namespace it runs in, and its version string. It is\nused by testing utilities to reference specific operator deployments during\nvalidation or cleanup operations.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:97", + "fields": { + "Name": "string", + "Namespace": "string", + "Version": "string" + }, + "methodNames": null, + "source": [ + "type TestOperator struct {", + "\tName string `json:\"name\"`", + "\tNamespace string `json:\"namespace\"`", + "\tVersion string `json:\"version\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "CheckVersion", + "qualifiedName": "CheckVersion", + "exported": true, + "signature": "func(string)(error)", + "doc": "CheckVersion Validates the claim file format version against a supported version\n\nThe function parses the supplied version string into a semantic version\nobject, then compares it to the predefined supported claim format version. If\nparsing fails or if the two versions do not match exactly, an error is\nreturned describing the issue. 
When the versions are equal, the function\nreturns nil indicating success.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:140", + "calls": [ + { + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Compare", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "name": "dumpCsv", + "kind": "function", + "source": [ + "func dumpCsv(_ *cobra.Command, _ []string) error {", + "\t// set log output to stderr", + "\tlog.SetOutput(os.Stderr)", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// loads the mapping between CNF name and type", + "\tCNFTypeMap, err := loadCNFTypeMap(CNFListFilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to load CNF type map (%s): %v\", CNFListFilePathFlag, err)", + "\t\treturn nil", + "\t}", + "", + "\t// builds a catalog map indexed by test ID", + "\tcatalogMap := buildCatalogByID()", + "", + "\t// get CNF type", + "\tcnfType := CNFTypeMap[CNFNameFlag]", + "", + "\t// builds CSV file", + "\tresultsCsv := buildCSV(claimScheme, cnfType, catalogMap)", + "", + "\t// initializes CSV writer", + "\twriter := csv.NewWriter(os.Stdout)", + "", + "\t// writes all CSV records", + "\terr = writer.WriteAll(resultsCsv)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to write 
results CSV to screen, err: %s\", err)", + "\t\treturn nil", + "\t}", + "\t// flushes buffer to screen", + "\twriter.Flush()", + "\t// Check for any writing errors", + "\tif err := writer.Error(); err != nil {", + "\t\tpanic(err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "showFailures", + "kind": "function", + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites := getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat {", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CheckVersion(version string) error {", + "\tclaimSemVersion, err := 
semver.NewVersion(version)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"claim file version %q is not valid: %v\", version, err)", + "\t}", + "", + "\tsupportedSemVersion, err := semver.NewVersion(supportedClaimFormatVersion)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"supported claim file version v%v is not valid: v%v\", supportedClaimFormatVersion, err)", + "\t}", + "", + "\tif claimSemVersion.Compare(supportedSemVersion) != 0 {", + "\t\treturn fmt.Errorf(\"claim format version v%v is not supported. Supported version is v%v\",", + "\t\t\tclaimSemVersion, supportedSemVersion)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "Parse", + "qualifiedName": "Parse", + "exported": true, + "signature": "func(string)(*Schema, error)", + "doc": "Parse Parses a JSON claim file into a structured schema\n\nThe function reads the entire contents of the specified file path, handling\nany read errors with an informative message. It then unmarshals the JSON data\ninto a Schema object, returning detailed errors if parsing fails. 
On success\nit returns a pointer to the populated Schema and a nil error.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:165", + "calls": [ + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv", + "name": "dumpCsv", + "kind": "function", + "source": [ + "func dumpCsv(_ *cobra.Command, _ []string) error {", + "\t// set log output to stderr", + "\tlog.SetOutput(os.Stderr)", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// loads the mapping between CNF name and type", + "\tCNFTypeMap, err := loadCNFTypeMap(CNFListFilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to load CNF type map (%s): %v\", CNFListFilePathFlag, err)", + "\t\treturn nil", + "\t}", + "", + "\t// builds a catalog map indexed by test ID", + "\tcatalogMap := buildCatalogByID()", + "", + "\t// get CNF type", + "\tcnfType := CNFTypeMap[CNFNameFlag]", + "", + "\t// builds CSV file", + "\tresultsCsv := buildCSV(claimScheme, cnfType, catalogMap)", + "", + "\t// initializes CSV writer", + "\twriter := csv.NewWriter(os.Stdout)", + "", + "\t// writes all CSV records", + "\terr = writer.WriteAll(resultsCsv)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to write results CSV to screen, err: %s\", err)", + "\t\treturn nil", + "\t}", + "\t// 
flushes buffer to screen", + "\twriter.Flush()", + "\t// Check for any writing errors", + "\tif err := writer.Error(); err != nil {", + "\t\tpanic(err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures", + "name": "showFailures", + "kind": "function", + "source": [ + "func showFailures(_ *cobra.Command, _ []string) error {", + "\toutputFormat, err := parseOutputFormatFlag()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Parse the claim file into the claim scheme.", + "\tclaimScheme, err := claim.Parse(claimFilePathFlag)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to parse claim file %s: %v\", claimFilePathFlag, err)", + "\t}", + "", + "\t// Check claim format version", + "\terr = claim.CheckVersion(claimScheme.Claim.Versions.ClaimFormat)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Order test case results by test suite, using a helper map.", + "\tresultsByTestSuite := map[string][]*claim.TestCaseResult{}", + "\tfor id := range claimScheme.Claim.Results {", + "\t\ttcResult := claimScheme.Claim.Results[id]", + "\t\tresultsByTestSuite[tcResult.TestID.Suite] = append(resultsByTestSuite[tcResult.TestID.Suite], \u0026tcResult)", + "\t}", + "", + "\ttargetTestSuites := parseTargetTestSuitesFlag()", + "\t// From the target test suites, get their failed test cases and put them in", + "\t// our custom types.", + "\ttestSuites := getFailedTestCasesByTestSuite(resultsByTestSuite, targetTestSuites)", + "", + "\tswitch outputFormat {", + "\tcase outputFormatJSON:", + "\t\tprintFailuresJSON(testSuites)", + "\tdefault:", + "\t\tprintFailuresText(testSuites)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Parse(filePath string) (*Schema, error) {", + "\tfileBytes, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn nil, 
fmt.Errorf(\"failure reading file: %v\", err)", + "\t}", + "", + "\tclaimFile := Schema{}", + "\terr = json.Unmarshal(fileBytes, \u0026claimFile)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to unmarshal file: %v\", err)", + "\t}", + "", + "\treturn \u0026claimFile, nil", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "TestCaseResultFailed", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:19" + }, + { + "name": "TestCaseResultPassed", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:17" + }, + { + "name": "TestCaseResultSkipped", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:18" + }, + { + "name": "supportedClaimFormatVersion", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/pkg/claim/claim.go:13" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "run", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "github.com/spf13/cobra", + "io/fs", + "os", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates the run command with all persistent flags\n\nThis function builds a cobra.Command that configures numerous persistent\noptions for executing the test suite, such as output location, timeout,\nconfiguration files, kubeconfig, server mode, logging, data collection, and\nintegration with external services. 
It registers each flag with default\nvalues and help text, then returns the fully configured command instance.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/run/run.go:33", + "calls": [ + { + "name": "StringP", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "StringP", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "StringP", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "StringP", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": 
"String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "Bool", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "PersistentFlags", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
NewCommand() *cobra.Command {", + "\trunCmd.PersistentFlags().StringP(\"output-dir\", \"o\", \"results\", \"The directory where the output artifacts will be placed\")", + "\trunCmd.PersistentFlags().StringP(\"label-filter\", \"l\", \"none\", \"Label expression to filter test cases (e.g. --label-filter 'access-control \u0026\u0026 !access-control-sys-admin-capability')\")", + "\trunCmd.PersistentFlags().String(\"timeout\", timeoutFlagDefaultvalue.String(), \"Time allowed for the test suite execution to complete (e.g. --timeout 30m or -timeout 1h30m)\")", + "\trunCmd.PersistentFlags().StringP(\"config-file\", \"c\", \"config/certsuite_config.yml\", \"The certsuite configuration file\")", + "\trunCmd.PersistentFlags().StringP(\"kubeconfig\", \"k\", \"\", \"The target cluster's Kubeconfig file\")", + "\trunCmd.PersistentFlags().Bool(\"server-mode\", false, \"Run the certsuite in web server mode\")", + "\trunCmd.PersistentFlags().Bool(\"omit-artifacts-zip-file\", false, \"Prevents the creation of a zip file with the result artifacts\")", + "\trunCmd.PersistentFlags().String(\"log-level\", \"debug\", \"Sets the log level\")", + "\trunCmd.PersistentFlags().String(\"offline-db\", \"\", \"Set the location of an offline DB to check the certification status of for container images, operators and helm charts\")", + "\trunCmd.PersistentFlags().String(\"preflight-dockerconfig\", \"\", \"Set the dockerconfig file to be used by the Preflight test suite\")", + "\trunCmd.PersistentFlags().Bool(\"intrusive\", true, \"Run intrusive tests that may disrupt the test environment\")", + "\trunCmd.PersistentFlags().Bool(\"allow-preflight-insecure\", false, \"Allow insecure connections in the Preflight test suite\")", + "\trunCmd.PersistentFlags().Bool(\"include-web-files\", false, \"Save web files in the configured output folder\")", + "\trunCmd.PersistentFlags().Bool(\"enable-data-collection\", false, \"Allow sending test results to an external data collector\")", + 
"\trunCmd.PersistentFlags().Bool(\"create-xml-junit-file\", false, \"Create a JUnit file with the test results\")", + "\trunCmd.PersistentFlags().String(\"certsuite-probe-image\", \"quay.io/redhat-best-practices-for-k8s/certsuite-probe:v0.0.25\", \"Certsuite probe image\")", + "\trunCmd.PersistentFlags().String(\"daemonset-cpu-req\", \"100m\", \"CPU request for the probe daemonset container\")", + "\trunCmd.PersistentFlags().String(\"daemonset-cpu-lim\", \"100m\", \"CPU limit for the probe daemonset container\")", + "\trunCmd.PersistentFlags().String(\"daemonset-mem-req\", \"100M\", \"Memory request for the probe daemonset container\")", + "\trunCmd.PersistentFlags().String(\"daemonset-mem-lim\", \"100M\", \"Memory limit for the probe daemonset container\")", + "\trunCmd.PersistentFlags().Bool(\"sanitize-claim\", false, \"Sanitize the claim.json file before sending it to the collector\")", + "\t// Include non-Running pods during autodiscovery when enabled (default false)", + "\trunCmd.PersistentFlags().Bool(\"allow-non-running\", false, \"Include non-Running pods during autodiscovery phase\")", + "\trunCmd.PersistentFlags().String(\"connect-api-key\", \"\", \"API Key for Red Hat Connect portal\")", + "\trunCmd.PersistentFlags().String(\"connect-project-id\", \"\", \"Project ID for Red Hat Connect portal\")", + "\trunCmd.PersistentFlags().String(\"connect-api-base-url\", \"\", \"Base URL for Red Hat Connect API\")", + "\trunCmd.PersistentFlags().String(\"connect-api-proxy-url\", \"\", \"Proxy URL for Red Hat Connect API\")", + "\trunCmd.PersistentFlags().String(\"connect-api-proxy-port\", \"\", \"Proxy port for Red Hat Connect API\")", + "", + "\treturn runCmd", + "}" + ] + }, + { + "name": "initTestParamsFromFlags", + "qualifiedName": "initTestParamsFromFlags", + "exported": false, + "signature": "func(*cobra.Command)(error)", + "doc": "initTestParamsFromFlags initializes test configuration from command line flags\n\nThis function reads a variety of flags provided 
to the CLI command and stores\ntheir values in a shared test parameters structure used throughout the\napplication. It ensures that the output directory exists, creating it if\nnecessary, and parses a timeout value with a default fallback. If any\nfilesystem or parsing errors occur, an error is returned for the caller to\nhandle.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/run/run.go:74", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": 
"GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetBool", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "GetString", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Stat", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "IsNotExist", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "MkdirAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "ParseDuration", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func initTestParamsFromFlags(cmd *cobra.Command) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Fetch test params from flags", + "\ttestParams.OutputDir, _ = cmd.Flags().GetString(\"output-dir\")", + "\ttestParams.LabelsFilter, _ = cmd.Flags().GetString(\"label-filter\")", + "\ttestParams.ServerMode, _ = cmd.Flags().GetBool(\"server-mode\")", + "\ttestParams.ConfigFile, _ = cmd.Flags().GetString(\"config-file\")", + "\ttestParams.Kubeconfig, _ = cmd.Flags().GetString(\"kubeconfig\")", + "\ttestParams.OmitArtifactsZipFile, _ = cmd.Flags().GetBool(\"omit-artifacts-zip-file\")", + "\ttestParams.LogLevel, _ = cmd.Flags().GetString(\"log-level\")", + "\ttestParams.OfflineDB, _ = cmd.Flags().GetString(\"offline-db\")", + "\ttestParams.PfltDockerconfig, _ = cmd.Flags().GetString(\"preflight-dockerconfig\")", + "\ttestParams.Intrusive, _ = cmd.Flags().GetBool(\"intrusive\")", + 
"\ttestParams.AllowPreflightInsecure, _ = cmd.Flags().GetBool(\"allow-preflight-insecure\")", + "\ttestParams.IncludeWebFilesInOutputFolder, _ = cmd.Flags().GetBool(\"include-web-files\")", + "\ttestParams.EnableDataCollection, _ = cmd.Flags().GetBool(\"enable-data-collection\")", + "\ttestParams.EnableXMLCreation, _ = cmd.Flags().GetBool(\"create-xml-junit-file\")", + "\ttestParams.CertSuiteProbeImage, _ = cmd.Flags().GetString(\"certsuite-probe-image\")", + "\ttestParams.DaemonsetCPUReq, _ = cmd.Flags().GetString(\"daemonset-cpu-req\")", + "\ttestParams.DaemonsetCPULim, _ = cmd.Flags().GetString(\"daemonset-cpu-lim\")", + "\ttestParams.DaemonsetMemReq, _ = cmd.Flags().GetString(\"daemonset-mem-req\")", + "\ttestParams.DaemonsetMemLim, _ = cmd.Flags().GetString(\"daemonset-mem-lim\")", + "\ttestParams.SanitizeClaim, _ = cmd.Flags().GetBool(\"sanitize-claim\")", + "\ttestParams.AllowNonRunning, _ = cmd.Flags().GetBool(\"allow-non-running\")", + "\ttestParams.ConnectAPIKey, _ = cmd.Flags().GetString(\"connect-api-key\")", + "\ttestParams.ConnectProjectID, _ = cmd.Flags().GetString(\"connect-project-id\")", + "\ttestParams.ConnectAPIBaseURL, _ = cmd.Flags().GetString(\"connect-api-base-url\")", + "\ttestParams.ConnectAPIProxyURL, _ = cmd.Flags().GetString(\"connect-api-proxy-url\")", + "\ttestParams.ConnectAPIProxyPort, _ = cmd.Flags().GetString(\"connect-api-proxy-port\")", + "\ttimeoutStr, _ := cmd.Flags().GetString(\"timeout\")", + "", + "\t// Check if the output directory exists and, if not, create it", + "\tif _, err := os.Stat(testParams.OutputDir); os.IsNotExist(err) {", + "\t\tvar dirPerm fs.FileMode = 0o755 // default permissions for a directory", + "\t\terr := os.MkdirAll(testParams.OutputDir, dirPerm)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not create directory %q, err: %v\", testParams.OutputDir, err)", + "\t\t}", + "\t} else if err != nil {", + "\t\treturn fmt.Errorf(\"could not check directory %q, err: %v\", testParams.OutputDir, 
err)", + "\t}", + "", + "\t// Process the timeout flag", + "\tconst timeoutDefaultvalue = 24 * time.Hour", + "\ttimeout, err := time.ParseDuration(timeoutStr)", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to parse timeout flag %q, err: %v. Using default timeout value %v\", timeoutStr, err, timeoutDefaultvalue)", + "\t\ttestParams.Timeout = timeoutDefaultvalue", + "\t} else {", + "\t\ttestParams.Timeout = timeout", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "runTestSuite", + "qualifiedName": "runTestSuite", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "runTestSuite Initializes test parameters and executes the suite in either server or standalone mode\n\nThe function reads command flags to set up test configuration, then checks if\na web‑server mode is requested. In server mode it starts an HTTP listener\nserving results; otherwise it runs the certification suite locally, handling\nstartup, execution, shutdown, and error reporting.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/run/run.go:136", + "calls": [ + { + "name": "initTestParamsFromFlags", + "kind": "function", + "source": [ + "func initTestParamsFromFlags(cmd *cobra.Command) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Fetch test params from flags", + "\ttestParams.OutputDir, _ = cmd.Flags().GetString(\"output-dir\")", + "\ttestParams.LabelsFilter, _ = cmd.Flags().GetString(\"label-filter\")", + "\ttestParams.ServerMode, _ = cmd.Flags().GetBool(\"server-mode\")", + "\ttestParams.ConfigFile, _ = cmd.Flags().GetString(\"config-file\")", + "\ttestParams.Kubeconfig, _ = cmd.Flags().GetString(\"kubeconfig\")", + "\ttestParams.OmitArtifactsZipFile, _ = cmd.Flags().GetBool(\"omit-artifacts-zip-file\")", + "\ttestParams.LogLevel, _ = cmd.Flags().GetString(\"log-level\")", + "\ttestParams.OfflineDB, _ = cmd.Flags().GetString(\"offline-db\")", + "\ttestParams.PfltDockerconfig, _ = 
cmd.Flags().GetString(\"preflight-dockerconfig\")", + "\ttestParams.Intrusive, _ = cmd.Flags().GetBool(\"intrusive\")", + "\ttestParams.AllowPreflightInsecure, _ = cmd.Flags().GetBool(\"allow-preflight-insecure\")", + "\ttestParams.IncludeWebFilesInOutputFolder, _ = cmd.Flags().GetBool(\"include-web-files\")", + "\ttestParams.EnableDataCollection, _ = cmd.Flags().GetBool(\"enable-data-collection\")", + "\ttestParams.EnableXMLCreation, _ = cmd.Flags().GetBool(\"create-xml-junit-file\")", + "\ttestParams.CertSuiteProbeImage, _ = cmd.Flags().GetString(\"certsuite-probe-image\")", + "\ttestParams.DaemonsetCPUReq, _ = cmd.Flags().GetString(\"daemonset-cpu-req\")", + "\ttestParams.DaemonsetCPULim, _ = cmd.Flags().GetString(\"daemonset-cpu-lim\")", + "\ttestParams.DaemonsetMemReq, _ = cmd.Flags().GetString(\"daemonset-mem-req\")", + "\ttestParams.DaemonsetMemLim, _ = cmd.Flags().GetString(\"daemonset-mem-lim\")", + "\ttestParams.SanitizeClaim, _ = cmd.Flags().GetBool(\"sanitize-claim\")", + "\ttestParams.AllowNonRunning, _ = cmd.Flags().GetBool(\"allow-non-running\")", + "\ttestParams.ConnectAPIKey, _ = cmd.Flags().GetString(\"connect-api-key\")", + "\ttestParams.ConnectProjectID, _ = cmd.Flags().GetString(\"connect-project-id\")", + "\ttestParams.ConnectAPIBaseURL, _ = cmd.Flags().GetString(\"connect-api-base-url\")", + "\ttestParams.ConnectAPIProxyURL, _ = cmd.Flags().GetString(\"connect-api-proxy-url\")", + "\ttestParams.ConnectAPIProxyPort, _ = cmd.Flags().GetString(\"connect-api-proxy-port\")", + "\ttimeoutStr, _ := cmd.Flags().GetString(\"timeout\")", + "", + "\t// Check if the output directory exists and, if not, create it", + "\tif _, err := os.Stat(testParams.OutputDir); os.IsNotExist(err) {", + "\t\tvar dirPerm fs.FileMode = 0o755 // default permissions for a directory", + "\t\terr := os.MkdirAll(testParams.OutputDir, dirPerm)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not create directory %q, err: %v\", testParams.OutputDir, err)", + "\t\t}", 
+ "\t} else if err != nil {", + "\t\treturn fmt.Errorf(\"could not check directory %q, err: %v\", testParams.OutputDir, err)", + "\t}", + "", + "\t// Process the timeout flag", + "\tconst timeoutDefaultvalue = 24 * time.Hour", + "\ttimeout, err := time.ParseDuration(timeoutStr)", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to parse timeout flag %q, err: %v. Using default timeout value %v\", timeoutStr, err, timeoutDefaultvalue)", + "\t\ttestParams.Timeout = timeoutDefaultvalue", + "\t} else {", + "\t\ttestParams.Timeout = timeout", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "StartServer", + "kind": "function", + "source": [ + "func StartServer(outputFolder string) {", + "\tctx := context.TODO()", + "\tserver := \u0026http.Server{", + "\t\tAddr: \":8084\", // Server address", + "\t\tReadTimeout: readTimeoutSeconds * time.Second, // Maximum duration for reading the entire request", + "\t\tBaseContext: func(l net.Listener) context.Context {", + "\t\t\tctx = context.WithValue(ctx, outputFolderCtxKey, 
outputFolder)", + "\t\t\treturn ctx", + "\t\t},", + "\t}", + "", + "\tinstallReqHandlers()", + "", + "\thttp.HandleFunc(\"/runFunction\", runHandler)", + "", + "\tlog.Info(\"Server is running on :8084...\")", + "\tif err := server.ListenAndServe(); err != nil {", + "\t\tpanic(err)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: 
%s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Shutdown", + "kind": "function", + "source": [ + "func Shutdown() {", + "\terr := log.CloseGlobalLogFile()", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not close the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif 
env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := 
results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + 
"\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove 
zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "globals": [ + { + "name": "runCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/run/run.go:19" + } + ], + "consts": [ + { + "name": "timeoutFlagDefaultvalue", + "exported": false, + "position": 
"/Users/deliedit/dev/certsuite/cmd/certsuite/run/run.go:16" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload", + "name": "upload", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "github.com/spf13/cobra" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates the upload command group for the CLI\n\nThis function constructs a cobra.Command that represents the upload feature\nof the tool. It registers subcommands, such as those handling result\nspreadsheets, by adding them to the main upload command. The resulting\ncommand is returned for integration into the root command hierarchy.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/upload.go:21", + "calls": [ + { + "name": "AddCommand", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026resultsFilePath, \"results-file\", \"f\", \"\", \"Required: path to results file\")", + "\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026rootFolderURL, \"dest-url\", \"d\", \"\", \"Required: Destination drive folder's URL\")", + "\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026ocpVersion, \"version\", \"v\", \"\", \"Optional: OCP Version\")", + "\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026credentials, \"credentials\", \"c\", \"credentials.json\", \"Optional: Google credentials file path, default path: credentials.json\")", + "", + "\terr := uploadResultSpreadSheetCmd.MarkFlagRequired(\"results-file\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark results file 
path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\terr = uploadResultSpreadSheetCmd.MarkFlagRequired(\"dest-url\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark dest url path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\treturn uploadResultSpreadSheetCmd", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + "\tupload.AddCommand(resultsspreadsheet.NewCommand())", + "", + "\treturn upload", + "}" + ] + } + ], + "globals": [ + { + "name": "upload", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/upload.go:9" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "resultsspreadsheet", + "files": 4, + "imports": [ + "context", + "encoding/csv", + "fmt", + "github.com/spf13/cobra", + "google.golang.org/api/drive/v3", + "google.golang.org/api/option", + "google.golang.org/api/sheets/v4", + "log", + "net/url", + "os", + "strings", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "CreateSheetsAndDriveServices", + 
"qualifiedName": "CreateSheetsAndDriveServices", + "exported": true, + "signature": "func(string)(*sheets.Service, *drive.Service, error)", + "doc": "CreateSheetsAndDriveServices Initializes Google Sheets and Drive services\n\nThis function takes a path to credentials and uses it to create authenticated\nclients for both the Sheets and Drive APIs. It returns the two service\ninstances or an error if either creation fails.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:93", + "calls": [ + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "NewService", + "kind": "function" + }, + { + "pkgPath": "google.golang.org/api/option", + "name": "WithCredentialsFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "NewService", + "kind": "function" + }, + { + "pkgPath": "google.golang.org/api/option", + "name": "WithCredentialsFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable 
to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CreateSheetsAndDriveServices(credentials string) (sheetService 
*sheets.Service, driveService *drive.Service, err error) {", + "\tctx := context.TODO()", + "", + "\tsheetSrv, err := sheets.NewService(ctx, option.WithCredentialsFile(credentials))", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"unable to retrieve Sheets service: %v\", err)", + "\t}", + "", + "\tdriveSrv, err := drive.NewService(ctx, option.WithCredentialsFile(credentials))", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"unable to retrieve Drive service: %v\", err)", + "\t}", + "", + "\treturn sheetSrv, driveSrv, nil", + "}" + ] + }, + { + "name": "GetHeaderIndicesByColumnNames", + "qualifiedName": "GetHeaderIndicesByColumnNames", + "exported": true, + "signature": "func([]string, []string)([]int, error)", + "doc": "GetHeaderIndicesByColumnNames Finds header positions for specified column names\n\nThe function scans a slice of header strings to locate the index of each\nrequested column name. It returns an integer slice containing the indices in\nthe same order as the input names or an error if any name is missing from the\nheaders. 
The returned indices can be used to reference columns when\nmanipulating spreadsheet data.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go:45", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "addDescendingSortFilterToSheet", + "kind": "function", + "source": [ + "func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{colName})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSortRange: \u0026sheets.SortRangeRequest{", + "\t\t\t\tRange: \u0026sheets.GridRange{", + "\t\t\t\t\tSheetId: sheetID,", + "\t\t\t\t\tStartRowIndex: 1,", + "\t\t\t\t},", + "\t\t\t\tSortSpecs: []*sheets.SortSpec{", + "\t\t\t\t\t{", + "\t\t\t\t\t\tDimensionIndex: int64(indices[0]),", + "\t\t\t\t\t\tSortOrder: \"DESCENDING\",", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "addFilterByFailedAndMandatoryToSheet", + "kind": "function", + "source": [ + "func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"State\", \"Mandatory/Optional\"})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tstateColIndex := indices[0]", + "\tisMandatoryColIndex := indices[1]", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSetBasicFilter: \u0026sheets.SetBasicFilterRequest{", + "\t\t\t\tFilter: \u0026sheets.BasicFilter{", + "\t\t\t\t\tRange: \u0026sheets.GridRange{SheetId: sheetID},", + "\t\t\t\t\tCriteria: map[string]sheets.FilterCriteria{", + "\t\t\t\t\t\tfmt.Sprint(stateColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"failed\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t\tfmt.Sprint(isMandatoryColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"Mandatory\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", 
+ "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\treturn err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createConclusionsSheet", + "kind": "function", + "source": [ + "func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string) (*sheets.Sheet, error) {", + "\tworkloadsFolderName := \"Results Per Workload\"", + "\tworkloadsResultsFolder, err := createDriveFolder(driveService, workloadsFolderName, mainResultsFolderID)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create workloads results folder: %v\", err)", + "\t}", + "", + "\trawSheetHeaders := GetHeadersFromSheet(rawResultsSheet)", + "\tcolsIndices, err := GetHeaderIndicesByColumnNames(rawSheetHeaders, []string{workloadNameRawResultsCol, workloadTypeRawResultsCol, operatorVersionRawResultsCol})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadNameColIndex := colsIndices[0]", + "\tworkloadTypeColIndex := colsIndices[1]", + "\toperatorVersionColIndex := colsIndices[2]", + "", + "\t// Initialize sheet with headers", + "\tconclusionsSheetRowsValues := []*sheets.CellData{}", + "\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\theaderCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026colHeader}}", + "\t\tconclusionsSheetRowsValues = append(conclusionsSheetRowsValues, headerCellData)", + "\t}", + "\tconclusionsSheetRows := []*sheets.RowData{{Values: conclusionsSheetRowsValues}}", + "", + "\t// If rawResultsSheet has now workloads data, return an error", + "\tif len(rawResultsSheet.Data[0].RowData) \u003c= 1 {", + "\t\treturn nil, fmt.Errorf(\"raw results has no workloads data\")", + "\t}", + 
"", + "\t// Extract unique values from the CNFName column and fill sheet", + "\tuniqueWorkloadNames := make(map[string]bool)", + "\tfor _, rawResultsSheetrow := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tworkloadName := *rawResultsSheetrow.Values[workloadNameColIndex].UserEnteredValue.StringValue", + "\t\t// if workload has already been added to sheet, skip it", + "\t\tif uniqueWorkloadNames[workloadName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\tuniqueWorkloadNames[workloadName] = true", + "", + "\t\tcurConsclusionRowValues := []*sheets.CellData{}", + "\t\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\t\tcurCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{}}", + "", + "\t\t\tswitch colHeader {", + "\t\t\tcase categoryConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[workloadTypeColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase workloadVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[operatorVersionColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase ocpVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(ocpVersion + \" \")", + "", + "\t\t\tcase WorkloadNameConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = \u0026workloadName", + "", + "\t\t\tcase ResultsConclusionsCol:", + "\t\t\t\tworkloadResultsSpreadsheet, err := createSingleWorkloadRawResultsSpreadSheet(sheetsService, driveService, workloadsResultsFolder, rawResultsSheet, workloadName)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\treturn nil, fmt.Errorf(\"error has occurred while creating %s results file: %v\", workloadName, err)", + "\t\t\t\t}", + "", + "\t\t\t\thyperlinkFormula := fmt.Sprintf(\"=HYPERLINK(%q, %q)\", workloadResultsSpreadsheet.SpreadsheetUrl, \"Results\")", + "\t\t\t\tcurCellData.UserEnteredValue.FormulaValue = \u0026hyperlinkFormula", + "", + 
"\t\t\tdefault:", + "\t\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(\" \")", + "\t\t\t}", + "", + "\t\t\tcurConsclusionRowValues = append(curConsclusionRowValues, curCellData)", + "\t\t}", + "\t\tconclusionsSheetRows = append(conclusionsSheetRows, \u0026sheets.RowData{Values: curConsclusionRowValues})", + "\t}", + "", + "\tconclusionSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: ConclusionSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: conclusionsSheetRows}},", + "\t}", + "", + "\treturn conclusionSheet, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createSingleWorkloadRawResultsSheet", + "kind": "function", + "source": [ + "func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Sheet, error) {", + "\t// Initialize sheet with the two new column headers only.", + "\tfilteredRows := []*sheets.RowData{{Values: []*sheets.CellData{", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(conclusionIndividualSingleWorkloadSheetCol)}},", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(nextStepAIIfFailSingleWorkloadSheetCol)}},", + "\t}}}", + "", + "\t// Add existing column headers from the rawResultsSheet", + "\tfilteredRows[0].Values = append(filteredRows[0].Values, rawResultsSheet.Data[0].RowData[0].Values...)", + "", + "\theaders := GetHeadersFromSheet(rawResultsSheet)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"CNFName\"})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\tworkloadNameIndex := indices[0]", + "", + "\t// add to sheet only rows of given workload name", + "\tfor _, row := range 
rawResultsSheet.Data[0].RowData[1:] {", + "\t\tif len(row.Values) \u003c= workloadNameIndex {", + "\t\t\treturn nil, fmt.Errorf(\"workload %s not found in raw spreadsheet\", workloadName)", + "\t\t}", + "\t\tcurWorkloadName := *row.Values[workloadNameIndex].UserEnteredValue.StringValue", + "\t\tif curWorkloadName == workloadName {", + "\t\t\t// add empty values in 2 added columns", + "\t\t\tnewRow := \u0026sheets.RowData{", + "\t\t\t\tValues: append([]*sheets.CellData{{}, {}}, row.Values...),", + "\t\t\t}", + "\t\t\tfilteredRows = append(filteredRows, newRow)", + "\t\t}", + "\t}", + "", + "\tworkloadResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: \"results\",", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: filteredRows}},", + "\t}", + "", + "\treturn workloadResultsSheet, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) {", + "\tindices := []int{}", + "\tfor _, name := range names {", + "\t\tfound := false", + "\t\tfor i, val := range headers {", + "\t\t\tif name == val {", + "\t\t\t\tfound = true", + "\t\t\t\tindices = append(indices, i)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"column %s doesn't exist in given headers list\", name)", + "\t\t}", + "\t}", + "\treturn indices, nil", + "}" + ] + }, + { + "name": "GetHeadersFromSheet", + "qualifiedName": "GetHeadersFromSheet", + "exported": true, + "signature": "func(*sheets.Sheet)([]string)", + "doc": "GetHeadersFromSheet Retrieves header names from a spreadsheet sheet\n\nThe function accesses the first row of the provided sheet, extracts each\ncell's string value, and returns them as a slice of strings. It assumes that\nthe sheet contains at least one row with headers. 
The returned slice\npreserves the order of columns as they appear in the sheet.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go:15", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createConclusionsSheet", + "kind": "function", + "source": [ + "func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string) (*sheets.Sheet, error) {", + "\tworkloadsFolderName := \"Results Per Workload\"", + "\tworkloadsResultsFolder, err := createDriveFolder(driveService, workloadsFolderName, mainResultsFolderID)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create workloads results folder: %v\", err)", + "\t}", + "", + "\trawSheetHeaders := GetHeadersFromSheet(rawResultsSheet)", + "\tcolsIndices, err := GetHeaderIndicesByColumnNames(rawSheetHeaders, []string{workloadNameRawResultsCol, workloadTypeRawResultsCol, operatorVersionRawResultsCol})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadNameColIndex := colsIndices[0]", + "\tworkloadTypeColIndex := colsIndices[1]", + "\toperatorVersionColIndex := colsIndices[2]", + "", + "\t// Initialize sheet with headers", + "\tconclusionsSheetRowsValues := []*sheets.CellData{}", + "\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\theaderCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026colHeader}}", + "\t\tconclusionsSheetRowsValues = append(conclusionsSheetRowsValues, headerCellData)", + "\t}", + "\tconclusionsSheetRows := []*sheets.RowData{{Values: conclusionsSheetRowsValues}}", + "", + "\t// If rawResultsSheet has now workloads data, return an error", + "\tif len(rawResultsSheet.Data[0].RowData) \u003c= 1 {", + "\t\treturn nil, 
fmt.Errorf(\"raw results has no workloads data\")", + "\t}", + "", + "\t// Extract unique values from the CNFName column and fill sheet", + "\tuniqueWorkloadNames := make(map[string]bool)", + "\tfor _, rawResultsSheetrow := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tworkloadName := *rawResultsSheetrow.Values[workloadNameColIndex].UserEnteredValue.StringValue", + "\t\t// if workload has already been added to sheet, skip it", + "\t\tif uniqueWorkloadNames[workloadName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\tuniqueWorkloadNames[workloadName] = true", + "", + "\t\tcurConsclusionRowValues := []*sheets.CellData{}", + "\t\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\t\tcurCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{}}", + "", + "\t\t\tswitch colHeader {", + "\t\t\tcase categoryConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[workloadTypeColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase workloadVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[operatorVersionColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase ocpVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(ocpVersion + \" \")", + "", + "\t\t\tcase WorkloadNameConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = \u0026workloadName", + "", + "\t\t\tcase ResultsConclusionsCol:", + "\t\t\t\tworkloadResultsSpreadsheet, err := createSingleWorkloadRawResultsSpreadSheet(sheetsService, driveService, workloadsResultsFolder, rawResultsSheet, workloadName)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\treturn nil, fmt.Errorf(\"error has occurred while creating %s results file: %v\", workloadName, err)", + "\t\t\t\t}", + "", + "\t\t\t\thyperlinkFormula := fmt.Sprintf(\"=HYPERLINK(%q, %q)\", workloadResultsSpreadsheet.SpreadsheetUrl, \"Results\")", + 
"\t\t\t\tcurCellData.UserEnteredValue.FormulaValue = \u0026hyperlinkFormula", + "", + "\t\t\tdefault:", + "\t\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(\" \")", + "\t\t\t}", + "", + "\t\t\tcurConsclusionRowValues = append(curConsclusionRowValues, curCellData)", + "\t\t}", + "\t\tconclusionsSheetRows = append(conclusionsSheetRows, \u0026sheets.RowData{Values: curConsclusionRowValues})", + "\t}", + "", + "\tconclusionSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: ConclusionSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: conclusionsSheetRows}},", + "\t}", + "", + "\treturn conclusionSheet, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createSingleWorkloadRawResultsSheet", + "kind": "function", + "source": [ + "func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Sheet, error) {", + "\t// Initialize sheet with the two new column headers only.", + "\tfilteredRows := []*sheets.RowData{{Values: []*sheets.CellData{", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(conclusionIndividualSingleWorkloadSheetCol)}},", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(nextStepAIIfFailSingleWorkloadSheetCol)}},", + "\t}}}", + "", + "\t// Add existing column headers from the rawResultsSheet", + "\tfilteredRows[0].Values = append(filteredRows[0].Values, rawResultsSheet.Data[0].RowData[0].Values...)", + "", + "\theaders := GetHeadersFromSheet(rawResultsSheet)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"CNFName\"})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\tworkloadNameIndex := indices[0]", + 
"", + "\t// add to sheet only rows of given workload name", + "\tfor _, row := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tif len(row.Values) \u003c= workloadNameIndex {", + "\t\t\treturn nil, fmt.Errorf(\"workload %s not found in raw spreadsheet\", workloadName)", + "\t\t}", + "\t\tcurWorkloadName := *row.Values[workloadNameIndex].UserEnteredValue.StringValue", + "\t\tif curWorkloadName == workloadName {", + "\t\t\t// add empty values in 2 added columns", + "\t\t\tnewRow := \u0026sheets.RowData{", + "\t\t\t\tValues: append([]*sheets.CellData{{}, {}}, row.Values...),", + "\t\t\t}", + "\t\t\tfilteredRows = append(filteredRows, newRow)", + "\t\t}", + "\t}", + "", + "\tworkloadResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: \"results\",", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: filteredRows}},", + "\t}", + "", + "\treturn workloadResultsSheet, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetHeadersFromSheet(sheet *sheets.Sheet) []string {", + "\theaders := []string{}", + "\tfor _, val := range sheet.Data[0].RowData[0].Values {", + "\t\theaders = append(headers, *val.UserEnteredValue.StringValue)", + "\t}", + "\treturn headers", + "}" + ] + }, + { + "name": "GetHeadersFromValueRange", + "qualifiedName": "GetHeadersFromValueRange", + "exported": true, + "signature": "func(*sheets.ValueRange)([]string)", + "doc": "GetHeadersFromValueRange extracts header names from the first row of a spreadsheet\n\nThe function receives a ValueRange object containing cell values, accesses\nits first row, and converts each entry to a string using formatting logic. It\ncollects these strings into a slice that represents column headers for later\nlookup operations. 
The returned slice is used by other utilities to map\nheader names to column indices.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go:30", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "addDescendingSortFilterToSheet", + "kind": "function", + "source": [ + "func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{colName})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSortRange: \u0026sheets.SortRangeRequest{", + "\t\t\t\tRange: \u0026sheets.GridRange{", + "\t\t\t\t\tSheetId: sheetID,", + "\t\t\t\t\tStartRowIndex: 1,", + "\t\t\t\t},", + "\t\t\t\tSortSpecs: []*sheets.SortSpec{", + "\t\t\t\t\t{", + "\t\t\t\t\t\tDimensionIndex: int64(indices[0]),", + "\t\t\t\t\t\tSortOrder: \"DESCENDING\",", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "addFilterByFailedAndMandatoryToSheet", + "kind": "function", + "source": [ + "func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"State\", \"Mandatory/Optional\"})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tstateColIndex := indices[0]", + "\tisMandatoryColIndex := indices[1]", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSetBasicFilter: \u0026sheets.SetBasicFilterRequest{", + "\t\t\t\tFilter: \u0026sheets.BasicFilter{", + "\t\t\t\t\tRange: \u0026sheets.GridRange{SheetId: sheetID},", + "\t\t\t\t\tCriteria: map[string]sheets.FilterCriteria{", + "\t\t\t\t\t\tfmt.Sprint(stateColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"failed\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t\tfmt.Sprint(isMandatoryColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"Mandatory\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", 
+ "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\treturn err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetHeadersFromValueRange(sheetsValues *sheets.ValueRange) []string {", + "\theaders := []string{}", + "\tfor _, val := range sheetsValues.Values[0] {", + "\t\theaders = append(headers, fmt.Sprint(val))", + "\t}", + "\treturn headers", + "}" + ] + }, + { + "name": "GetSheetIDByName", + "qualifiedName": "GetSheetIDByName", + "exported": true, + "signature": "func(*sheets.Spreadsheet, string)(int64, error)", + "doc": "GetSheetIDByName Retrieves a sheet's numeric identifier by its title\n\nThis function scans the list of sheets in a spreadsheet for one whose title\nmatches the provided name. If found, it returns that sheet's unique ID and no\nerror; otherwise it returns -1 and an error describing the missing sheet.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go:68", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "addDescendingSortFilterToSheet", + "kind": "function", + "source": [ + "func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{colName})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tsheetID, err := 
GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSortRange: \u0026sheets.SortRangeRequest{", + "\t\t\t\tRange: \u0026sheets.GridRange{", + "\t\t\t\t\tSheetId: sheetID,", + "\t\t\t\t\tStartRowIndex: 1,", + "\t\t\t\t},", + "\t\t\t\tSortSpecs: []*sheets.SortSpec{", + "\t\t\t\t\t{", + "\t\t\t\t\t\tDimensionIndex: int64(indices[0]),", + "\t\t\t\t\t\tSortOrder: \"DESCENDING\",", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "addFilterByFailedAndMandatoryToSheet", + "kind": "function", + "source": [ + "func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"State\", \"Mandatory/Optional\"})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tstateColIndex := indices[0]", + "\tisMandatoryColIndex := indices[1]", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSetBasicFilter: 
\u0026sheets.SetBasicFilterRequest{", + "\t\t\t\tFilter: \u0026sheets.BasicFilter{", + "\t\t\t\t\tRange: \u0026sheets.GridRange{SheetId: sheetID},", + "\t\t\t\t\tCriteria: map[string]sheets.FilterCriteria{", + "\t\t\t\t\t\tfmt.Sprint(stateColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"failed\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t\tfmt.Sprint(isMandatoryColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"Mandatory\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\treturn err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetSheetIDByName(spreadsheet *sheets.Spreadsheet, name string) (int64, error) {", + "\tfor _, sheet := range spreadsheet.Sheets {", + "\t\tif sheet.Properties.Title == name {", + "\t\t\treturn sheet.Properties.SheetId, nil", + "\t\t}", + "\t}", + "\treturn -1, fmt.Errorf(\"there is no sheet named %s in spreadsheet %s\", name, spreadsheet.SpreadsheetUrl)", + "}" + ] + }, + { + "name": "MoveSpreadSheetToFolder", + "qualifiedName": "MoveSpreadSheetToFolder", + "exported": true, + "signature": "func(*drive.Service, *drive.File, *sheets.Spreadsheet)(error)", + "doc": "MoveSpreadSheetToFolder Moves a spreadsheet into a specified Google Drive folder\n\nThis function retrieves the current parent folders of the given spreadsheet\nusing the Drive service, then updates the file to add the target 
folder as a\nnew parent while removing any existing parents. It performs these operations\nvia the Drive API's Update call and logs fatal errors if any step fails. On\nsuccess it returns nil, indicating the spreadsheet has been relocated.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/drive_utils.go:56", + "calls": [ + { + "name": "Do", + "kind": "function" + }, + { + "name": "Fields", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "name": "AddParents", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "RemoveParents", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createSingleWorkloadRawResultsSpreadSheet", + "kind": "function", + "source": [ + "func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error) {", + "\tworkloadResultsSheet, err := createSingleWorkloadRawResultsSheet(rawResultsSheet, workloadName)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadResultsSpreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: fmt.Sprintf(\"%s Best Practices Test Results\", workloadName),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{workloadResultsSheet},", + "\t}", + "", + "\tworkloadResultsSpreadsheet, err = sheetService.Spreadsheets.Create(workloadResultsSpreadsheet).Do()", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + 
"", + "\tif err := addFilterByFailedAndMandatoryToSheet(sheetService, workloadResultsSpreadsheet, \"results\"); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, folder, workloadResultsSpreadsheet); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tlog.Printf(\"%s workload's results sheet has been created.\\n\", workloadName)", + "", + "\treturn workloadResultsSpreadsheet, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: %v\", err)", + "\t}", + 
"\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func MoveSpreadSheetToFolder(srv *drive.Service, folder *drive.File, spreadsheet *sheets.Spreadsheet) error {", + "\tfile, err := srv.Files.Get(spreadsheet.SpreadsheetId).Fields(\"parents\").Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to get file: %v\", err)", + "\t}", + "", + "\t// Collect the current parent IDs to remove (if needed)", + "\toldParents := append([]string{}, file.Parents...)", + "", + "\tupdateCall := srv.Files.Update(spreadsheet.SpreadsheetId, nil)", + "\tupdateCall.AddParents(folder.Id)", + "", + "\t// Remove the file from its old parents", + "\tif len(oldParents) \u003e 0 {", + "\t\tfor _, parent := range oldParents {", + "\t\t\tupdateCall.RemoveParents(parent)", + 
"\t\t}", + "\t}", + "", + "\t_, err = updateCall.Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable change file location: %v\", err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Creates a command for uploading results spreadsheets\n\nThis function configures flags for the spreadsheet upload command, including\npaths to the results file, destination URL, optional OCP version, and\ncredentials file. It marks the required flags and handles errors by logging\nfatal messages if flag validation fails. The configured command is then\nreturned for use in the larger CLI.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:46", + "calls": [ + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "StringVarP", + "kind": "function" + }, + { + "name": "Flags", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "MarkFlagRequired", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tupload.AddCommand(resultsspreadsheet.NewCommand())", + "", + "\treturn upload", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCommand() *cobra.Command {", + 
"\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026resultsFilePath, \"results-file\", \"f\", \"\", \"Required: path to results file\")", + "\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026rootFolderURL, \"dest-url\", \"d\", \"\", \"Required: Destination drive folder's URL\")", + "\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026ocpVersion, \"version\", \"v\", \"\", \"Optional: OCP Version\")", + "\tuploadResultSpreadSheetCmd.Flags().StringVarP(\u0026credentials, \"credentials\", \"c\", \"credentials.json\", \"Optional: Google credentials file path, default path: credentials.json\")", + "", + "\terr := uploadResultSpreadSheetCmd.MarkFlagRequired(\"results-file\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark results file path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\terr = uploadResultSpreadSheetCmd.MarkFlagRequired(\"dest-url\")", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Failed to mark dest url path as required parameter: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\treturn uploadResultSpreadSheetCmd", + "}" + ] + }, + { + "name": "addBasicFilterToSpreadSheet", + "qualifiedName": "addBasicFilterToSpreadSheet", + "exported": false, + "signature": "func(*sheets.Service, *sheets.Spreadsheet)(error)", + "doc": "addBasicFilterToSpreadSheet Adds a basic filter to every sheet in the spreadsheet\n\nThe function iterates over each sheet in the provided spreadsheet, creating a\nrequest that sets a basic filter covering the entire sheet range. It then\nsends all requests as a batch update to the Google Sheets API. 
If the update\nsucceeds it returns nil; otherwise it propagates the error.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go:83", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + "name": "BatchUpdate", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + 
"\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addBasicFilterToSpreadSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet) error {", + "\trequests := []*sheets.Request{}", + "\tfor _, sheet := range spreadsheet.Sheets {", + "\t\trequests = append(requests, \u0026sheets.Request{", + "\t\t\tSetBasicFilter: \u0026sheets.SetBasicFilterRequest{", + "\t\t\t\tFilter: \u0026sheets.BasicFilter{", + "\t\t\t\t\tRange: \u0026sheets.GridRange{SheetId: sheet.Properties.SheetId},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t})", + "\t}", + "", + "\t_, err := srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "addDescendingSortFilterToSheet", + "qualifiedName": 
"addDescendingSortFilterToSheet", + "exported": false, + "signature": "func(*sheets.Service, *sheets.Spreadsheet, string, string)(error)", + "doc": "addDescendingSortFilterToSheet applies a descending sort filter to a specified column in a spreadsheet sheet\n\nThis routine retrieves the values of the target sheet, determines the index\nof the requested column header, obtains the sheet ID, and then constructs a\nbatch update request that sorts all rows below the header in descending order\nbased on that column. It returns an error if any step fails, otherwise\ncompletes silently.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go:111", + "calls": [ + { + "name": "Do", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetHeadersFromValueRange", + "kind": "function", + "source": [ + "func GetHeadersFromValueRange(sheetsValues *sheets.ValueRange) []string {", + "\theaders := []string{}", + "\tfor _, val := range sheetsValues.Values[0] {", + "\t\theaders = append(headers, fmt.Sprint(val))", + "\t}", + "\treturn headers", + "}" + ] + }, + { + "name": "GetHeaderIndicesByColumnNames", + "kind": "function", + "source": [ + "func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) {", + "\tindices := []int{}", + "\tfor _, name := range names {", + "\t\tfound := false", + "\t\tfor i, val := range headers {", + "\t\t\tif name == val {", + "\t\t\t\tfound = true", + "\t\t\t\tindices = append(indices, i)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"column %s doesn't exist in given headers list\", name)", + "\t\t}", + "\t}", + "\treturn indices, nil", + "}" + ] + }, + { + "name": "GetSheetIDByName", + "kind": "function", + "source": [ + "func GetSheetIDByName(spreadsheet *sheets.Spreadsheet, name string) (int64, error) {", + "\tfor _, sheet := range 
spreadsheet.Sheets {", + "\t\tif sheet.Properties.Title == name {", + "\t\t\treturn sheet.Properties.SheetId, nil", + "\t\t}", + "\t}", + "\treturn -1, fmt.Errorf(\"there is no sheet named %s in spreadsheet %s\", name, spreadsheet.SpreadsheetUrl)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "int64", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + "name": "BatchUpdate", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: 
%v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{colName})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: 
%v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSortRange: \u0026sheets.SortRangeRequest{", + "\t\t\t\tRange: \u0026sheets.GridRange{", + "\t\t\t\t\tSheetId: sheetID,", + "\t\t\t\t\tStartRowIndex: 1,", + "\t\t\t\t},", + "\t\t\t\tSortSpecs: []*sheets.SortSpec{", + "\t\t\t\t\t{", + "\t\t\t\t\t\tDimensionIndex: int64(indices[0]),", + "\t\t\t\t\t\tSortOrder: \"DESCENDING\",", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "addFilterByFailedAndMandatoryToSheet", + "qualifiedName": "addFilterByFailedAndMandatoryToSheet", + "exported": false, + "signature": "func(*sheets.Service, *sheets.Spreadsheet, string)(error)", + "doc": "addFilterByFailedAndMandatoryToSheet applies a filter to show only failed mandatory tests\n\nThis function retrieves the specified sheet’s data, identifies the columns\nfor test state and mandatory status, then builds a request to set a basic\nfilter that displays rows where the state is \"failed\" and the test is marked\nas \"Mandatory\". It executes this filter through a batch update on the\nspreadsheet. 
If any step fails, it returns an error describing the issue.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/sheet_utils.go:160", + "calls": [ + { + "name": "Do", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetHeadersFromValueRange", + "kind": "function", + "source": [ + "func GetHeadersFromValueRange(sheetsValues *sheets.ValueRange) []string {", + "\theaders := []string{}", + "\tfor _, val := range sheetsValues.Values[0] {", + "\t\theaders = append(headers, fmt.Sprint(val))", + "\t}", + "\treturn headers", + "}" + ] + }, + { + "name": "GetHeaderIndicesByColumnNames", + "kind": "function", + "source": [ + "func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) {", + "\tindices := []int{}", + "\tfor _, name := range names {", + "\t\tfound := false", + "\t\tfor i, val := range headers {", + "\t\t\tif name == val {", + "\t\t\t\tfound = true", + "\t\t\t\tindices = append(indices, i)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"column %s doesn't exist in given headers list\", name)", + "\t\t}", + "\t}", + "\treturn indices, nil", + "}" + ] + }, + { + "name": "GetSheetIDByName", + "kind": "function", + "source": [ + "func GetSheetIDByName(spreadsheet *sheets.Spreadsheet, name string) (int64, error) {", + "\tfor _, sheet := range spreadsheet.Sheets {", + "\t\tif sheet.Properties.Title == name {", + "\t\t\treturn sheet.Properties.SheetId, nil", + "\t\t}", + "\t}", + "\treturn -1, fmt.Errorf(\"there is no sheet named %s in spreadsheet %s\", name, spreadsheet.SpreadsheetUrl)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + 
"name": "BatchUpdate", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createSingleWorkloadRawResultsSpreadSheet", + "kind": "function", + "source": [ + "func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error) {", + "\tworkloadResultsSheet, err := createSingleWorkloadRawResultsSheet(rawResultsSheet, workloadName)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadResultsSpreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: fmt.Sprintf(\"%s Best Practices Test Results\", workloadName),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{workloadResultsSheet},", + "\t}", + "", + "\tworkloadResultsSpreadsheet, err = sheetService.Spreadsheets.Create(workloadResultsSpreadsheet).Do()", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := addFilterByFailedAndMandatoryToSheet(sheetService, workloadResultsSpreadsheet, \"results\"); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, folder, workloadResultsSpreadsheet); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tlog.Printf(\"%s workload's results sheet has been created.\\n\", workloadName)", + "", + "\treturn workloadResultsSpreadsheet, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := 
GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"State\", \"Mandatory/Optional\"})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tstateColIndex := indices[0]", + "\tisMandatoryColIndex := indices[1]", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSetBasicFilter: \u0026sheets.SetBasicFilterRequest{", + "\t\t\t\tFilter: \u0026sheets.BasicFilter{", + "\t\t\t\t\tRange: \u0026sheets.GridRange{SheetId: sheetID},", + "\t\t\t\t\tCriteria: map[string]sheets.FilterCriteria{", + "\t\t\t\t\t\tfmt.Sprint(stateColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"failed\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t\tfmt.Sprint(isMandatoryColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"Mandatory\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\treturn err", + "}" + ] + }, + { + "name": "createConclusionsSheet", + "qualifiedName": "createConclusionsSheet", + "exported": false, + "signature": "func(*sheets.Service, *drive.Service, *sheets.Sheet, string)(*sheets.Sheet, error)", + "doc": "createConclusionsSheet Creates a conclusion sheet summarizing unique workloads\n\nThe function builds a 
new Google Sheets tab that lists each distinct workload\nfrom the raw results, along with its category, version, OCP release, and a\nhyperlink to a dedicated results spreadsheet. It first creates a folder for\nper‑workload sheets, then iterates over the raw data rows, extracting\nunique names and assembling row values. For every new workload it generates\nan individual results file and inserts a link; if any step fails it returns\nan error.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:242", + "calls": [ + { + "name": "createDriveFolder", + "kind": "function", + "source": [ + "func createDriveFolder(srv *drive.Service, folderName, parentFolderID string) (*drive.File, error) {", + "\tdriveFolder := \u0026drive.File{", + "\t\tName: folderName,", + "\t\tParents: []string{parentFolderID},", + "\t\tMimeType: \"application/vnd.google-apps.folder\",", + "\t}", + "", + "\t// Search for an existing folder with the same name", + "\tq := fmt.Sprintf(\"name = '%s' and mimeType = 'application/vnd.google-apps.folder' and '%s' in parents and trashed = false\", folderName, parentFolderID)", + "\tcall := srv.Files.List().Q(q).Fields(\"files(id, name)\")", + "", + "\tfiles, err := call.Do()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to list files: %v\", err)", + "\t}", + "", + "\tif len(files.Files) \u003e 0 {", + "\t\treturn nil, fmt.Errorf(\"folder %s already exists in %s folder ID\", folderName, parentFolderID)", + "\t}", + "", + "\tcreatedFolder, err := srv.Files.Create(driveFolder).Do()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create folder: %v\", err)", + "\t}", + "", + "\treturn createdFolder, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetHeadersFromSheet", + "kind": "function", + "source": [ + "func GetHeadersFromSheet(sheet *sheets.Sheet) []string {", + "\theaders := []string{}", + 
"\tfor _, val := range sheet.Data[0].RowData[0].Values {", + "\t\theaders = append(headers, *val.UserEnteredValue.StringValue)", + "\t}", + "\treturn headers", + "}" + ] + }, + { + "name": "GetHeaderIndicesByColumnNames", + "kind": "function", + "source": [ + "func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) {", + "\tindices := []int{}", + "\tfor _, name := range names {", + "\t\tfound := false", + "\t\tfor i, val := range headers {", + "\t\t\tif name == val {", + "\t\t\t\tfound = true", + "\t\t\t\tindices = append(indices, i)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"column %s doesn't exist in given headers list\", name)", + "\t\t}", + "\t}", + "\treturn indices, nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "stringToPointer", + "kind": "function" + }, + { + "name": "createSingleWorkloadRawResultsSpreadSheet", + "kind": "function", + "source": [ + "func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error) {", + "\tworkloadResultsSheet, err := createSingleWorkloadRawResultsSheet(rawResultsSheet, workloadName)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadResultsSpreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: fmt.Sprintf(\"%s Best Practices Test Results\", workloadName),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{workloadResultsSheet},", + "\t}", + "", + "\tworkloadResultsSpreadsheet, err = sheetService.Spreadsheets.Create(workloadResultsSpreadsheet).Do()", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := 
addFilterByFailedAndMandatoryToSheet(sheetService, workloadResultsSpreadsheet, \"results\"); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, folder, workloadResultsSpreadsheet); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tlog.Printf(\"%s workload's results sheet has been created.\\n\", workloadName)", + "", + "\treturn workloadResultsSpreadsheet, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "stringToPointer", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been 
generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string) (*sheets.Sheet, error) {", + "\tworkloadsFolderName := \"Results Per Workload\"", + "\tworkloadsResultsFolder, err := createDriveFolder(driveService, workloadsFolderName, mainResultsFolderID)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to 
create workloads results folder: %v\", err)", + "\t}", + "", + "\trawSheetHeaders := GetHeadersFromSheet(rawResultsSheet)", + "\tcolsIndices, err := GetHeaderIndicesByColumnNames(rawSheetHeaders, []string{workloadNameRawResultsCol, workloadTypeRawResultsCol, operatorVersionRawResultsCol})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadNameColIndex := colsIndices[0]", + "\tworkloadTypeColIndex := colsIndices[1]", + "\toperatorVersionColIndex := colsIndices[2]", + "", + "\t// Initialize sheet with headers", + "\tconclusionsSheetRowsValues := []*sheets.CellData{}", + "\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\theaderCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026colHeader}}", + "\t\tconclusionsSheetRowsValues = append(conclusionsSheetRowsValues, headerCellData)", + "\t}", + "\tconclusionsSheetRows := []*sheets.RowData{{Values: conclusionsSheetRowsValues}}", + "", + "\t// If rawResultsSheet has now workloads data, return an error", + "\tif len(rawResultsSheet.Data[0].RowData) \u003c= 1 {", + "\t\treturn nil, fmt.Errorf(\"raw results has no workloads data\")", + "\t}", + "", + "\t// Extract unique values from the CNFName column and fill sheet", + "\tuniqueWorkloadNames := make(map[string]bool)", + "\tfor _, rawResultsSheetrow := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tworkloadName := *rawResultsSheetrow.Values[workloadNameColIndex].UserEnteredValue.StringValue", + "\t\t// if workload has already been added to sheet, skip it", + "\t\tif uniqueWorkloadNames[workloadName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\tuniqueWorkloadNames[workloadName] = true", + "", + "\t\tcurConsclusionRowValues := []*sheets.CellData{}", + "\t\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\t\tcurCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{}}", + "", + "\t\t\tswitch colHeader {", + "\t\t\tcase categoryConclusionsCol:", + 
"\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[workloadTypeColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase workloadVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[operatorVersionColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase ocpVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(ocpVersion + \" \")", + "", + "\t\t\tcase WorkloadNameConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = \u0026workloadName", + "", + "\t\t\tcase ResultsConclusionsCol:", + "\t\t\t\tworkloadResultsSpreadsheet, err := createSingleWorkloadRawResultsSpreadSheet(sheetsService, driveService, workloadsResultsFolder, rawResultsSheet, workloadName)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\treturn nil, fmt.Errorf(\"error has occurred while creating %s results file: %v\", workloadName, err)", + "\t\t\t\t}", + "", + "\t\t\t\thyperlinkFormula := fmt.Sprintf(\"=HYPERLINK(%q, %q)\", workloadResultsSpreadsheet.SpreadsheetUrl, \"Results\")", + "\t\t\t\tcurCellData.UserEnteredValue.FormulaValue = \u0026hyperlinkFormula", + "", + "\t\t\tdefault:", + "\t\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(\" \")", + "\t\t\t}", + "", + "\t\t\tcurConsclusionRowValues = append(curConsclusionRowValues, curCellData)", + "\t\t}", + "\t\tconclusionsSheetRows = append(conclusionsSheetRows, \u0026sheets.RowData{Values: curConsclusionRowValues})", + "\t}", + "", + "\tconclusionSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: ConclusionSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: conclusionsSheetRows}},", + "\t}", + "", + "\treturn conclusionSheet, nil", + "}" + ] + }, + { + "name": "createDriveFolder", + 
"qualifiedName": "createDriveFolder", + "exported": false, + "signature": "func(*drive.Service, string, string)(*drive.File, error)", + "doc": "createDriveFolder creates a new folder in Google Drive\n\nThe function builds a folder metadata object with the specified name, parent\nID, and MIME type for folders. It first checks if a folder with that name\nalready exists under the given parent by querying the Drive API; if found it\nreturns an error to avoid duplication. If no existing folder is detected, it\ncalls the API to create the folder and returns the resulting file object or\nany creation errors.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/drive_utils.go:21", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Fields", + "kind": "function" + }, + { + "name": "Q", + "kind": "function" + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + "name": "Create", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createConclusionsSheet", + "kind": "function", + "source": [ + "func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string) (*sheets.Sheet, error) {", + "\tworkloadsFolderName := \"Results Per Workload\"", + "\tworkloadsResultsFolder, err := createDriveFolder(driveService, workloadsFolderName, mainResultsFolderID)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create workloads results folder: %v\", 
err)", + "\t}", + "", + "\trawSheetHeaders := GetHeadersFromSheet(rawResultsSheet)", + "\tcolsIndices, err := GetHeaderIndicesByColumnNames(rawSheetHeaders, []string{workloadNameRawResultsCol, workloadTypeRawResultsCol, operatorVersionRawResultsCol})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadNameColIndex := colsIndices[0]", + "\tworkloadTypeColIndex := colsIndices[1]", + "\toperatorVersionColIndex := colsIndices[2]", + "", + "\t// Initialize sheet with headers", + "\tconclusionsSheetRowsValues := []*sheets.CellData{}", + "\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\theaderCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026colHeader}}", + "\t\tconclusionsSheetRowsValues = append(conclusionsSheetRowsValues, headerCellData)", + "\t}", + "\tconclusionsSheetRows := []*sheets.RowData{{Values: conclusionsSheetRowsValues}}", + "", + "\t// If rawResultsSheet has now workloads data, return an error", + "\tif len(rawResultsSheet.Data[0].RowData) \u003c= 1 {", + "\t\treturn nil, fmt.Errorf(\"raw results has no workloads data\")", + "\t}", + "", + "\t// Extract unique values from the CNFName column and fill sheet", + "\tuniqueWorkloadNames := make(map[string]bool)", + "\tfor _, rawResultsSheetrow := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tworkloadName := *rawResultsSheetrow.Values[workloadNameColIndex].UserEnteredValue.StringValue", + "\t\t// if workload has already been added to sheet, skip it", + "\t\tif uniqueWorkloadNames[workloadName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\tuniqueWorkloadNames[workloadName] = true", + "", + "\t\tcurConsclusionRowValues := []*sheets.CellData{}", + "\t\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\t\tcurCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{}}", + "", + "\t\t\tswitch colHeader {", + "\t\t\tcase categoryConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = 
rawResultsSheetrow.Values[workloadTypeColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase workloadVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[operatorVersionColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase ocpVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(ocpVersion + \" \")", + "", + "\t\t\tcase WorkloadNameConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = \u0026workloadName", + "", + "\t\t\tcase ResultsConclusionsCol:", + "\t\t\t\tworkloadResultsSpreadsheet, err := createSingleWorkloadRawResultsSpreadSheet(sheetsService, driveService, workloadsResultsFolder, rawResultsSheet, workloadName)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\treturn nil, fmt.Errorf(\"error has occurred while creating %s results file: %v\", workloadName, err)", + "\t\t\t\t}", + "", + "\t\t\t\thyperlinkFormula := fmt.Sprintf(\"=HYPERLINK(%q, %q)\", workloadResultsSpreadsheet.SpreadsheetUrl, \"Results\")", + "\t\t\t\tcurCellData.UserEnteredValue.FormulaValue = \u0026hyperlinkFormula", + "", + "\t\t\tdefault:", + "\t\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(\" \")", + "\t\t\t}", + "", + "\t\t\tcurConsclusionRowValues = append(curConsclusionRowValues, curCellData)", + "\t\t}", + "\t\tconclusionsSheetRows = append(conclusionsSheetRows, \u0026sheets.RowData{Values: curConsclusionRowValues})", + "\t}", + "", + "\tconclusionSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: ConclusionSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: conclusionsSheetRows}},", + "\t}", + "", + "\treturn conclusionSheet, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + 
"\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createDriveFolder(srv *drive.Service, folderName, parentFolderID string) (*drive.File, error) {", + "\tdriveFolder := \u0026drive.File{", + "\t\tName: folderName,", + "\t\tParents: []string{parentFolderID},", + "\t\tMimeType: \"application/vnd.google-apps.folder\",", + "\t}", + "", + "\t// Search for an existing folder with the same name", + "\tq := fmt.Sprintf(\"name = '%s' and mimeType = 'application/vnd.google-apps.folder' and '%s' in parents and trashed = false\", folderName, parentFolderID)", + "\tcall := srv.Files.List().Q(q).Fields(\"files(id, name)\")", + "", + "\tfiles, err := call.Do()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to list files: %v\", err)", + "\t}", + "", + "\tif len(files.Files) \u003e 0 {", + "\t\treturn nil, fmt.Errorf(\"folder %s already exists in %s folder ID\", folderName, parentFolderID)", + "\t}", + "", + "\tcreatedFolder, err := srv.Files.Create(driveFolder).Do()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create folder: %v\", err)", + "\t}", + "", + "\treturn createdFolder, nil", + "}" + ] + }, + { + "name": "createRawResultsSheet", + "qualifiedName": "createRawResultsSheet", + "exported": 
false, + "signature": "func(string)(*sheets.Sheet, error)", + "doc": "createRawResultsSheet parses a CSV file into a Google Sheets sheet\n\nThe function reads the specified CSV file, converts each row into spreadsheet\nrows while trimming overly long cell content and normalizing empty cells and\nline breaks. It builds a Sheet object with a title and frozen header row,\nthen returns this sheet or an error if reading fails.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:335", + "calls": [ + { + "name": "readCSV", + "kind": "function", + "source": [ + "func readCSV(fp string) ([][]string, error) {", + "\tfile, err := os.Open(fp)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\tdefer file.Close()", + "", + "\treader := csv.NewReader(file)", + "\trecords, err := reader.ReadAll()", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn records, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "prepareRecordsForSpreadSheet", + "kind": "function", + "source": [ + "func prepareRecordsForSpreadSheet(records [][]string) []*sheets.RowData {", + "\tvar rows []*sheets.RowData", + "\tfor _, row := range records {", + "\t\tvar rowData []*sheets.CellData", + "\t\tfor _, col := range row {", + "\t\t\tvar val string", + "\t\t\t// cell content cannot exceed 50,000 letters.", + "\t\t\tif len(col) \u003e cellContentLimit {", + "\t\t\t\tcol = col[:cellContentLimit]", + "\t\t\t}", + "\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\tif col == \"\" {", + "\t\t\t\tval = \" \"", + "\t\t\t}", + "\t\t\t// avoid line breaks in cell", + "\t\t\tval = strings.ReplaceAll(strings.ReplaceAll(col, \"\\r\\n\", \" \"), \"\\n\", \" \")", + "", + "\t\t\trowData = append(rowData, \u0026sheets.CellData{", + "\t\t\t\tUserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026val},", + "\t\t\t})", + "\t\t}", + "\t\trows = append(rows, 
\u0026sheets.RowData{Values: rowData})", + "\t}", + "\treturn rows", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", 
+ "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createRawResultsSheet(fp string) (*sheets.Sheet, error) {", + "\trecords, err := readCSV(fp)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to read csv file: %v\", err)", + "\t}", + "", + "\trows := prepareRecordsForSpreadSheet(records)", + "", + "\trawResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: RawResultsSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: rows}},", + "\t}", + "", + "\treturn rawResultsSheet, nil", + "}" + ] + }, + { + "name": "createSingleWorkloadRawResultsSheet", + "qualifiedName": "createSingleWorkloadRawResultsSheet", + "exported": false, + "signature": "func(*sheets.Sheet, string)(*sheets.Sheet, error)", + "doc": "createSingleWorkloadRawResultsSheet Creates a new sheet containing only the rows for a specified workload\n\nThe function filters an existing raw results sheet to include only the test\ncase rows that match the given workload name, adding two empty columns 
for\nowner/tech lead conclusion and next step actions. It retains the original\nheader row from the raw sheet while inserting the new headers at the\nbeginning. The resulting sheet is returned along with any error encountered\nduring processing.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:151", + "calls": [ + { + "name": "stringToPointer", + "kind": "function" + }, + { + "name": "stringToPointer", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "GetHeadersFromSheet", + "kind": "function", + "source": [ + "func GetHeadersFromSheet(sheet *sheets.Sheet) []string {", + "\theaders := []string{}", + "\tfor _, val := range sheet.Data[0].RowData[0].Values {", + "\t\theaders = append(headers, *val.UserEnteredValue.StringValue)", + "\t}", + "\treturn headers", + "}" + ] + }, + { + "name": "GetHeaderIndicesByColumnNames", + "kind": "function", + "source": [ + "func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) {", + "\tindices := []int{}", + "\tfor _, name := range names {", + "\t\tfound := false", + "\t\tfor i, val := range headers {", + "\t\t\tif name == val {", + "\t\t\t\tfound = true", + "\t\t\t\tindices = append(indices, i)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"column %s doesn't exist in given headers list\", name)", + "\t\t}", + "\t}", + "\treturn indices, nil", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createSingleWorkloadRawResultsSpreadSheet", + "kind": "function", + "source": [ + "func createSingleWorkloadRawResultsSpreadSheet(sheetService 
*sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error) {", + "\tworkloadResultsSheet, err := createSingleWorkloadRawResultsSheet(rawResultsSheet, workloadName)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadResultsSpreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: fmt.Sprintf(\"%s Best Practices Test Results\", workloadName),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{workloadResultsSheet},", + "\t}", + "", + "\tworkloadResultsSpreadsheet, err = sheetService.Spreadsheets.Create(workloadResultsSpreadsheet).Do()", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := addFilterByFailedAndMandatoryToSheet(sheetService, workloadResultsSpreadsheet, \"results\"); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, folder, workloadResultsSpreadsheet); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tlog.Printf(\"%s workload's results sheet has been created.\\n\", workloadName)", + "", + "\treturn workloadResultsSpreadsheet, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Sheet, error) {", + "\t// Initialize sheet with the two new column headers only.", + "\tfilteredRows := []*sheets.RowData{{Values: []*sheets.CellData{", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(conclusionIndividualSingleWorkloadSheetCol)}},", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(nextStepAIIfFailSingleWorkloadSheetCol)}},", + "\t}}}", + "", + "\t// Add existing column headers from the rawResultsSheet", + "\tfilteredRows[0].Values = append(filteredRows[0].Values, rawResultsSheet.Data[0].RowData[0].Values...)", 
+ "", + "\theaders := GetHeadersFromSheet(rawResultsSheet)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"CNFName\"})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\tworkloadNameIndex := indices[0]", + "", + "\t// add to sheet only rows of given workload name", + "\tfor _, row := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tif len(row.Values) \u003c= workloadNameIndex {", + "\t\t\treturn nil, fmt.Errorf(\"workload %s not found in raw spreadsheet\", workloadName)", + "\t\t}", + "\t\tcurWorkloadName := *row.Values[workloadNameIndex].UserEnteredValue.StringValue", + "\t\tif curWorkloadName == workloadName {", + "\t\t\t// add empty values in 2 added columns", + "\t\t\tnewRow := \u0026sheets.RowData{", + "\t\t\t\tValues: append([]*sheets.CellData{{}, {}}, row.Values...),", + "\t\t\t}", + "\t\t\tfilteredRows = append(filteredRows, newRow)", + "\t\t}", + "\t}", + "", + "\tworkloadResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: \"results\",", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: filteredRows}},", + "\t}", + "", + "\treturn workloadResultsSheet, nil", + "}" + ] + }, + { + "name": "createSingleWorkloadRawResultsSpreadSheet", + "qualifiedName": "createSingleWorkloadRawResultsSpreadSheet", + "exported": false, + "signature": "func(*sheets.Service, *drive.Service, *drive.File, *sheets.Sheet, string)(*sheets.Spreadsheet, error)", + "doc": "createSingleWorkloadRawResultsSpreadSheet Creates a Google Sheets spreadsheet containing raw results for a specific workload\n\nThe function builds a new sheet from the provided raw results, then creates a\nspreadsheet titled with the workload name. It applies a filter to show only\nfailed or mandatory entries and moves the file into the designated Drive\nfolder. 
Errors are returned if any step fails, and a log message confirms\ncreation.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:200", + "calls": [ + { + "name": "createSingleWorkloadRawResultsSheet", + "kind": "function", + "source": [ + "func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Sheet, error) {", + "\t// Initialize sheet with the two new column headers only.", + "\tfilteredRows := []*sheets.RowData{{Values: []*sheets.CellData{", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(conclusionIndividualSingleWorkloadSheetCol)}},", + "\t\t{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: stringToPointer(nextStepAIIfFailSingleWorkloadSheetCol)}},", + "\t}}}", + "", + "\t// Add existing column headers from the rawResultsSheet", + "\tfilteredRows[0].Values = append(filteredRows[0].Values, rawResultsSheet.Data[0].RowData[0].Values...)", + "", + "\theaders := GetHeadersFromSheet(rawResultsSheet)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"CNFName\"})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\tworkloadNameIndex := indices[0]", + "", + "\t// add to sheet only rows of given workload name", + "\tfor _, row := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tif len(row.Values) \u003c= workloadNameIndex {", + "\t\t\treturn nil, fmt.Errorf(\"workload %s not found in raw spreadsheet\", workloadName)", + "\t\t}", + "\t\tcurWorkloadName := *row.Values[workloadNameIndex].UserEnteredValue.StringValue", + "\t\tif curWorkloadName == workloadName {", + "\t\t\t// add empty values in 2 added columns", + "\t\t\tnewRow := \u0026sheets.RowData{", + "\t\t\t\tValues: append([]*sheets.CellData{{}, {}}, row.Values...),", + "\t\t\t}", + "\t\t\tfilteredRows = append(filteredRows, newRow)", + "\t\t}", + "\t}", + "", + "\tworkloadResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: 
\u0026sheets.SheetProperties{", + "\t\t\tTitle: \"results\",", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: filteredRows}},", + "\t}", + "", + "\treturn workloadResultsSheet, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + "name": "Create", + "kind": "function" + }, + { + "name": "addFilterByFailedAndMandatoryToSheet", + "kind": "function", + "source": [ + "func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{\"State\", \"Mandatory/Optional\"})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tstateColIndex := indices[0]", + "\tisMandatoryColIndex := indices[1]", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSetBasicFilter: \u0026sheets.SetBasicFilterRequest{", + "\t\t\t\tFilter: \u0026sheets.BasicFilter{", + "\t\t\t\t\tRange: \u0026sheets.GridRange{SheetId: sheetID},", + "\t\t\t\t\tCriteria: map[string]sheets.FilterCriteria{", + "\t\t\t\t\t\tfmt.Sprint(stateColIndex): {", + "\t\t\t\t\t\t\tCondition: \u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"failed\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t\tfmt.Sprint(isMandatoryColIndex): {", + "\t\t\t\t\t\t\tCondition: 
\u0026sheets.BooleanCondition{", + "\t\t\t\t\t\t\t\tType: \"TEXT_EQ\",", + "\t\t\t\t\t\t\t\tValues: []*sheets.ConditionValue{", + "\t\t\t\t\t\t\t\t\t{UserEnteredValue: \"Mandatory\"},", + "\t\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t\t},", + "\t\t\t\t\t\t},", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\treturn err", + "}" + ] + }, + { + "name": "MoveSpreadSheetToFolder", + "kind": "function", + "source": [ + "func MoveSpreadSheetToFolder(srv *drive.Service, folder *drive.File, spreadsheet *sheets.Spreadsheet) error {", + "\tfile, err := srv.Files.Get(spreadsheet.SpreadsheetId).Fields(\"parents\").Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to get file: %v\", err)", + "\t}", + "", + "\t// Collect the current parent IDs to remove (if needed)", + "\toldParents := append([]string{}, file.Parents...)", + "", + "\tupdateCall := srv.Files.Update(spreadsheet.SpreadsheetId, nil)", + "\tupdateCall.AddParents(folder.Id)", + "", + "\t// Remove the file from its old parents", + "\tif len(oldParents) \u003e 0 {", + "\t\tfor _, parent := range oldParents {", + "\t\t\tupdateCall.RemoveParents(parent)", + "\t\t}", + "\t}", + "", + "\t_, err = updateCall.Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable change file location: %v\", err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createConclusionsSheet", + "kind": "function", + "source": [ + "func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string) (*sheets.Sheet, error) {", + "\tworkloadsFolderName := \"Results Per 
Workload\"", + "\tworkloadsResultsFolder, err := createDriveFolder(driveService, workloadsFolderName, mainResultsFolderID)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create workloads results folder: %v\", err)", + "\t}", + "", + "\trawSheetHeaders := GetHeadersFromSheet(rawResultsSheet)", + "\tcolsIndices, err := GetHeaderIndicesByColumnNames(rawSheetHeaders, []string{workloadNameRawResultsCol, workloadTypeRawResultsCol, operatorVersionRawResultsCol})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadNameColIndex := colsIndices[0]", + "\tworkloadTypeColIndex := colsIndices[1]", + "\toperatorVersionColIndex := colsIndices[2]", + "", + "\t// Initialize sheet with headers", + "\tconclusionsSheetRowsValues := []*sheets.CellData{}", + "\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\theaderCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026colHeader}}", + "\t\tconclusionsSheetRowsValues = append(conclusionsSheetRowsValues, headerCellData)", + "\t}", + "\tconclusionsSheetRows := []*sheets.RowData{{Values: conclusionsSheetRowsValues}}", + "", + "\t// If rawResultsSheet has now workloads data, return an error", + "\tif len(rawResultsSheet.Data[0].RowData) \u003c= 1 {", + "\t\treturn nil, fmt.Errorf(\"raw results has no workloads data\")", + "\t}", + "", + "\t// Extract unique values from the CNFName column and fill sheet", + "\tuniqueWorkloadNames := make(map[string]bool)", + "\tfor _, rawResultsSheetrow := range rawResultsSheet.Data[0].RowData[1:] {", + "\t\tworkloadName := *rawResultsSheetrow.Values[workloadNameColIndex].UserEnteredValue.StringValue", + "\t\t// if workload has already been added to sheet, skip it", + "\t\tif uniqueWorkloadNames[workloadName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\tuniqueWorkloadNames[workloadName] = true", + "", + "\t\tcurConsclusionRowValues := []*sheets.CellData{}", + "\t\tfor _, colHeader := range conclusionSheetHeaders 
{", + "\t\t\tcurCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{}}", + "", + "\t\t\tswitch colHeader {", + "\t\t\tcase categoryConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[workloadTypeColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase workloadVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[operatorVersionColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase ocpVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(ocpVersion + \" \")", + "", + "\t\t\tcase WorkloadNameConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = \u0026workloadName", + "", + "\t\t\tcase ResultsConclusionsCol:", + "\t\t\t\tworkloadResultsSpreadsheet, err := createSingleWorkloadRawResultsSpreadSheet(sheetsService, driveService, workloadsResultsFolder, rawResultsSheet, workloadName)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\treturn nil, fmt.Errorf(\"error has occurred while creating %s results file: %v\", workloadName, err)", + "\t\t\t\t}", + "", + "\t\t\t\thyperlinkFormula := fmt.Sprintf(\"=HYPERLINK(%q, %q)\", workloadResultsSpreadsheet.SpreadsheetUrl, \"Results\")", + "\t\t\t\tcurCellData.UserEnteredValue.FormulaValue = \u0026hyperlinkFormula", + "", + "\t\t\tdefault:", + "\t\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(\" \")", + "\t\t\t}", + "", + "\t\t\tcurConsclusionRowValues = append(curConsclusionRowValues, curCellData)", + "\t\t}", + "\t\tconclusionsSheetRows = append(conclusionsSheetRows, \u0026sheets.RowData{Values: curConsclusionRowValues})", + "\t}", + "", + "\tconclusionSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: ConclusionSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + 
"\t\tData: []*sheets.GridData{{RowData: conclusionsSheetRows}},", + "\t}", + "", + "\treturn conclusionSheet, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error) {", + "\tworkloadResultsSheet, err := createSingleWorkloadRawResultsSheet(rawResultsSheet, workloadName)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadResultsSpreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: fmt.Sprintf(\"%s Best Practices Test Results\", workloadName),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{workloadResultsSheet},", + "\t}", + "", + "\tworkloadResultsSpreadsheet, err = sheetService.Spreadsheets.Create(workloadResultsSpreadsheet).Do()", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := addFilterByFailedAndMandatoryToSheet(sheetService, workloadResultsSpreadsheet, \"results\"); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, folder, workloadResultsSpreadsheet); err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tlog.Printf(\"%s workload's results sheet has been created.\\n\", workloadName)", + "", + "\treturn workloadResultsSpreadsheet, nil", + "}" + ] + }, + { + "name": "extractFolderIDFromURL", + "qualifiedName": "extractFolderIDFromURL", + "exported": false, + "signature": "func(string)(string, error)", + "doc": "extractFolderIDFromURL extracts the final path segment from a URL\n\nThis routine parses an input string as a URL, splits its path into\ncomponents, and returns the last component which represents a folder\nidentifier. 
If parsing fails it propagates the error; otherwise it provides\nthe ID and no error.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/drive_utils.go:89", + "calls": [ + { + "pkgPath": "net/url", + "name": "Parse", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "generateResultsSpreadSheet", + "kind": "function", + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create conclusions sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + 
"", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func extractFolderIDFromURL(u string) (string, error) {", + "\tparsedURL, err := url.Parse(u)", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\tpathSegments := strings.Split(parsedURL.Path, \"/\")", + "", + "\t// The folder ID is the last segment in the path", + "\treturn pathSegments[len(pathSegments)-1], nil", + "}" + ] + }, + { + "name": "generateResultsSpreadSheet", + "qualifiedName": "generateResultsSpreadSheet", + "exported": false, + "signature": "func()()", + "doc": "generateResultsSpreadSheet Creates a Google Sheets document with raw results and conclusions\n\nThis routine establishes Google Sheets and Drive services, extracts the root\nfolder ID from a URL, and creates a main results folder named with the OCP\nversion and 
timestamp. It then builds a raw results sheet from a CSV file and\na conclusions sheet that aggregates workload data, moves the new spreadsheet\ninto the created folder, applies basic filtering, sorts by category, and\nprints the final URL.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:362", + "calls": [ + { + "name": "CreateSheetsAndDriveServices", + "kind": "function", + "source": [ + "func CreateSheetsAndDriveServices(credentials string) (sheetService *sheets.Service, driveService *drive.Service, err error) {", + "\tctx := context.TODO()", + "", + "\tsheetSrv, err := sheets.NewService(ctx, option.WithCredentialsFile(credentials))", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"unable to retrieve Sheets service: %v\", err)", + "\t}", + "", + "\tdriveSrv, err := drive.NewService(ctx, option.WithCredentialsFile(credentials))", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"unable to retrieve Drive service: %v\", err)", + "\t}", + "", + "\treturn sheetSrv, driveSrv, nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "extractFolderIDFromURL", + "kind": "function", + "source": [ + "func extractFolderIDFromURL(u string) (string, error) {", + "\tparsedURL, err := url.Parse(u)", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\tpathSegments := strings.Split(parsedURL.Path, \"/\")", + "", + "\t// The folder ID is the last segment in the path", + "\treturn pathSegments[len(pathSegments)-1], nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimLeft", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Format", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "name": "createDriveFolder", + "kind": "function", + "source": [ + 
"func createDriveFolder(srv *drive.Service, folderName, parentFolderID string) (*drive.File, error) {", + "\tdriveFolder := \u0026drive.File{", + "\t\tName: folderName,", + "\t\tParents: []string{parentFolderID},", + "\t\tMimeType: \"application/vnd.google-apps.folder\",", + "\t}", + "", + "\t// Search for an existing folder with the same name", + "\tq := fmt.Sprintf(\"name = '%s' and mimeType = 'application/vnd.google-apps.folder' and '%s' in parents and trashed = false\", folderName, parentFolderID)", + "\tcall := srv.Files.List().Q(q).Fields(\"files(id, name)\")", + "", + "\tfiles, err := call.Do()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to list files: %v\", err)", + "\t}", + "", + "\tif len(files.Files) \u003e 0 {", + "\t\treturn nil, fmt.Errorf(\"folder %s already exists in %s folder ID\", folderName, parentFolderID)", + "\t}", + "", + "\tcreatedFolder, err := srv.Files.Create(driveFolder).Do()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create folder: %v\", err)", + "\t}", + "", + "\treturn createdFolder, nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "createRawResultsSheet", + "kind": "function", + "source": [ + "func createRawResultsSheet(fp string) (*sheets.Sheet, error) {", + "\trecords, err := readCSV(fp)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to read csv file: %v\", err)", + "\t}", + "", + "\trows := prepareRecordsForSpreadSheet(records)", + "", + "\trawResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: RawResultsSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: rows}},", + "\t}", + "", + "\treturn rawResultsSheet, nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + 
"pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "name": "createConclusionsSheet", + "kind": "function", + "source": [ + "func createConclusionsSheet(sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string) (*sheets.Sheet, error) {", + "\tworkloadsFolderName := \"Results Per Workload\"", + "\tworkloadsResultsFolder, err := createDriveFolder(driveService, workloadsFolderName, mainResultsFolderID)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to create workloads results folder: %v\", err)", + "\t}", + "", + "\trawSheetHeaders := GetHeadersFromSheet(rawResultsSheet)", + "\tcolsIndices, err := GetHeaderIndicesByColumnNames(rawSheetHeaders, []string{workloadNameRawResultsCol, workloadTypeRawResultsCol, operatorVersionRawResultsCol})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tworkloadNameColIndex := colsIndices[0]", + "\tworkloadTypeColIndex := colsIndices[1]", + "\toperatorVersionColIndex := colsIndices[2]", + "", + "\t// Initialize sheet with headers", + "\tconclusionsSheetRowsValues := []*sheets.CellData{}", + "\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\theaderCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026colHeader}}", + "\t\tconclusionsSheetRowsValues = append(conclusionsSheetRowsValues, headerCellData)", + "\t}", + "\tconclusionsSheetRows := []*sheets.RowData{{Values: conclusionsSheetRowsValues}}", + "", + "\t// If rawResultsSheet has now workloads data, return an error", + "\tif len(rawResultsSheet.Data[0].RowData) \u003c= 1 {", + "\t\treturn nil, fmt.Errorf(\"raw results has no workloads data\")", + "\t}", + "", + "\t// Extract unique values from the CNFName column and fill sheet", + "\tuniqueWorkloadNames := make(map[string]bool)", + "\tfor _, rawResultsSheetrow := range 
rawResultsSheet.Data[0].RowData[1:] {", + "\t\tworkloadName := *rawResultsSheetrow.Values[workloadNameColIndex].UserEnteredValue.StringValue", + "\t\t// if workload has already been added to sheet, skip it", + "\t\tif uniqueWorkloadNames[workloadName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\tuniqueWorkloadNames[workloadName] = true", + "", + "\t\tcurConsclusionRowValues := []*sheets.CellData{}", + "\t\tfor _, colHeader := range conclusionSheetHeaders {", + "\t\t\tcurCellData := \u0026sheets.CellData{UserEnteredValue: \u0026sheets.ExtendedValue{}}", + "", + "\t\t\tswitch colHeader {", + "\t\t\tcase categoryConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[workloadTypeColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase workloadVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = rawResultsSheetrow.Values[operatorVersionColIndex].UserEnteredValue.StringValue", + "", + "\t\t\tcase ocpVersionConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(ocpVersion + \" \")", + "", + "\t\t\tcase WorkloadNameConclusionsCol:", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = \u0026workloadName", + "", + "\t\t\tcase ResultsConclusionsCol:", + "\t\t\t\tworkloadResultsSpreadsheet, err := createSingleWorkloadRawResultsSpreadSheet(sheetsService, driveService, workloadsResultsFolder, rawResultsSheet, workloadName)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\treturn nil, fmt.Errorf(\"error has occurred while creating %s results file: %v\", workloadName, err)", + "\t\t\t\t}", + "", + "\t\t\t\thyperlinkFormula := fmt.Sprintf(\"=HYPERLINK(%q, %q)\", workloadResultsSpreadsheet.SpreadsheetUrl, \"Results\")", + "\t\t\t\tcurCellData.UserEnteredValue.FormulaValue = \u0026hyperlinkFormula", + "", + "\t\t\tdefault:", + "\t\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\t\tcurCellData.UserEnteredValue.StringValue = stringToPointer(\" \")", + "\t\t\t}", + 
"", + "\t\t\tcurConsclusionRowValues = append(curConsclusionRowValues, curCellData)", + "\t\t}", + "\t\tconclusionsSheetRows = append(conclusionsSheetRows, \u0026sheets.RowData{Values: curConsclusionRowValues})", + "\t}", + "", + "\tconclusionSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: ConclusionSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: conclusionsSheetRows}},", + "\t}", + "", + "\treturn conclusionSheet, nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimLeft", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Do", + "kind": "function" + }, + { + "name": "Create", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "MoveSpreadSheetToFolder", + "kind": "function", + "source": [ + "func MoveSpreadSheetToFolder(srv *drive.Service, folder *drive.File, spreadsheet *sheets.Spreadsheet) error {", + "\tfile, err := srv.Files.Get(spreadsheet.SpreadsheetId).Fields(\"parents\").Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to get file: %v\", err)", + "\t}", + "", + "\t// Collect the current parent IDs to remove (if needed)", + "\toldParents := append([]string{}, file.Parents...)", + "", + "\tupdateCall := srv.Files.Update(spreadsheet.SpreadsheetId, nil)", + "\tupdateCall.AddParents(folder.Id)", + "", + "\t// Remove the file from its old parents", + "\tif len(oldParents) \u003e 0 {", + "\t\tfor _, parent := range oldParents {", + "\t\t\tupdateCall.RemoveParents(parent)", + "\t\t}", + "\t}", + "", + "\t_, err = updateCall.Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable change file location: %v\", err)", + "\t}", + "", + "\treturn nil", + 
"}" + ] + }, + { + "pkgPath": "log", + "name": "Fatal", + "kind": "function" + }, + { + "name": "addBasicFilterToSpreadSheet", + "kind": "function", + "source": [ + "func addBasicFilterToSpreadSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet) error {", + "\trequests := []*sheets.Request{}", + "\tfor _, sheet := range spreadsheet.Sheets {", + "\t\trequests = append(requests, \u0026sheets.Request{", + "\t\t\tSetBasicFilter: \u0026sheets.SetBasicFilterRequest{", + "\t\t\t\tFilter: \u0026sheets.BasicFilter{", + "\t\t\t\t\tRange: \u0026sheets.GridRange{SheetId: sheet.Properties.SheetId},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t})", + "\t}", + "", + "\t_, err := srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "name": "addDescendingSortFilterToSheet", + "kind": "function", + "source": [ + "func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error {", + "\tsheetsValues, err := srv.Spreadsheets.Values.Get(spreadsheet.SpreadsheetId, sheetName).Do()", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s values: %v\", sheetName, err)", + "\t}", + "\theaders := GetHeadersFromValueRange(sheetsValues)", + "\tindices, err := GetHeaderIndicesByColumnNames(headers, []string{colName})", + "\tif err != nil {", + "\t\treturn nil", + "\t}", + "", + "\tsheetID, err := GetSheetIDByName(spreadsheet, sheetName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"unable to retrieve sheet %s id: %v\", sheetName, err)", + "\t}", + "", + "\trequests := []*sheets.Request{", + "\t\t{", + "\t\t\tSortRange: \u0026sheets.SortRangeRequest{", + "\t\t\t\tRange: \u0026sheets.GridRange{", + "\t\t\t\t\tSheetId: sheetID,", + "\t\t\t\t\tStartRowIndex: 1,", 
+ "\t\t\t\t},", + "\t\t\t\tSortSpecs: []*sheets.SortSpec{", + "\t\t\t\t\t{", + "\t\t\t\t\t\tDimensionIndex: int64(indices[0]),", + "\t\t\t\t\t\tSortOrder: \"DESCENDING\",", + "\t\t\t\t\t},", + "\t\t\t\t},", + "\t\t\t},", + "\t\t},", + "\t}", + "", + "\t_, err = srv.Spreadsheets.BatchUpdate(spreadsheet.SpreadsheetId, \u0026sheets.BatchUpdateSpreadsheetRequest{", + "\t\tRequests: requests,", + "\t}).Do()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "log", + "name": "Fatalf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func generateResultsSpreadSheet() {", + "\tsheetService, driveService, err := CreateSheetsAndDriveServices(credentials)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create services: %v\", err)", + "\t}", + "", + "\trootFolderID, err := extractFolderIDFromURL(rootFolderURL)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"error getting folder ID from URL\")", + "\t}", + "\tmainFolderName := strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results %s\", ocpVersion, time.Now().Format(\"2006-01-02T15:04:05Z07:00\")), \" \")", + "\tmainResultsFolder, err := createDriveFolder(driveService, mainFolderName, rootFolderID)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create main results folder: %v\", err)", + "\t}", + "", + "\tlog.Printf(\"Generating raw results sheet...\")", + "\trawResultsSheet, err := createRawResultsSheet(resultsFilePath)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create raw results sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Raw results sheet has been generated.\")", + "", + "\tlog.Printf(\"Generating conclusion sheet...\")", + "\tconclusionSheet, err := createConclusionsSheet(sheetService, driveService, rawResultsSheet, mainResultsFolder.Id)", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to 
create conclusions sheet: %v\", err)", + "\t}", + "\tlog.Printf(\"Conclusion sheet has been generated.\")", + "", + "\tspreadsheet := \u0026sheets.Spreadsheet{", + "\t\tProperties: \u0026sheets.SpreadsheetProperties{", + "\t\t\tTitle: strings.TrimLeft(fmt.Sprintf(\"%s Redhat Best Practices for K8 Test Results\", ocpVersion), \" \"),", + "\t\t},", + "\t\tSheets: []*sheets.Sheet{rawResultsSheet, conclusionSheet},", + "\t}", + "", + "\tspreadsheet, err = sheetService.Spreadsheets.Create(spreadsheet).Do()", + "\tif err != nil {", + "\t\tlog.Fatalf(\"Unable to create spreadsheet: %v\", err)", + "\t}", + "", + "\tif err := MoveSpreadSheetToFolder(driveService, mainResultsFolder, spreadsheet); err != nil {", + "\t\tlog.Fatal(err)", + "\t}", + "", + "\tif err = addBasicFilterToSpreadSheet(sheetService, spreadsheet); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tif err = addDescendingSortFilterToSheet(sheetService, spreadsheet, conclusionSheet.Properties.Title, \"Category\"); err != nil {", + "\t\tlog.Fatalf(\"Unable to apply filter to the spread sheet: %v\", err)", + "\t}", + "", + "\tfmt.Printf(\"Results spreadsheet was created successfully: %s\\n\", spreadsheet.SpreadsheetUrl)", + "}" + ] + }, + { + "name": "prepareRecordsForSpreadSheet", + "qualifiedName": "prepareRecordsForSpreadSheet", + "exported": false, + "signature": "func([][]string)([]*sheets.RowData)", + "doc": "prepareRecordsForSpreadSheet Converts CSV rows into spreadsheet row data\n\nThis routine takes a two‑dimensional string slice, representing CSV\nrecords, and transforms each cell into a CellData object suitable for Google\nSheets. It trims overly long content to a predefined limit, replaces empty\ncells with a single space to preserve layout, and removes line breaks from\ntext. 
Each processed row is wrapped in a RowData structure; the function\nreturns a slice of these rows for use in sheet creation.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:117", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createRawResultsSheet", + "kind": "function", + "source": [ + "func createRawResultsSheet(fp string) (*sheets.Sheet, error) {", + "\trecords, err := readCSV(fp)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to read csv file: %v\", err)", + "\t}", + "", + "\trows := prepareRecordsForSpreadSheet(records)", + "", + "\trawResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: RawResultsSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: []*sheets.GridData{{RowData: rows}},", + "\t}", + "", + "\treturn rawResultsSheet, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func prepareRecordsForSpreadSheet(records [][]string) []*sheets.RowData {", + "\tvar rows []*sheets.RowData", + "\tfor _, row := range records {", + "\t\tvar rowData []*sheets.CellData", + "\t\tfor _, col := range row {", + "\t\t\tvar val string", + "\t\t\t// cell content cannot exceed 50,000 letters.", + "\t\t\tif len(col) \u003e cellContentLimit {", + "\t\t\t\tcol = col[:cellContentLimit]", + "\t\t\t}", + "\t\t\t// use space for empty values to avoid cells overlapping", + "\t\t\tif col == \"\" {", + "\t\t\t\tval = \" \"", + "\t\t\t}", + "\t\t\t// avoid 
line breaks in cell", + "\t\t\tval = strings.ReplaceAll(strings.ReplaceAll(col, \"\\r\\n\", \" \"), \"\\n\", \" \")", + "", + "\t\t\trowData = append(rowData, \u0026sheets.CellData{", + "\t\t\t\tUserEnteredValue: \u0026sheets.ExtendedValue{StringValue: \u0026val},", + "\t\t\t})", + "\t\t}", + "\t\trows = append(rows, \u0026sheets.RowData{Values: rowData})", + "\t}", + "\treturn rows", + "}" + ] + }, + { + "name": "readCSV", + "qualifiedName": "readCSV", + "exported": false, + "signature": "func(string)([][]string, error)", + "doc": "readCSV loads CSV file contents into a two-dimensional string slice\n\nThe function opens the specified file path, reads all rows using the csv\npackage, and returns them as a slice of records where each record is a slice\nof fields. It propagates any I/O or parsing errors to the caller. The file is\nclosed automatically via defer before returning.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:73", + "calls": [ + { + "pkgPath": "os", + "name": "Open", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "encoding/csv", + "name": "NewReader", + "kind": "function" + }, + { + "name": "ReadAll", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet", + "name": "createRawResultsSheet", + "kind": "function", + "source": [ + "func createRawResultsSheet(fp string) (*sheets.Sheet, error) {", + "\trecords, err := readCSV(fp)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to read csv file: %v\", err)", + "\t}", + "", + "\trows := prepareRecordsForSpreadSheet(records)", + "", + "\trawResultsSheet := \u0026sheets.Sheet{", + "\t\tProperties: \u0026sheets.SheetProperties{", + "\t\t\tTitle: RawResultsSheetName,", + "\t\t\tGridProperties: \u0026sheets.GridProperties{FrozenRowCount: 1},", + "\t\t},", + "\t\tData: 
[]*sheets.GridData{{RowData: rows}},", + "\t}", + "", + "\treturn rawResultsSheet, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func readCSV(fp string) ([][]string, error) {", + "\tfile, err := os.Open(fp)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\tdefer file.Close()", + "", + "\treader := csv.NewReader(file)", + "\trecords, err := reader.ReadAll()", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn records, nil", + "}" + ] + } + ], + "globals": [ + { + "name": "conclusionSheetHeaders", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:20" + }, + { + "name": "credentials", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:26" + }, + { + "name": "ocpVersion", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:25" + }, + { + "name": "resultsFilePath", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:23" + }, + { + "name": "rootFolderURL", + "exported": false, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:24" + }, + { + "name": "stringToPointer", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:19" + }, + { + "name": "uploadResultSpreadSheetCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/results_spreadsheet.go:30" + } + ], + "consts": [ + { + "name": "ConclusionSheetName", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:4" + }, + { + "name": "RawResultsSheetName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:5" + }, + { + "name": "ResultsConclusionsCol", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:12" + }, + { + "name": "SingleWorkloadResultsSheetName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:6" + }, + { + "name": "WorkloadNameConclusionsCol", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:11" + }, + { + "name": "categoryConclusionsCol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:8" + }, + { + "name": "cellContentLimit", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:21" + }, + { + "name": "conclusionIndividualSingleWorkloadSheetCol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:19" + }, + { + "name": "nextStepAIIfFailSingleWorkloadSheetCol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:18" + }, + { + "name": "ocpVersionConclusionsCol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:10" + }, + { + "name": "operatorVersionRawResultsCol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:16" + }, + { + "name": "workloadNameRawResultsCol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:14" + }, + { + "name": "workloadTypeRawResultsCol", + 
"exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:15" + }, + { + "name": "workloadVersionConclusionsCol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/upload/results_spreadsheet/const.go:9" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/version", + "name": "version", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "github.com/spf13/cobra" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "NewCommand", + "qualifiedName": "NewCommand", + "exported": true, + "signature": "func()(*cobra.Command)", + "doc": "NewCommand Provides the CLI command for displaying application version\n\nThis function creates and returns a cobra command configured to show the\ncurrent version of the tool when invoked. The command is set up elsewhere in\nthe package, so this function simply exposes that preconfigured command\ninstance for use by the main application.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/version/version.go:37", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "newRootCmd", + "kind": "function", + "source": [ + "func newRootCmd() *cobra.Command {", + "\trootCmd := cobra.Command{", + "\t\tUse: \"certsuite\",", + "\t\tShort: \"A CLI tool for the Red Hat Best Practices Test Suite for Kubernetes.\",", + "\t}", + "", + "\trootCmd.AddCommand(claim.NewCommand())", + "\trootCmd.AddCommand(generate.NewCommand())", + "\trootCmd.AddCommand(check.NewCommand())", + "\trootCmd.AddCommand(run.NewCommand())", + "\trootCmd.AddCommand(info.NewCommand())", + "\trootCmd.AddCommand(version.NewCommand())", + "\trootCmd.AddCommand(upload.NewCommand())", + "", + "\treturn \u0026rootCmd", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
NewCommand() *cobra.Command {", + "\treturn versionCmd", + "}" + ] + }, + { + "name": "showVersion", + "qualifiedName": "showVersion", + "exported": false, + "signature": "func(*cobra.Command, []string)(error)", + "doc": "showVersion Displays the current application and claim file versions\n\nThis function prints out two pieces of information: the version string for\nthe Certsuite binary, which includes release and commit details, and the\nversion number used for claim files. It formats both strings with newline\nseparators and returns nil to indicate successful execution.", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/version/version.go:24", + "calls": [ + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "name": "GitVersion", + "kind": "function", + "source": [ + "func GitVersion() string {", + "\tif GitRelease == \"\" {", + "\t\tGitDisplayRelease = \"Unreleased build post \" + GitPreviousRelease", + "\t} else {", + "\t\tGitDisplayRelease = GitRelease", + "\t}", + "", + "\treturn GitDisplayRelease + \" (\" + GitCommit + \")\"", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func showVersion(cmd *cobra.Command, _ []string) error {", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "", + "\treturn nil", + "}" + ] + } + ], + "globals": [ + { + "name": "versionCmd", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/cmd/certsuite/version/version.go:11" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "cli", + "files": 1, + "imports": [ + "fmt", + "golang.org/x/term", + "os", + "strings", + "time" + ], + "structs": [ + { + "name": 
"cliCheckLogSniffer", + "exported": false, + "doc": "cliCheckLogSniffer forwards terminal output to a logging channel\n\nThis type implements an io.Writer that captures data written by the CLI when\nrunning in a TTY environment. When Write is called, it attempts to send the\nbyte slice as a string over a dedicated channel; if the channel is not ready\nor closed, the data is silently dropped to avoid blocking execution. In\nnon‑TTY scenarios, all writes are simply acknowledged without any side\neffects.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:77", + "fields": {}, + "methodNames": [ + "Write" + ], + "source": [ + "type cliCheckLogSniffer struct{}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "LineAlignCenter", + "qualifiedName": "LineAlignCenter", + "exported": true, + "signature": "func(string, int)(string)", + "doc": "LineAlignCenter Centers a string within a specified width\n\nThe function takes an input string and a target width, then returns the\nstring padded with spaces so it appears centered when printed. It calculates\npadding by determining how many leading spaces are needed to shift the\noriginal text toward the middle of the given width. 
The resulting string is\nalways exactly the specified length.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:374", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "printTestCaseInfoBox", + "kind": "function", + "source": [ + "func printTestCaseInfoBox(testCase *claim.TestCaseDescription) {", + "\t// Test case identifier", + "\tborder := strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(testCase.Identifier.Id, lineMaxWidth), cli.Cyan))", + "", + "\t// Description", + "\tborder = strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"DESCRIPTION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Description, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Remediation", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"REMEDIATION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Remediation, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Exceptions", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"EXCEPTIONS\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.ExceptionProcess, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Best Practices 
reference", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"BEST PRACTICES REFERENCE\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.BestPracticeReference, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "\tfmt.Println(border)", + "\tfmt.Printf(\"\\n\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LineAlignCenter(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, fmt.Sprintf(\"%[1]*s\", (w+len(s))/2, s)) //nolint:mnd // magic number", + "}" + ] + }, + { + "name": "LineAlignLeft", + "qualifiedName": "LineAlignLeft", + "exported": true, + "signature": "func(string, int)(string)", + "doc": "LineAlignLeft left‑justifies a string to a given column width\n\nThe function takes an input string and a desired width, returning the string\npadded with spaces on the right so that its total length equals the specified\nwidth. It uses formatted printing with a negative field width to achieve left\nalignment. 
If the original string exceeds the requested width, it is returned\nunchanged without truncation.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:363", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "printTestCaseInfoBox", + "kind": "function", + "source": [ + "func printTestCaseInfoBox(testCase *claim.TestCaseDescription) {", + "\t// Test case identifier", + "\tborder := strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(testCase.Identifier.Id, lineMaxWidth), cli.Cyan))", + "", + "\t// Description", + "\tborder = strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"DESCRIPTION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Description, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Remediation", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"REMEDIATION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Remediation, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Exceptions", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"EXCEPTIONS\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.ExceptionProcess, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Best Practices reference", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", 
cli.LineColor(cli.LineAlignCenter(\"BEST PRACTICES REFERENCE\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.BestPracticeReference, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "\tfmt.Println(border)", + "\tfmt.Printf(\"\\n\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LineAlignLeft(s string, w int) string {", + "\treturn fmt.Sprintf(\"%[1]*s\", -w, s)", + "}" + ] + }, + { + "name": "LineColor", + "qualifiedName": "LineColor", + "exported": true, + "signature": "func(string, string)(string)", + "doc": "LineColor Adds ANSI color codes around text\n\nThis function takes a plain string and a color code, prefixes the string with\nthe color escape sequence, appends the reset code, and returns the resulting\ncolored string. It is used to display terminal output in different colors\nwithout altering the original content.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:384", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "printTestCaseInfoBox", + "kind": "function", + "source": [ + "func printTestCaseInfoBox(testCase *claim.TestCaseDescription) {", + "\t// Test case identifier", + "\tborder := strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(testCase.Identifier.Id, lineMaxWidth), cli.Cyan))", + "", + "\t// Description", + "\tborder = strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"DESCRIPTION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Description, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + 
"\t// Remediation", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"REMEDIATION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Remediation, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Exceptions", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"EXCEPTIONS\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.ExceptionProcess, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Best Practices reference", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"BEST PRACTICES REFERENCE\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.BestPracticeReference, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "\tfmt.Println(border)", + "\tfmt.Printf(\"\\n\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LineColor(s, color string) string {", + "\treturn color + s + Reset", + "}" + ] + }, + { + "name": "PrintBanner", + "qualifiedName": "PrintBanner", + "exported": true, + "signature": "func()()", + "doc": "PrintBanner Displays a banner at startup\n\nThis function writes the predefined banner string to standard output using\nthe fmt package. It is invoked during application initialization to show\nbranding or version information. 
No parameters are taken, and it does not\nreturn a value.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:65", + "calls": [ + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", 
testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PrintBanner() {", + "\tfmt.Print(banner)", + "}" + ] + }, + { + "name": "PrintCheckAborted", + "qualifiedName": "PrintCheckAborted", + "exported": true, + "signature": "func(string, string)()", + "doc": "PrintCheckAborted Notifies the user that a check has been aborted\n\nThis routine stops any ongoing line‑printing goroutine, then outputs a\nformatted message indicating the check’s name and the reason for abortion.\nThe output includes special control codes to clear the current terminal line\nbefore displaying the status tag. No value is returned.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:305", + "calls": [ + { + "name": "stopCheckLineGoroutine", + "kind": "function", + "source": [ + "func stopCheckLineGoroutine() {", + "\tif stopChan == nil {", + "\t\t// This may happen for checks that were skipped if no compliant nor non-compliant objects found.", + "\t\treturn", + "\t}", + "", + "\tstopChan \u003c- true", + "\t// Make this chnanel immediately unavailable.", + "\tstopChan = nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + 
"usesTypes": null, + "usesGlobals": null, + "source": [ + "func PrintCheckAborted(checkName, reason string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagAborted + \" ] \" + checkName + \" (\" + reason + \")\\n\")", + "}" + ] + }, + { + "name": "PrintCheckErrored", + "qualifiedName": "PrintCheckErrored", + "exported": true, + "signature": "func(string)()", + "doc": "PrintCheckErrored Stops the progress display and shows an error line\n\nThis routine halts any ongoing check‑line goroutine, clears the current\nterminal line, and prints a formatted message indicating that the specified\ncheck has failed with an error. The output includes a clear line code, an\nerror tag, and the check identifier.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:317", + "calls": [ + { + "name": "stopCheckLineGoroutine", + "kind": "function", + "source": [ + "func stopCheckLineGoroutine() {", + "\tif stopChan == nil {", + "\t\t// This may happen for checks that were skipped if no compliant nor non-compliant objects found.", + "\t\treturn", + "\t}", + "", + "\tstopChan \u003c- true", + "\t// Make this chnanel immediately unavailable.", + "\tstopChan = nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + 
"usesGlobals": null, + "source": [ + "func PrintCheckErrored(checkName string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagError + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "name": "PrintCheckFailed", + "qualifiedName": "PrintCheckFailed", + "exported": true, + "signature": "func(string)()", + "doc": "PrintCheckFailed Displays a failed check status line\n\nThe function stops the running goroutine that updates the check progress,\nthen prints a formatted message indicating failure for the given check name.\nIt writes the output directly to standard output with escape codes to clear\nthe previous line and show a red \"FAIL\" tag followed by the check identifier.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:293", + "calls": [ + { + "name": "stopCheckLineGoroutine", + "kind": "function", + "source": [ + "func stopCheckLineGoroutine() {", + "\tif stopChan == nil {", + "\t\t// This may happen for checks that were skipped if no compliant nor non-compliant objects found.", + "\t\treturn", + "\t}", + "", + "\tstopChan \u003c- true", + "\t// Make this chnanel immediately unavailable.", + "\tstopChan = nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + 
"source": [ + "func PrintCheckFailed(checkName string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagFail + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "name": "PrintCheckPassed", + "qualifiedName": "PrintCheckPassed", + "exported": true, + "signature": "func(string)()", + "doc": "PrintCheckPassed Shows a passed check with formatted output\n\nThe function stops any active line‑printing goroutine, then writes a clear\nline indicator followed by a pass tag and the provided check name to standard\noutput. It uses predefined constants for formatting and ensures the display\nis updated correctly before returning.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:281", + "calls": [ + { + "name": "stopCheckLineGoroutine", + "kind": "function", + "source": [ + "func stopCheckLineGoroutine() {", + "\tif stopChan == nil {", + "\t\t// This may happen for checks that were skipped if no compliant nor non-compliant objects found.", + "\t\treturn", + "\t}", + "", + "\tstopChan \u003c- true", + "\t// Make this chnanel immediately unavailable.", + "\tstopChan = nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PrintCheckPassed(checkName string) {", + 
"\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagPass + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "name": "PrintCheckRunning", + "qualifiedName": "PrintCheckRunning", + "exported": true, + "signature": "func(string)()", + "doc": "PrintCheckRunning Displays a running check status message\n\nThe function prints an initial line indicating that a specific check is in\nprogress, appending a newline when output is not a terminal to keep the\ndisplay clean. It then starts a background goroutine that updates this line\nevery second with elapsed time and any new log messages until the check\ncompletes and signals the stop channel.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:261", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "isTTY", + "kind": "function", + "source": [ + "func isTTY() bool {", + "\treturn term.IsTerminal(int(os.Stdin.Fd()))", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + }, + { + "name": "updateRunningCheckLine", + "kind": "function", + "source": [ + "func updateRunningCheckLine(checkName string, stopChan \u003c-chan bool) {", + "\tstartTime := time.Now()", + "", + "\t// Local string var to save the last received log line from the running check.", + "\tlastCheckLogLine := \"\"", + "", + "\ttickerPeriod := 1 * time.Second", + "\tif !isTTY() {", + "\t\t// Increase it to avoid flooding the text output.", + "\t\ttickerPeriod = tickerPeriodSeconds * time.Second", + "\t}", + "", + "\ttimer := time.NewTicker(tickerPeriod)", + "\tfor {", + "\t\tselect {", + "\t\tcase \u003c-timer.C:", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase newLogLine := \u003c-checkLoggerChan:", + "\t\t\tlastCheckLogLine = newLogLine", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase \u003c-stopChan:", + "\t\t\ttimer.Stop()", + 
"\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.Run", + "kind": "function", + "source": [ + "func (check *Check) Run() error {", + "\tif check == nil {", + "\t\treturn fmt.Errorf(\"check is a nil pointer\")", + "\t}", + "", + "\tif check.Error != nil {", + "\t\treturn fmt.Errorf(\"unable to run due to a previously existing error: %v\", check.Error)", + "\t}", + "", + "\tcli.PrintCheckRunning(check.ID)", + "", + "\tcheck.StartTime = time.Now()", + "\tdefer func() {", + "\t\tcheck.EndTime = time.Now()", + "\t}()", + "", + "\tcheck.LogInfo(\"Running check (labels: %v)\", check.Labels)", + "\tif check.BeforeCheckFn != nil {", + "\t\tif err := check.BeforeCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in before check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tif err := check.CheckFn(check); err != nil {", + "\t\treturn fmt.Errorf(\"check %s failed in check function: %v\", check.ID, err)", + "\t}", + "", + "\tif check.AfterCheckFn != nil {", + "\t\tif err := check.AfterCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in after check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tprintCheckResult(check)", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PrintCheckRunning(checkName string) {", + "\tstopChan = make(chan bool)", + "\tcheckLoggerChan = make(chan string)", + "", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName", + "\tif !isTTY() {", + "\t\tline += \"\\n\"", + "\t}", + "", + "\tfmt.Print(line)", + "", + "\tgo updateRunningCheckLine(checkName, stopChan)", + "}" + ] + }, + { + "name": "PrintCheckSkipped", + "qualifiedName": "PrintCheckSkipped", + "exported": true, + "signature": "func(string, string)()", + "doc": "PrintCheckSkipped Logs a skipped check with its 
reason\n\nThis function stops the ongoing check line goroutine, then prints a formatted\nmessage indicating that the specified check was skipped along with the\nprovided reason. The output includes control codes to clear the current\nterminal line and displays the skip tag followed by the check name and\nexplanation. No value is returned.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:246", + "calls": [ + { + "name": "stopCheckLineGoroutine", + "kind": "function", + "source": [ + "func stopCheckLineGoroutine() {", + "\tif stopChan == nil {", + "\t\t// This may happen for checks that were skipped if no compliant nor non-compliant objects found.", + "\t\treturn", + "\t}", + "", + "\tstopChan \u003c- true", + "\t// Make this chnanel immediately unavailable.", + "\tstopChan = nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PrintCheckSkipped(checkName, reason string) {", + "\t// It shouldn't happen too often, but some checks might be set as skipped inside the checkFn", + "\t// if neither compliant objects nor non-compliant objects were found.", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagSkip + \" ] \" + checkName + \" (\" + reason + \")\\n\")", 
+ "}" + ] + }, + { + "name": "PrintResultsTable", + "qualifiedName": "PrintResultsTable", + "exported": true, + "signature": "func(map[string][]int)()", + "doc": "PrintResultsTable Displays a formatted summary of test suite outcomes\n\nThe function accepts a mapping from group names to integer slices that\nrepresent passed, failed, and skipped counts. It outputs a neatly aligned\ntable with column headers and separators, iterating over each group to show\nits results. After listing all groups, it adds blank lines for readability.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:206", + "calls": [ + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + 
"\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PrintResultsTable(results map[string][]int) {", + "\tfmt.Printf(\"\\n\")", + "\tfmt.Println(\"-----------------------------------------------------------\")", + 
"\tfmt.Printf(\"| %-27s %-9s %-9s %s |\\n\", \"SUITE\", \"PASSED\", \"FAILED\", \"SKIPPED\")", + "\tfmt.Println(\"-----------------------------------------------------------\")", + "\tfor groupName, groupResults := range results {", + "\t\tfmt.Printf(\"| %-25s %8d %9d %10d |\\n\", groupName,", + "\t\t\tgroupResults[0],", + "\t\t\tgroupResults[1],", + "\t\t\tgroupResults[2])", + "\t\tfmt.Println(\"-----------------------------------------------------------\")", + "\t}", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "name": "WrapLines", + "qualifiedName": "WrapLines", + "exported": true, + "signature": "func(string, int)([]string)", + "doc": "WrapLines Breaks a string into lines that fit within a maximum width\n\nThe function splits the input text on newline characters, then examines each\nline to see if it exceeds the specified width. Lines longer than the limit\nare broken into words and reassembled so no resulting line surpasses the\nmaximum length. The wrapped lines are returned as a slice of strings.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:329", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "printTestCaseInfoBox", + "kind": "function", + "source": [ + "func printTestCaseInfoBox(testCase *claim.TestCaseDescription) {", + "\t// Test case identifier", + "\tborder := strings.Repeat(\"-\", lineMaxWidth+linePadding)", + 
"\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(testCase.Identifier.Id, lineMaxWidth), cli.Cyan))", + "", + "\t// Description", + "\tborder = strings.Repeat(\"-\", lineMaxWidth+linePadding)", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"DESCRIPTION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Description, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Remediation", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"REMEDIATION\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.Remediation, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Exceptions", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"EXCEPTIONS\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.ExceptionProcess, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "", + "\t// Best Practices reference", + "\tfmt.Println(border)", + "\tfmt.Printf(\"| %s |\\n\", cli.LineColor(cli.LineAlignCenter(\"BEST PRACTICES REFERENCE\", lineMaxWidth), cli.Green))", + "\tfmt.Println(border)", + "\tfor _, line := range cli.WrapLines(testCase.BestPracticeReference, lineMaxWidth) {", + "\t\tfmt.Printf(\"| %s |\\n\", cli.LineAlignLeft(line, lineMaxWidth))", + "\t}", + "\tfmt.Println(border)", + "\tfmt.Printf(\"\\n\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func WrapLines(text string, maxWidth int) []string {", + "\tlines := strings.Split(text, \"\\n\")", + "\twrappedLines := make([]string, 0, len(lines))", + "\tfor _, line := range lines {", + 
"\t\tif len(line) \u003c= maxWidth {", + "\t\t\twrappedLines = append(wrappedLines, line)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Break lines longer than maxWidth", + "\t\twords := strings.Fields(line)", + "\t\tcurrentLine := words[0]", + "\t\tfor _, word := range words[1:] {", + "\t\t\tif len(currentLine)+len(word)+1 \u003c= maxWidth {", + "\t\t\t\tcurrentLine += \" \" + word", + "\t\t\t} else {", + "\t\t\t\twrappedLines = append(wrappedLines, currentLine)", + "\t\t\t\tcurrentLine = word", + "\t\t\t}", + "\t\t}", + "", + "\t\twrappedLines = append(wrappedLines, currentLine)", + "\t}", + "", + "\treturn wrappedLines", + "}" + ] + }, + { + "name": "Write", + "qualifiedName": "cliCheckLogSniffer.Write", + "exported": true, + "receiver": "cliCheckLogSniffer", + "signature": "func([]byte)(int, error)", + "doc": "cliCheckLogSniffer.Write Writes log data to a channel when running in a terminal\n\nWhen the process is attached to a TTY, this method attempts to send the\nprovided byte slice as a string into a dedicated logger channel without\nblocking; if the channel is not ready or closed, the data is silently\ndropped. In non‑TTY environments it simply returns the length of the input\nand no error, effectively discarding output. 
The function always reports the\nfull number of bytes processed.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:186", + "calls": [ + { + "name": "isTTY", + "kind": "function", + "source": [ + "func isTTY() bool {", + "\treturn term.IsTerminal(int(os.Stdin.Fd()))", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *cliCheckLogSniffer) Write(p []byte) (n int, err error) {", + "\tif !isTTY() {", + "\t\treturn len(p), nil", + "\t}", + "\t// Send to channel, or ignore it in case the channel is not ready or is closed.", + "\t// This way we avoid blocking the whole program.", + "\tselect {", + "\tcase checkLoggerChan \u003c- string(p):", + "\tdefault:", + "\t}", + "", + "\treturn len(p), nil", + "}" + ] + }, + { + "name": "cropLogLine", + "qualifiedName": "cropLogLine", + "exported": false, + "signature": "func(string, int)(string)", + "doc": "cropLogLine Trims a log line to fit terminal width\n\nThe function removes newline characters from the input string and then\ntruncates it if its length exceeds the specified maximum width. 
It returns\nthe processed string, which is safe to display in a single-line CLI output\nwithout breaking formatting.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:141", + "calls": [ + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "printRunningCheckLine", + "kind": "function", + "source": [ + "func printRunningCheckLine(checkName string, startTime time.Time, logLine string) {", + "\t// Minimum space on the right needed to show the current last log line.", + "\tconst minColsNeededForLogLine = 40", + "", + "\telapsedTime := time.Since(startTime).Round(time.Second)", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName + \" (\" + elapsedTime.String() + \")\"", + "\tif !isTTY() {", + "\t\tfmt.Print(line + \"\\n\")", + "\t\treturn", + "\t}", + "", + "\t// Add check's last log line only if the program is running in a tty/ptty.", + "\tmaxAvailableWidth := getTerminalWidth() - len(line) - lineLength", + "\tif logLine != \"\" \u0026\u0026 maxAvailableWidth \u003e minColsNeededForLogLine {", + "\t\t// Append a cropped log line only if it makes sense due to the available space on the right.", + "\t\tline += \" \" + cropLogLine(logLine, maxAvailableWidth)", + "\t}", + "", + "\tfmt.Print(ClearLineCode + line)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func cropLogLine(line string, maxAvailableWidth int) string {", + "\t// Remove line feeds to avoid the log line to break the cli output.", + "\tfilteredLine := strings.ReplaceAll(line, \"\\n\", \" \")", + "\t// Print only the chars that fit in the available space.", + "\tif len(filteredLine) \u003e maxAvailableWidth {", + "\t\treturn filteredLine[:maxAvailableWidth]", + "\t}", + "\treturn filteredLine", + "}" + ] + }, + { + "name": "getTerminalWidth", + "qualifiedName": 
"getTerminalWidth", + "exported": false, + "signature": "func()(int)", + "doc": "getTerminalWidth Determines the current terminal width in columns\n\nIt calls a system routine to query the size of the standard input device,\nreturning the number of columns available for output. The value is used to\nformat log lines so they fit within the terminal without wrapping or\ntruncating unexpectedly.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:130", + "calls": [ + { + "pkgPath": "golang.org/x/term", + "name": "GetSize", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "Fd", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "printRunningCheckLine", + "kind": "function", + "source": [ + "func printRunningCheckLine(checkName string, startTime time.Time, logLine string) {", + "\t// Minimum space on the right needed to show the current last log line.", + "\tconst minColsNeededForLogLine = 40", + "", + "\telapsedTime := time.Since(startTime).Round(time.Second)", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName + \" (\" + elapsedTime.String() + \")\"", + "\tif !isTTY() {", + "\t\tfmt.Print(line + \"\\n\")", + "\t\treturn", + "\t}", + "", + "\t// Add check's last log line only if the program is running in a tty/ptty.", + "\tmaxAvailableWidth := getTerminalWidth() - len(line) - lineLength", + "\tif logLine != \"\" \u0026\u0026 maxAvailableWidth \u003e minColsNeededForLogLine {", + "\t\t// Append a cropped log line only if it makes sense due to the available space on the right.", + "\t\tline += \" \" + cropLogLine(logLine, maxAvailableWidth)", + "\t}", + "", + "\tfmt.Print(ClearLineCode + line)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getTerminalWidth() int {", + "\twidth, _, _ := term.GetSize(int(os.Stdin.Fd()))", + "\treturn width", + "}" + ] + }, + { + "name": "isTTY", 
+ "qualifiedName": "isTTY", + "exported": false, + "signature": "func()(bool)", + "doc": "isTTY determines whether standard input is a terminal\n\nThe function checks if the current process’s stdin corresponds to an\ninteractive terminal device by converting its file descriptor to an integer\nand using the external library’s IsTerminal call. It returns true when\noutput can be formatted for a tty, otherwise false. This value influences how\nlog lines are printed or suppressed in the CLI.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:86", + "calls": [ + { + "pkgPath": "golang.org/x/term", + "name": "IsTerminal", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "Fd", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckRunning", + "kind": "function", + "source": [ + "func PrintCheckRunning(checkName string) {", + "\tstopChan = make(chan bool)", + "\tcheckLoggerChan = make(chan string)", + "", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName", + "\tif !isTTY() {", + "\t\tline += \"\\n\"", + "\t}", + "", + "\tfmt.Print(line)", + "", + "\tgo updateRunningCheckLine(checkName, stopChan)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "cliCheckLogSniffer.Write", + "kind": "function", + "source": [ + "func (c *cliCheckLogSniffer) Write(p []byte) (n int, err error) {", + "\tif !isTTY() {", + "\t\treturn len(p), nil", + "\t}", + "\t// Send to channel, or ignore it in case the channel is not ready or is closed.", + "\t// This way we avoid blocking the whole program.", + "\tselect {", + "\tcase checkLoggerChan \u003c- string(p):", + "\tdefault:", + "\t}", + "", + "\treturn len(p), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "printRunningCheckLine", + "kind": "function", + 
"source": [ + "func printRunningCheckLine(checkName string, startTime time.Time, logLine string) {", + "\t// Minimum space on the right needed to show the current last log line.", + "\tconst minColsNeededForLogLine = 40", + "", + "\telapsedTime := time.Since(startTime).Round(time.Second)", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName + \" (\" + elapsedTime.String() + \")\"", + "\tif !isTTY() {", + "\t\tfmt.Print(line + \"\\n\")", + "\t\treturn", + "\t}", + "", + "\t// Add check's last log line only if the program is running in a tty/ptty.", + "\tmaxAvailableWidth := getTerminalWidth() - len(line) - lineLength", + "\tif logLine != \"\" \u0026\u0026 maxAvailableWidth \u003e minColsNeededForLogLine {", + "\t\t// Append a cropped log line only if it makes sense due to the available space on the right.", + "\t\tline += \" \" + cropLogLine(logLine, maxAvailableWidth)", + "\t}", + "", + "\tfmt.Print(ClearLineCode + line)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "updateRunningCheckLine", + "kind": "function", + "source": [ + "func updateRunningCheckLine(checkName string, stopChan \u003c-chan bool) {", + "\tstartTime := time.Now()", + "", + "\t// Local string var to save the last received log line from the running check.", + "\tlastCheckLogLine := \"\"", + "", + "\ttickerPeriod := 1 * time.Second", + "\tif !isTTY() {", + "\t\t// Increase it to avoid flooding the text output.", + "\t\ttickerPeriod = tickerPeriodSeconds * time.Second", + "\t}", + "", + "\ttimer := time.NewTicker(tickerPeriod)", + "\tfor {", + "\t\tselect {", + "\t\tcase \u003c-timer.C:", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase newLogLine := \u003c-checkLoggerChan:", + "\t\t\tlastCheckLogLine = newLogLine", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase \u003c-stopChan:", + "\t\t\ttimer.Stop()", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" 
+ ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isTTY() bool {", + "\treturn term.IsTerminal(int(os.Stdin.Fd()))", + "}" + ] + }, + { + "name": "printRunningCheckLine", + "qualifiedName": "printRunningCheckLine", + "exported": false, + "signature": "func(string, time.Time, string)()", + "doc": "printRunningCheckLine Displays the progress of a running check\n\nIt prints a status line that includes the check name, elapsed time since\nstart, and optionally a cropped log message when running in a terminal. If\noutput is not a TTY it simply writes the line with a newline. The function\nclears the current terminal line before printing to keep the display updated.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:157", + "calls": [ + { + "name": "Round", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Since", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "isTTY", + "kind": "function", + "source": [ + "func isTTY() bool {", + "\treturn term.IsTerminal(int(os.Stdin.Fd()))", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + }, + { + "name": "getTerminalWidth", + "kind": "function", + "source": [ + "func getTerminalWidth() int {", + "\twidth, _, _ := term.GetSize(int(os.Stdin.Fd()))", + "\treturn width", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "cropLogLine", + "kind": "function", + "source": [ + "func cropLogLine(line string, maxAvailableWidth int) string {", + "\t// Remove line feeds to avoid the log line to break the cli output.", + "\tfilteredLine := strings.ReplaceAll(line, \"\\n\", \" \")", + "\t// Print only the chars that fit in the available space.", + "\tif len(filteredLine) \u003e maxAvailableWidth {", + "\t\treturn filteredLine[:maxAvailableWidth]", + "\t}", + "\treturn filteredLine", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + } + ], + "calledBy": [ + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "updateRunningCheckLine", + "kind": "function", + "source": [ + "func updateRunningCheckLine(checkName string, stopChan \u003c-chan bool) {", + "\tstartTime := time.Now()", + "", + "\t// Local string var to save the last received log line from the running check.", + "\tlastCheckLogLine := \"\"", + "", + "\ttickerPeriod := 1 * time.Second", + "\tif !isTTY() {", + "\t\t// Increase it to avoid flooding the text output.", + "\t\ttickerPeriod = tickerPeriodSeconds * time.Second", + "\t}", + "", + "\ttimer := time.NewTicker(tickerPeriod)", + "\tfor {", + "\t\tselect {", + "\t\tcase \u003c-timer.C:", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase newLogLine := \u003c-checkLoggerChan:", + "\t\t\tlastCheckLogLine = newLogLine", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase \u003c-stopChan:", + "\t\t\ttimer.Stop()", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printRunningCheckLine(checkName string, startTime time.Time, logLine string) {", + "\t// Minimum space on the right needed to show the current last log line.", + "\tconst minColsNeededForLogLine = 40", + "", + "\telapsedTime := time.Since(startTime).Round(time.Second)", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName + \" (\" + elapsedTime.String() + \")\"", + "\tif !isTTY() {", + "\t\tfmt.Print(line + \"\\n\")", + "\t\treturn", + "\t}", + "", + "\t// Add check's last log line only if the program is running in a tty/ptty.", + "\tmaxAvailableWidth := getTerminalWidth() - len(line) - lineLength", + "\tif logLine != \"\" \u0026\u0026 maxAvailableWidth \u003e minColsNeededForLogLine {", + "\t\t// Append a cropped log line only if it makes sense due to the available space on the right.", + "\t\tline += \" \" + cropLogLine(logLine, maxAvailableWidth)", + "\t}", + "", + 
"\tfmt.Print(ClearLineCode + line)", + "}" + ] + }, + { + "name": "stopCheckLineGoroutine", + "qualifiedName": "stopCheckLineGoroutine", + "exported": false, + "signature": "func()()", + "doc": "stopCheckLineGoroutine Signals the check line goroutine to stop\n\nThis function checks whether a global channel used for signalling is set,\nsends a true value to that channel if it exists, then clears the reference so\nsubsequent calls have no effect. It is called by various print functions when\na check completes or is aborted, ensuring any ongoing line output goroutine\nterminates cleanly.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:228", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckAborted", + "kind": "function", + "source": [ + "func PrintCheckAborted(checkName, reason string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagAborted + \" ] \" + checkName + \" (\" + reason + \")\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckErrored", + "kind": "function", + "source": [ + "func PrintCheckErrored(checkName string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagError + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckFailed", + "kind": "function", + "source": [ + "func PrintCheckFailed(checkName string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagFail + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckPassed", + "kind": "function", + "source": [ + "func PrintCheckPassed(checkName string) {", + "\tstopCheckLineGoroutine()", + "", 
+ "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagPass + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckSkipped", + "kind": "function", + "source": [ + "func PrintCheckSkipped(checkName, reason string) {", + "\t// It shouldn't happen too often, but some checks might be set as skipped inside the checkFn", + "\t// if neither compliant objects nor non-compliant objects were found.", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagSkip + \" ] \" + checkName + \" (\" + reason + \")\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func stopCheckLineGoroutine() {", + "\tif stopChan == nil {", + "\t\t// This may happen for checks that were skipped if no compliant nor non-compliant objects found.", + "\t\treturn", + "\t}", + "", + "\tstopChan \u003c- true", + "\t// Make this chnanel immediately unavailable.", + "\tstopChan = nil", + "}" + ] + }, + { + "name": "updateRunningCheckLine", + "qualifiedName": "updateRunningCheckLine", + "exported": false, + "signature": "func(string, \u003c-chan bool)()", + "doc": "updateRunningCheckLine updates the running check status line with elapsed time and latest log\n\nThis routine starts a ticker that triggers every to refresh the console\noutput for a running test. It listens on a channel for new log messages,\nupdating the displayed line accordingly, and stops when a stop signal is\nreceived. 
The function prints the check name, elapsed time, and optionally a\ncropped latest log if terminal width permits.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:97", + "calls": [ + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "name": "isTTY", + "kind": "function", + "source": [ + "func isTTY() bool {", + "\treturn term.IsTerminal(int(os.Stdin.Fd()))", + "}" + ] + }, + { + "pkgPath": "time", + "name": "NewTicker", + "kind": "function" + }, + { + "name": "printRunningCheckLine", + "kind": "function", + "source": [ + "func printRunningCheckLine(checkName string, startTime time.Time, logLine string) {", + "\t// Minimum space on the right needed to show the current last log line.", + "\tconst minColsNeededForLogLine = 40", + "", + "\telapsedTime := time.Since(startTime).Round(time.Second)", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName + \" (\" + elapsedTime.String() + \")\"", + "\tif !isTTY() {", + "\t\tfmt.Print(line + \"\\n\")", + "\t\treturn", + "\t}", + "", + "\t// Add check's last log line only if the program is running in a tty/ptty.", + "\tmaxAvailableWidth := getTerminalWidth() - len(line) - lineLength", + "\tif logLine != \"\" \u0026\u0026 maxAvailableWidth \u003e minColsNeededForLogLine {", + "\t\t// Append a cropped log line only if it makes sense due to the available space on the right.", + "\t\tline += \" \" + cropLogLine(logLine, maxAvailableWidth)", + "\t}", + "", + "\tfmt.Print(ClearLineCode + line)", + "}" + ] + }, + { + "name": "printRunningCheckLine", + "kind": "function", + "source": [ + "func printRunningCheckLine(checkName string, startTime time.Time, logLine string) {", + "\t// Minimum space on the right needed to show the current last log line.", + "\tconst minColsNeededForLogLine = 40", + "", + "\telapsedTime := time.Since(startTime).Round(time.Second)", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName + \" (\" + elapsedTime.String() + \")\"", + "\tif !isTTY() 
{", + "\t\tfmt.Print(line + \"\\n\")", + "\t\treturn", + "\t}", + "", + "\t// Add check's last log line only if the program is running in a tty/ptty.", + "\tmaxAvailableWidth := getTerminalWidth() - len(line) - lineLength", + "\tif logLine != \"\" \u0026\u0026 maxAvailableWidth \u003e minColsNeededForLogLine {", + "\t\t// Append a cropped log line only if it makes sense due to the available space on the right.", + "\t\tline += \" \" + cropLogLine(logLine, maxAvailableWidth)", + "\t}", + "", + "\tfmt.Print(ClearLineCode + line)", + "}" + ] + }, + { + "name": "Stop", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckRunning", + "kind": "function", + "source": [ + "func PrintCheckRunning(checkName string) {", + "\tstopChan = make(chan bool)", + "\tcheckLoggerChan = make(chan string)", + "", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName", + "\tif !isTTY() {", + "\t\tline += \"\\n\"", + "\t}", + "", + "\tfmt.Print(line)", + "", + "\tgo updateRunningCheckLine(checkName, stopChan)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func updateRunningCheckLine(checkName string, stopChan \u003c-chan bool) {", + "\tstartTime := time.Now()", + "", + "\t// Local string var to save the last received log line from the running check.", + "\tlastCheckLogLine := \"\"", + "", + "\ttickerPeriod := 1 * time.Second", + "\tif !isTTY() {", + "\t\t// Increase it to avoid flooding the text output.", + "\t\ttickerPeriod = tickerPeriodSeconds * time.Second", + "\t}", + "", + "\ttimer := time.NewTicker(tickerPeriod)", + "\tfor {", + "\t\tselect {", + "\t\tcase \u003c-timer.C:", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase newLogLine := \u003c-checkLoggerChan:", + "\t\t\tlastCheckLogLine = newLogLine", + "\t\t\tprintRunningCheckLine(checkName, startTime, lastCheckLogLine)", + "\t\tcase \u003c-stopChan:", + 
"\t\t\ttimer.Stop()", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + } + ], + "globals": [ + { + "name": "CliCheckLogSniffer", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:52" + }, + { + "name": "checkLoggerChan", + "exported": false, + "type": "chan string", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:55" + }, + { + "name": "stopChan", + "exported": false, + "type": "chan bool", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:56" + } + ], + "consts": [ + { + "name": "Blue", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:17" + }, + { + "name": "CheckResultTagAborted", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:45" + }, + { + "name": "CheckResultTagError", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:46" + }, + { + "name": "CheckResultTagFail", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:42" + }, + { + "name": "CheckResultTagPass", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:41" + }, + { + "name": "CheckResultTagRunning", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:44" + }, + { + "name": "CheckResultTagSkip", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:43" + }, + { + "name": "ClearLineCode", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:23" + }, + { + "name": "Cyan", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:19" + }, + { + "name": "Gray", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:20" + }, + { + "name": "Green", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:15" + }, + { + "name": "Purple", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/internal/cli/cli.go:18" + }, + { + "name": "Red", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:14" + }, + { + "name": "Reset", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:13" + }, + { + "name": "White", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:21" + }, + { + "name": "Yellow", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:16" + }, + { + "name": "banner", + "exported": false, + "doc": "ASCII art generated on http://www.patorjk.com/software/taag/ with\nthe font Standard by Glenn Chappell \u0026 Ian Chai 3/93.", + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:28" + }, + { + "name": "lineLength", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:49" + }, + { + "name": "tickerPeriodSeconds", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/cli/cli.go:48" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "clientsholder", + "files": 3, + "imports": [ + "bytes", + "context", + "errors", + "fmt", + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1", + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned", + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake", + "github.com/openshift/client-go/apiserver/clientset/versioned", + "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1", + "github.com/openshift/client-go/machineconfiguration/clientset/versioned", + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned", + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake", + 
"github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/client/clientset/versioned/typed/operators/v1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "k8s.io/api/apps/v1", + "k8s.io/api/autoscaling/v1", + "k8s.io/api/core/v1", + "k8s.io/api/policy/v1", + "k8s.io/api/rbac/v1", + "k8s.io/api/storage/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/client-go/discovery", + "k8s.io/client-go/dynamic", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", + "k8s.io/client-go/kubernetes/typed/networking/v1", + "k8s.io/client-go/rest", + "k8s.io/client-go/restmapper", + "k8s.io/client-go/scale", + "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/clientcmd/api", + "k8s.io/client-go/tools/remotecommand", + "k8s.io/kubectl/pkg/scheme", + "strings", + "sync", + "time" + ], + "structs": [ + { + "name": "ClientsHolder", + "exported": true, + "doc": "ClientsHolder Holds configured Kubernetes API clients for cluster interaction\n\nThis structure aggregates multiple client interfaces, including core,\ndynamic, extension, networking, and OLM clients, along with configuration\ndata such as the REST config and kubeconfig bytes. It provides a single point\nfrom which tests or utilities can execute commands inside pods, query\nresources, or manipulate cluster objects. 
The ready flag indicates whether\nthe holder has been fully initialized.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:71", + "fields": { + "APIExtClient": "apiextv1.Interface", + "ApiserverClient": "apiserverscheme.Interface", + "CNCFNetworkingClient": "cncfNetworkAttachmentv1.Interface", + "DiscoveryClient": "discovery.DiscoveryInterface", + "DynamicClient": "dynamic.Interface", + "GroupResources": "[]*metav1.APIResourceList", + "K8sClient": "kubernetes.Interface", + "K8sNetworkingClient": "networkingv1.NetworkingV1Interface", + "KubeConfig": "[]byte", + "MachineCfg": "ocpMachine.Interface", + "OcpClient": "clientconfigv1.ConfigV1Interface", + "OlmClient": "olmClient.Interface", + "OlmPkgClient": "olmpkgclient.PackagesV1Interface", + "RestConfig": "*rest.Config", + "ScalingClient": "scale.ScalesGetter", + "ready": "bool" + }, + "methodNames": [ + "ExecCommandContainer" + ], + "source": [ + "type ClientsHolder struct {", + "\tRestConfig *rest.Config", + "\tDynamicClient dynamic.Interface", + "\tScalingClient scale.ScalesGetter", + "\tAPIExtClient apiextv1.Interface", + "\tOlmClient olmClient.Interface", + "\tOlmPkgClient olmpkgclient.PackagesV1Interface", + "\tOcpClient clientconfigv1.ConfigV1Interface", + "\tK8sClient kubernetes.Interface", + "\tK8sNetworkingClient networkingv1.NetworkingV1Interface", + "\tCNCFNetworkingClient cncfNetworkAttachmentv1.Interface", + "\tDiscoveryClient discovery.DiscoveryInterface", + "\tMachineCfg ocpMachine.Interface", + "\tKubeConfig []byte", + "\tready bool", + "\tGroupResources []*metav1.APIResourceList", + "\tApiserverClient apiserverscheme.Interface", + "}" + ] + }, + { + "name": "CommandMock", + "exported": true, + "doc": "CommandMock Provides a mock implementation of Command for testing\n\nThe struct holds a function field that replaces the real ExecCommandContainer\nmethod, allowing tests to supply custom behavior. 
It records each call with\nits context and string arguments in an internal slice protected by a\nread‑write mutex. A helper returns the recorded calls for assertions.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/command_moq.go:20", + "fields": { + "ExecCommandContainerFunc": "func(context Context, s string) (string, string, error)", + "calls": "struct{ExecCommandContainer []struct{Context Context; S string}}", + "lockExecCommandContainer": "sync.RWMutex" + }, + "methodNames": [ + "ExecCommandContainer", + "ExecCommandContainerCalls" + ], + "source": [ + "type CommandMock struct {", + "\t// ExecCommandContainerFunc mocks the ExecCommandContainer method.", + "\tExecCommandContainerFunc func(context Context, s string) (string, string, error)", + "", + "\t// calls tracks calls to the methods.", + "\tcalls struct {", + "\t\t// ExecCommandContainer holds details about calls to the ExecCommandContainer method.", + "\t\tExecCommandContainer []struct {", + "\t\t\t// Context is the context argument value.", + "\t\t\tContext Context", + "\t\t\t// S is the s argument value.", + "\t\t\tS string", + "\t\t}", + "\t}", + "\tlockExecCommandContainer sync.RWMutex", + "}" + ] + }, + { + "name": "Context", + "exported": true, + "doc": "Context Represents a target container within a pod\n\nThis structure holds the namespace, pod name, and container name used when\nexecuting commands inside Kubernetes pods. It provides accessor methods to\nretrieve each field value. 
The context is typically created with NewContext\nand passed to command execution functions.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:457", + "fields": { + "containerName": "string", + "namespace": "string", + "podName": "string" + }, + "methodNames": [ + "GetContainerName", + "GetNamespace", + "GetPodName" + ], + "source": [ + "type Context struct {", + "\tnamespace string", + "\tpodName string", + "\tcontainerName string", + "}" + ] + } + ], + "interfaces": [ + { + "name": "Command", + "exported": true, + "doc": "Command Executes a command inside a container\n\nThis method runs the given shell command within a specified container context\nand captures both its standard output and error streams. It returns the\ncaptured stdout, stderr, and an error value that indicates whether the\nexecution succeeded or failed.\n\ngo:generate moq -out command_moq.go . Command", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/command.go:38", + "methods": [ + "ExecCommandContainer" + ], + "source": [ + "type Command interface {", + "\tExecCommandContainer(Context, string) (string, string, error)", + "}" + ] + } + ], + "functions": [ + { + "name": "ClearTestClientsHolder", + "qualifiedName": "ClearTestClientsHolder", + "exported": true, + "signature": "func()()", + "doc": "ClearTestClientsHolder Resets the Kubernetes client and marks holder as not ready\n\nThis function clears the stored Kubernetes client reference, setting it to\nnil, and updates an internal flag to indicate that the holder is no longer\nready for use. It does not return a value and has no parameters. 
After\ncalling this, any attempt to access the client will need reinitialization.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:215", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ClearTestClientsHolder() {", + "\tclientsHolder.K8sClient = nil", + "\tclientsHolder.ready = false", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "qualifiedName": "ClientsHolder.ExecCommandContainer", + "exported": true, + "receiver": "ClientsHolder", + "signature": "func(Context, string)(string, error)", + "doc": "ClientsHolder.ExecCommandContainer Runs a shell command inside a specific pod container\n\nThe function builds an exec request to the Kubernetes API, targeting the\nnamespace, pod, and container provided by the context. It streams the command\noutput into buffers, returning both standard output and error as strings\nalong with any execution error. Logging is performed for debugging and error\ntracing.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/command.go:49", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Context.GetNamespace", + "kind": "function", + "source": [ + "func (c *Context) GetNamespace() string {", + "\treturn c.namespace", + "}" + ] + }, + { + "name": "Context.GetPodName", + "kind": "function", + "source": [ + "func (c *Context) GetPodName() string {", + "\treturn c.podName", + "}" + ] + }, + { + "name": "Context.GetContainerName", + "kind": "function", + "source": [ + "func (c *Context) GetContainerName() string {", + "\treturn c.containerName", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "VersionedParams", + "kind": "function" + }, 
+ { + "name": "SubResource", + "kind": "function" + }, + { + "name": "Name", + "kind": "function" + }, + { + "name": "Resource", + "kind": "function" + }, + { + "name": "Namespace", + "kind": "function" + }, + { + "name": "Post", + "kind": "function" + }, + { + "name": "RESTClient", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "name": "Context.GetNamespace", + "kind": "function", + "source": [ + "func (c *Context) GetNamespace() string {", + "\treturn c.namespace", + "}" + ] + }, + { + "name": "Context.GetPodName", + "kind": "function", + "source": [ + "func (c *Context) GetPodName() string {", + "\treturn c.podName", + "}" + ] + }, + { + "name": "Context.GetContainerName", + "kind": "function", + "source": [ + "func (c *Context) GetContainerName() string {", + "\treturn c.containerName", + "}" + ] + }, + { + "pkgPath": "k8s.io/client-go/tools/remotecommand", + "name": "NewSPDYExecutor", + "kind": "function" + }, + { + "name": "URL", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "StreamWithContext", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + 
"\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "URL", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (clientsholder *ClientsHolder) ExecCommandContainer(", + "\tctx Context, command string) (stdout, stderr string, err error) {", + "\tcommandStr := []string{\"sh\", \"-c\", command}", + "\tvar buffOut bytes.Buffer", + "\tvar buffErr bytes.Buffer", + "", + "\tlog.Debug(\"execute command on ns=%s, pod=%s container=%s, cmd: %s\", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, \" \"))", + "\treq := clientsholder.K8sClient.CoreV1().RESTClient().", + "\t\tPost().", + "\t\tNamespace(ctx.GetNamespace()).", + "\t\tResource(\"pods\").", + "\t\tName(ctx.GetPodName()).", + "\t\tSubResource(\"exec\").", + "\t\tVersionedParams(\u0026corev1.PodExecOptions{", + "\t\t\tContainer: ctx.GetContainerName(),", + "\t\t\tCommand: commandStr,", + "\t\t\tStdin: false,", + "\t\t\tStdout: true,", + "\t\t\tStderr: true,", + "\t\t\tTTY: false,", + "\t\t}, scheme.ParameterCodec)", + "", + "\texec, err := remotecommand.NewSPDYExecutor(clientsholder.RestConfig, \"POST\", 
req.URL())", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\treturn stdout, stderr, err", + "\t}", + "\terr = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{", + "\t\tStdout: \u0026buffOut,", + "\t\tStderr: \u0026buffErr,", + "\t})", + "\tstdout, stderr = buffOut.String(), buffErr.String()", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tlog.Error(\"%v\", req.URL())", + "\t\tlog.Error(\"command: %s\", command)", + "\t\tlog.Error(\"stderr: %s\", stderr)", + "\t\tlog.Error(\"stdout: %s\", stdout)", + "\t\treturn stdout, stderr, err", + "\t}", + "\treturn stdout, stderr, err", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "qualifiedName": "CommandMock.ExecCommandContainer", + "exported": true, + "receiver": "CommandMock", + "signature": "func(Context, string)(string, string, error)", + "doc": "CommandMock.ExecCommandContainer invokes a user-defined function to execute container commands\n\nThis method records the call arguments, ensures thread safety with locks, and\nthen delegates execution to the mock's ExecCommandContainerFunc. If no\nimplementation is provided it panics to signal misuse. 
The return values are\nthe stdout, stderr output strings and an error from the underlying function.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/command_moq.go:43", + "calls": [ + { + "name": "panic", + "kind": "function" + }, + { + "name": "Lock", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + }, + { + "name": "ExecCommandContainerFunc", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (mock *CommandMock) ExecCommandContainer(context Context, s string) (string, string, error) {", + "\tif mock.ExecCommandContainerFunc == nil {", + "\t\tpanic(\"CommandMock.ExecCommandContainerFunc: method is nil but Command.ExecCommandContainer was just called\")", + "\t}", + "\tcallInfo := struct {", + "\t\tContext Context", + "\t\tS string", + "\t}{", + "\t\tContext: context,", + "\t\tS: s,", + "\t}", + "\tmock.lockExecCommandContainer.Lock()", + "\tmock.calls.ExecCommandContainer = append(mock.calls.ExecCommandContainer, callInfo)", + "\tmock.lockExecCommandContainer.Unlock()", + "\treturn mock.ExecCommandContainerFunc(context, s)", + "}" + ] + }, + { + "name": "ExecCommandContainerCalls", + "qualifiedName": "CommandMock.ExecCommandContainerCalls", + "exported": true, + "receiver": "CommandMock", + "signature": "func()([]struct{Context Context; S string})", + "doc": "CommandMock.ExecCommandContainerCalls retrieves every ExecCommandContainer call that has been logged\n\nThis method gathers all the calls made to ExecCommandContainer into a slice\nof structures containing the execution context and the string argument used.\nIt acquires a read lock on the internal mutex to safely access the stored\ncalls, then releases the lock before returning the slice. 
The result allows\ncallers to inspect or assert how many times and with what parameters\nExecCommandContainer was invoked.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/command_moq.go:68", + "calls": [ + { + "name": "RLock", + "kind": "function" + }, + { + "name": "RUnlock", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (mock *CommandMock) ExecCommandContainerCalls() []struct {", + "\tContext Context", + "\tS string", + "} {", + "\tvar calls []struct {", + "\t\tContext Context", + "\t\tS string", + "\t}", + "\tmock.lockExecCommandContainer.RLock()", + "\tcalls = mock.calls.ExecCommandContainer", + "\tmock.lockExecCommandContainer.RUnlock()", + "\treturn calls", + "}" + ] + }, + { + "name": "GetContainerName", + "qualifiedName": "Context.GetContainerName", + "exported": true, + "receiver": "Context", + "signature": "func()(string)", + "doc": "Context.GetContainerName Returns the current pod's container name\n\nThis method retrieves the container name stored in the Context object. It\naccesses an internal field that holds the name of the container to which\ncommands will be executed or operations will target. 
The returned string is\nused by other components when interacting with Kubernetes pods.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:502", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "ClientsHolder.ExecCommandContainer", + "kind": "function", + "source": [ + "func (clientsholder *ClientsHolder) ExecCommandContainer(", + "\tctx Context, command string) (stdout, stderr string, err error) {", + "\tcommandStr := []string{\"sh\", \"-c\", command}", + "\tvar buffOut bytes.Buffer", + "\tvar buffErr bytes.Buffer", + "", + "\tlog.Debug(\"execute command on ns=%s, pod=%s container=%s, cmd: %s\", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, \" \"))", + "\treq := clientsholder.K8sClient.CoreV1().RESTClient().", + "\t\tPost().", + "\t\tNamespace(ctx.GetNamespace()).", + "\t\tResource(\"pods\").", + "\t\tName(ctx.GetPodName()).", + "\t\tSubResource(\"exec\").", + "\t\tVersionedParams(\u0026corev1.PodExecOptions{", + "\t\t\tContainer: ctx.GetContainerName(),", + "\t\t\tCommand: commandStr,", + "\t\t\tStdin: false,", + "\t\t\tStdout: true,", + "\t\t\tStderr: true,", + "\t\t\tTTY: false,", + "\t\t}, scheme.ParameterCodec)", + "", + "\texec, err := remotecommand.NewSPDYExecutor(clientsholder.RestConfig, \"POST\", req.URL())", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\treturn stdout, stderr, err", + "\t}", + "\terr = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{", + "\t\tStdout: \u0026buffOut,", + "\t\tStderr: \u0026buffErr,", + "\t})", + "\tstdout, stderr = buffOut.String(), buffErr.String()", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tlog.Error(\"%v\", req.URL())", + "\t\tlog.Error(\"command: %s\", command)", + "\t\tlog.Error(\"stderr: %s\", stderr)", + "\t\tlog.Error(\"stdout: %s\", stdout)", + "\t\treturn stdout, stderr, err", + "\t}", + "\treturn 
stdout, stderr, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Context) GetContainerName() string {", + "\treturn c.containerName", + "}" + ] + }, + { + "name": "GetNamespace", + "qualifiedName": "Context.GetNamespace", + "exported": true, + "receiver": "Context", + "signature": "func()(string)", + "doc": "Context.GetNamespace retrieves the namespace from the context\n\nThis method accesses the internal namespace field of a Context instance and\nreturns it as a string. It does not modify any state or perform additional\nlogic, simply exposing the value stored during context creation.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:483", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "ClientsHolder.ExecCommandContainer", + "kind": "function", + "source": [ + "func (clientsholder *ClientsHolder) ExecCommandContainer(", + "\tctx Context, command string) (stdout, stderr string, err error) {", + "\tcommandStr := []string{\"sh\", \"-c\", command}", + "\tvar buffOut bytes.Buffer", + "\tvar buffErr bytes.Buffer", + "", + "\tlog.Debug(\"execute command on ns=%s, pod=%s container=%s, cmd: %s\", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, \" \"))", + "\treq := clientsholder.K8sClient.CoreV1().RESTClient().", + "\t\tPost().", + "\t\tNamespace(ctx.GetNamespace()).", + "\t\tResource(\"pods\").", + "\t\tName(ctx.GetPodName()).", + "\t\tSubResource(\"exec\").", + "\t\tVersionedParams(\u0026corev1.PodExecOptions{", + "\t\t\tContainer: ctx.GetContainerName(),", + "\t\t\tCommand: commandStr,", + "\t\t\tStdin: false,", + "\t\t\tStdout: true,", + "\t\t\tStderr: true,", + "\t\t\tTTY: false,", + "\t\t}, scheme.ParameterCodec)", + "", + "\texec, err := remotecommand.NewSPDYExecutor(clientsholder.RestConfig, \"POST\", req.URL())", + "\tif err != nil {", + 
"\t\tlog.Error(\"%v\", err)", + "\t\treturn stdout, stderr, err", + "\t}", + "\terr = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{", + "\t\tStdout: \u0026buffOut,", + "\t\tStderr: \u0026buffErr,", + "\t})", + "\tstdout, stderr = buffOut.String(), buffErr.String()", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tlog.Error(\"%v\", req.URL())", + "\t\tlog.Error(\"command: %s\", command)", + "\t\tlog.Error(\"stderr: %s\", stderr)", + "\t\tlog.Error(\"stdout: %s\", stdout)", + "\t\treturn stdout, stderr, err", + "\t}", + "\treturn stdout, stderr, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Context) GetNamespace() string {", + "\treturn c.namespace", + "}" + ] + }, + { + "name": "GetPodName", + "qualifiedName": "Context.GetPodName", + "exported": true, + "receiver": "Context", + "signature": "func()(string)", + "doc": "Context.GetPodName returns the pod name stored in the context\n\nThis method retrieves and returns the podName field from a Context instance.\nIt takes no arguments and always yields a string representing the current pod\nidentifier used for Kubernetes API calls.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:492", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "ClientsHolder.ExecCommandContainer", + "kind": "function", + "source": [ + "func (clientsholder *ClientsHolder) ExecCommandContainer(", + "\tctx Context, command string) (stdout, stderr string, err error) {", + "\tcommandStr := []string{\"sh\", \"-c\", command}", + "\tvar buffOut bytes.Buffer", + "\tvar buffErr bytes.Buffer", + "", + "\tlog.Debug(\"execute command on ns=%s, pod=%s container=%s, cmd: %s\", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, \" \"))", + "\treq := clientsholder.K8sClient.CoreV1().RESTClient().", + "\t\tPost().", + 
"\t\tNamespace(ctx.GetNamespace()).", + "\t\tResource(\"pods\").", + "\t\tName(ctx.GetPodName()).", + "\t\tSubResource(\"exec\").", + "\t\tVersionedParams(\u0026corev1.PodExecOptions{", + "\t\t\tContainer: ctx.GetContainerName(),", + "\t\t\tCommand: commandStr,", + "\t\t\tStdin: false,", + "\t\t\tStdout: true,", + "\t\t\tStderr: true,", + "\t\t\tTTY: false,", + "\t\t}, scheme.ParameterCodec)", + "", + "\texec, err := remotecommand.NewSPDYExecutor(clientsholder.RestConfig, \"POST\", req.URL())", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\treturn stdout, stderr, err", + "\t}", + "\terr = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{", + "\t\tStdout: \u0026buffOut,", + "\t\tStderr: \u0026buffErr,", + "\t})", + "\tstdout, stderr = buffOut.String(), buffErr.String()", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tlog.Error(\"%v\", req.URL())", + "\t\tlog.Error(\"command: %s\", command)", + "\t\tlog.Error(\"stderr: %s\", stderr)", + "\t\tlog.Error(\"stdout: %s\", stdout)", + "\t\treturn stdout, stderr, err", + "\t}", + "\treturn stdout, stderr, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Context) GetPodName() string {", + "\treturn c.podName", + "}" + ] + }, + { + "name": "GetClientConfigFromRestConfig", + "qualifiedName": "GetClientConfigFromRestConfig", + "exported": true, + "signature": "func(*rest.Config)(*clientcmdapi.Config)", + "doc": "GetClientConfigFromRestConfig Creates a kubeconfig configuration from a REST client\n\nIt accepts a Kubernetes rest.Config pointer and builds an equivalent\nclientcmdapi.Config structure containing cluster, context, and authentication\ninformation. 
The resulting config includes the server URL, certificate\nauthority path, bearer token, and sets a default cluster and context for use\nby other components.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:276", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "getClusterRestConfig", + "kind": "function", + "source": [ + "func getClusterRestConfig(filenames ...string) (*rest.Config, error) {", + "\trestConfig, err := rest.InClusterConfig()", + "\tif err == nil {", + "\t\tlog.Info(\"CNF Cert Suite is running inside a cluster.\")", + "", + "\t\t// Convert restConfig to clientcmdapi.Config so we can get the kubeconfig \"file\" bytes", + "\t\t// needed by preflight's operator checks.", + "\t\tclientConfig := GetClientConfigFromRestConfig(restConfig)", + "\t\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(clientConfig)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create byte array from kube config reference: %v\", err)", + "\t\t}", + "", + "\t\t// No error: we're inside a cluster.", + "\t\treturn restConfig, nil", + "\t}", + "", + "\tlog.Info(\"Running outside a cluster. 
Parsing kubeconfig file/s %+v\", filenames)", + "\tif len(filenames) == 0 {", + "\t\treturn nil, errors.New(\"no kubeconfig files set\")", + "\t}", + "", + "\t// Get the rest.Config from the kubeconfig file/s.", + "\tprecedence := []string{}", + "\tprecedence = append(precedence, filenames...)", + "", + "\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()", + "\tloadingRules.Precedence = precedence", + "", + "\tkubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(", + "\t\tloadingRules,", + "\t\t\u0026clientcmd.ConfigOverrides{},", + "\t)", + "", + "\t// Save merged config to temporary kubeconfig file.", + "\tkubeRawConfig, err := kubeconfig.RawConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get kube raw config: %w\", err)", + "\t}", + "", + "\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(\u0026kubeRawConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to byte array kube config reference: %w\", err)", + "\t}", + "", + "\trestConfig, err = kubeconfig.ClientConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate rest config: %s\", err)", + "\t}", + "", + "\treturn restConfig, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetClientConfigFromRestConfig(restConfig *rest.Config) *clientcmdapi.Config {", + "\treturn \u0026clientcmdapi.Config{", + "\t\tKind: \"Config\",", + "\t\tAPIVersion: \"v1\",", + "\t\tClusters: map[string]*clientcmdapi.Cluster{", + "\t\t\t\"default-cluster\": {", + "\t\t\t\tServer: restConfig.Host,", + "\t\t\t\tCertificateAuthority: restConfig.CAFile,", + "\t\t\t},", + "\t\t},", + "\t\tContexts: map[string]*clientcmdapi.Context{", + "\t\t\t\"default-context\": {", + "\t\t\t\tCluster: \"default-cluster\",", + "\t\t\t\tAuthInfo: \"default-user\",", + "\t\t\t},", + "\t\t},", + "\t\tCurrentContext: \"default-context\",", + "\t\tAuthInfos: map[string]*clientcmdapi.AuthInfo{", + 
"\t\t\t\"default-user\": {", + "\t\t\t\tToken: restConfig.BearerToken,", + "\t\t\t},", + "\t\t},", + "\t}", + "}" + ] + }, + { + "name": "GetClientsHolder", + "qualifiedName": "GetClientsHolder", + "exported": true, + "signature": "func(...string)(*ClientsHolder)", + "doc": "GetClientsHolder Returns a cached instance of the Kubernetes clients holder\n\nThis function checks whether the global ClientsHolder has already been\ninitialized and ready; if so, it returns that instance immediately. If not,\nit attempts to create a new holder by calling the internal constructor with\nany provided configuration filenames. Errors during creation are logged as\nfatal, terminating the program. The resulting holder is returned for use by\nother parts of the application.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:228", + "calls": [ + { + "name": "newClientsHolder", + "kind": "function", + "source": [ + "func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:funlen // this is a special function with lots of assignments", + "\tlog.Info(\"Creating k8s go-clients holder.\")", + "", + "\tvar err error", + "\tclientsHolder.RestConfig, err = getClusterRestConfig(filenames...)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get rest.Config: %v\", err)", + "\t}", + "\tclientsHolder.RestConfig.Timeout = DefaultTimeout", + "", + "\tclientsHolder.DynamicClient, err = dynamic.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate dynamic client (unstructured/dynamic): %s\", err)", + "\t}", + "\tclientsHolder.APIExtClient, err = apiextv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiextv1: %s\", err)", + "\t}", + "\tclientsHolder.OlmClient, err = olmClient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm 
clientset: %s\", err)", + "\t}", + "\tclientsHolder.OlmPkgClient, err = olmpkgclient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.K8sClient, err = kubernetes.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8sclient: %s\", err)", + "\t}", + "\t// create the oc client", + "\tclientsHolder.OcpClient, err = clientconfigv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ocClient: %s\", err)", + "\t}", + "\tclientsHolder.MachineCfg, err = ocpMachine.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate MachineCfg client: %s\", err)", + "\t}", + "\tclientsHolder.K8sNetworkingClient, err = networkingv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8s networking client: %s\", err)", + "\t}", + "", + "\tdiscoveryClient, err := discovery.NewDiscoveryClientForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate discoveryClient: %s\", err)", + "\t}", + "", + "\tclientsHolder.GroupResources, err = discoveryClient.ServerPreferredResources()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot get list of resources in cluster: %s\", err)", + "\t}", + "", + "\tresolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)", + "\tgr, err := restmapper.GetAPIGroupResources(clientsHolder.K8sClient.Discovery())", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate GetAPIGroupResources: %s\", err)", + "\t}", + "", + "\tmapper := restmapper.NewDiscoveryRESTMapper(gr)", + "\tclientsHolder.ScalingClient, err = scale.NewForConfig(clientsHolder.RestConfig, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)", + "\tif 
err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ScalesGetter: %s\", err)", + "\t}", + "", + "\tclientsHolder.CNCFNetworkingClient, err = cncfNetworkAttachmentv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate CNCF networking client\")", + "\t}", + "", + "\tclientsHolder.ApiserverClient, err = apiserverscheme.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiserverscheme: %w\", err)", + "\t}", + "", + "\tclientsHolder.ready = true", + "\treturn \u0026clientsHolder, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "ExecCommandContainerNSEnter", + "kind": "function", + "source": [ + "func ExecCommandContainerNSEnter(command string,", + "\taContainer *provider.Container) (outStr, errStr string, err error) {", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(aContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", aContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\t// Get the container PID to build the nsenter command", + "\tcontainerPid, err := GetPidFromContainer(aContainer, ctx)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot get PID from: %s, err: %v\", aContainer, err)", + "\t}", + "", + "\t// Add the container PID and the specific command to run with nsenter", + "\tnsenterCommand := 
\"nsenter -t \" + strconv.Itoa(containerPid) + \" -n \" + command", + "", + "\t// Run the nsenter command on the probe pod with retry logic", + "\tfor attempt := 1; attempt \u003c= RetryAttempts; attempt++ {", + "\t\toutStr, errStr, err = ch.ExecCommandContainer(ctx, nsenterCommand)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\tif attempt \u003c RetryAttempts {", + "\t\t\ttime.Sleep(RetrySleepSeconds * time.Second)", + "\t\t}", + "\t}", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", command, aContainer, err)", + "\t}", + "", + "\treturn outStr, errStr, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerPidNamespace", + "kind": "function", + "source": [ + "func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) {", + "\t// Get the container pid", + "\tocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tpid, err := GetPidFromContainer(testContainer, ocpContext)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"unable to get container process id due to: %v\", err)", + "\t}", + "\tlog.Debug(\"Obtained process id for %s is %d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"lsns -p %d -t pid -n\", pid)", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"unable to run nsenter due to : %v\", err)", + "\t}", + "", + "\treturn strings.Fields(stdout)[0], nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidFromContainer", + "kind": "function", + "source": [ + "func 
GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) {", + "\tvar pidCmd string", + "", + "\tswitch cut.Runtime {", + "\tcase \"docker\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"docker-pullable\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"cri-o\", \"containerd\":", + "\t\tpidCmd = \"chroot /host crictl inspect --output go-template --template '{{.info.pid}}' \" + cut.UID + DevNull", + "\tdefault:", + "\t\tlog.Debug(\"Container runtime %s not supported yet for this test, skipping\", cut.Runtime)", + "\t\treturn 0, fmt.Errorf(\"container runtime %s not supported\", cut.Runtime)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, pidCmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", pidCmd, cut, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" on %s returned %s\", pidCmd, cut, errStr)", + "\t}", + "", + "\treturn strconv.Atoi(strings.TrimSuffix(outStr, \"\\n\"))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidsFromPidNamespace", + "kind": "function", + "source": [ + "func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) {", + "\tconst command = \"trap \\\"\\\" SIGURG ; ps -e -o pidns,pid,ppid,args\"", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(container.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", container, err)", + "\t}", + "", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"command %q failed to run in probe pod=%s (node=%s): %v\", 
command, ctx.GetPodName(), container.NodeName, err)", + "\t}", + "", + "\tre := regexp.MustCompile(PsRegex)", + "\tmatches := re.FindAllStringSubmatch(stdout, -1)", + "\t// If we do not find a successful log, we fail", + "\tfor _, v := range matches {", + "\t\t// Matching only the right PidNs", + "\t\tif pidNamespace != v[1] {", + "\t\t\tcontinue", + "\t\t}", + "\t\taPidNs, err := strconv.Atoi(v[1])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[1], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPid, err := strconv.Atoi(v[2])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[2], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPPid, err := strconv.Atoi(v[3])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[3], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tp = append(p, \u0026Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid})", + "\t}", + "\treturn p, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + 
"\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, 
data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = 
config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "GetScaleCrUnderTest", + "kind": "function", + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. 
Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getClusterCrdNames", + "kind": "function", + "source": [ + "func getClusterCrdNames() ([]*apiextv1.CustomResourceDefinition, error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tcrds, err := oc.APIExtClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get cluster CRDs, err: %v\", err)", + "\t}", + "", + "\tvar crdList []*apiextv1.CustomResourceDefinition", + "\tfor idx := range crds.Items {", + "\t\tcrdList = append(crdList, \u0026crds.Items[idx])", + "\t}", + "\treturn crdList, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getCrScaleObjects", + "kind": "function", + "source": [ + "func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tvar scaleObjects []ScaleObject", + "\tclients := 
clientsholder.GetClientsHolder()", + "\tfor _, cr := range crs {", + "\t\tgroupResourceSchema := schema.GroupResource{", + "\t\t\tGroup: crd.Spec.Group,", + "\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t}", + "", + "\t\tname := cr.GetName()", + "\t\tnamespace := cr.GetNamespace()", + "\t\tcrScale, err := clients.ScalingClient.Scales(namespace).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Error while getting the scale of CR=%s (CRD=%s) in namespace %s: %v\", name, crd.Name, namespace, err)", + "\t\t}", + "", + "\t\tscaleObjects = append(scaleObjects, ScaleObject{Scale: crScale, GroupResourceSchema: groupResourceSchema})", + "\t}", + "\treturn scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getOperatorCsvPods", + "kind": "function", + "source": [ + "func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types.NamespacedName][]*corev1.Pod, error) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\tclient := clientsholder.GetClientsHolder()", + "\tcsvToPodsMapping := make(map[types.NamespacedName][]*corev1.Pod)", + "", + "\t// The operator's pod (controller) should run in the subscription/operatorgroup ns.", + "\tfor _, csv := range csvList {", + "\t\tns, found := csv.Annotations[nsAnnotation]", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t}", + "", + "\t\tpods, err := getPodsOwnedByCsv(csv.Name, strings.TrimSpace(ns), client)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get pods from ns %v: %v\", ns, err)", + "\t\t}", + "", + "\t\tcsvToPodsMapping[types.NamespacedName{Name: csv.Name, Namespace: csv.Namespace}] = pods", + "\t}", + "\treturn csvToPodsMapping, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", 
versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCniPlugins", + "kind": "function", + "source": [ + "func GetCniPlugins() (out map[string][]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string][]interface{})", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, cniPluginsCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cniPluginsCommand, probePod.String())", + "\t\t\tcontinue", + "\t\t}", + "\t\tdecoded := []interface{}{}", + "\t\terr = json.Unmarshal([]byte(outStr), \u0026decoded)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not decode json file because of: %s\", err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = decoded", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCsiDriver", + "kind": "function", + "source": [ + "func GetCsiDriver() (out map[string]interface{}) {", + "\to := clientsholder.GetClientsHolder()", + "\tcsiDriver, err := o.K8sClient.StorageV1().CSIDrivers().List(context.TODO(), apimachineryv1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Fail CSIDrivers.list err:%s\", err)", + "\t\treturn out", + "\t}", + "\tscheme := runtime.NewScheme()", + "\terr = storagev1.AddToScheme(scheme)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail AddToScheme err:%s\", 
err)", + "\t\treturn out", + "\t}", + "\tcodec := serializer.NewCodecFactory(scheme).LegacyCodec(storagev1.SchemeGroupVersion)", + "\tdata, err := runtime.Encode(codec, csiDriver)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail to encode Nodes to json, er: %s\", err)", + "\t\treturn out", + "\t}", + "", + "\terr = json.Unmarshal(data, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"failed to marshall nodes json, err: %v\", err)", + "\t\treturn out", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetHwInfoAllNodes", + "kind": "function", + "source": [ + "func GetHwInfoAllNodes() (out map[string]NodeHwInfo) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string]NodeHwInfo)", + "\tfor _, probePod := range env.ProbePods {", + "\t\thw := NodeHwInfo{}", + "\t\tlscpu, err := getHWJsonOutput(probePod, o, lscpuCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lscpu for node %s\", probePod.Spec.NodeName)", + "\t\t} else {", + "\t\t\tvar ok bool", + "\t\t\ttemp, ok := lscpu.(map[string]interface{})", + "\t\t\tif !ok {", + "\t\t\t\tlog.Error(\"problem casting lscpu field for node %s, lscpu=%v\", probePod.Spec.NodeName, lscpu)", + "\t\t\t} else {", + "\t\t\t\thw.Lscpu = temp[\"lscpu\"]", + "\t\t\t}", + "\t\t}", + "\t\thw.IPconfig, err = getHWJsonOutput(probePod, o, ipCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting ip config for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lsblk, err = getHWJsonOutput(probePod, o, lsblkCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lsblk for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lspci, err = getHWTextOutput(probePod, o, lspciCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lspci for node %s\", probePod.Spec.NodeName)", + "\t\t}", + 
"\t\tout[probePod.Spec.NodeName] = hw", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "GetPodTopOwner", + "kind": "function", + "source": [ + "func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) {", + "\ttopOwners = make(map[string]TopOwner)", + "\terr = followOwnerReferences(", + "\t\tclientsholder.GetClientsHolder().GroupResources,", + "\t\tclientsholder.GetClientsHolder().DynamicClient,", + "\t\ttopOwners,", + "\t\tpodNamespace,", + "\t\tpodOwnerReferences)", + "\tif err != nil {", + "\t\treturn topOwners, fmt.Errorf(\"could not get top owners, err: %v\", err)", + "\t}", + "\treturn topOwners, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetAllOperatorGroups", + "kind": "function", + "source": [ + "func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\tlist, err := client.OlmClient.OperatorsV1().OperatorGroups(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\treturn nil, err", + "\t}", + "", + "\tif k8serrors.IsNotFound(err) {", + "\t\tlog.Warn(\"No OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\tif len(list.Items) == 0 {", + "\t\tlog.Warn(\"OperatorGroup API resource found but no OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\t// Collect all OperatorGroup pointers", + "\tvar operatorGroups []*olmv1.OperatorGroup", + "\tfor i := range list.Items {", + "\t\toperatorGroups = append(operatorGroups, \u0026list.Items[i])", + "\t}", + "", + "\treturn operatorGroups, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.IsHyperThreadNode", + "kind": 
"function", + "source": [ + "func (node *Node) IsHyperThreadNode(env *TestEnvironment) (bool, error) {", + "\to := clientsholder.GetClientsHolder()", + "\tnodeName := node.Data.Name", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, isHyperThreadCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn false, fmt.Errorf(\"cannot execute %s on probe pod %s, err=%s, stderr=%s\", isHyperThreadCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "\tre := regexp.MustCompile(`Thread\\(s\\) per core:\\s+(\\d+)`)", + "\tmatch := re.FindStringSubmatch(cmdValue)", + "\tnum := 0", + "\tif len(match) == expectedValue {", + "\t\tnum, _ = strconv.Atoi(match[1])", + "\t}", + "\treturn num \u003e 1, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. 
Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = 
GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.CreatedByDeploymentConfig", + "kind": "function", + "source": [ + "func (p *Pod) CreatedByDeploymentConfig() (bool, error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, podOwner := range p.GetOwnerReferences() {", + "\t\tif podOwner.Kind == replicationController {", + "\t\t\treplicationControllers, err := oc.K8sClient.CoreV1().ReplicationControllers(p.Namespace).Get(context.TODO(), podOwner.Name, metav1.GetOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\treturn false, err", + "\t\t\t}", + "\t\t\tfor _, rcOwner := range replicationControllers.GetOwnerReferences() {", + "\t\t\t\tif rcOwner.Name == podOwner.Name \u0026\u0026 rcOwner.Kind == deploymentConfig {", + "\t\t\t\t\treturn true, err", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOV", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOV() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(p.Namespace).Get(context.TODO(), networkName, 
metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\tisSRIOV, err := isNetworkAttachmentDefinitionConfigTypeSRIOV(nad.Spec.Config)", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to know if network-attachment %s is sriov: %v\", networkName, err)", + "\t\t}", + "", + "\t\tlog.Debug(\"%s: NAD config: %s\", p, nad.Spec.Config)", + "\t\tif isSRIOV {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOVWithMTU", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOVWithMTU() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(", + "\t\t\tp.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\t// If the network-status annotation is not set, let's check the SriovNetwork/SriovNetworkNodePolicy CRs", + "\t\t// to see if the MTU is set there.", + "\t\tlog.Debug(\"Number of SriovNetworks: %d\", 
len(env.AllSriovNetworks))", + "\t\tlog.Debug(\"Number of SriovNetworkNodePolicies: %d\", len(env.AllSriovNetworkNodePolicies))", + "\t\tif sriovNetworkUsesMTU(env.AllSriovNetworks, env.AllSriovNetworkNodePolicies, nad.Name) {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "deployDaemonSet", + "kind": "function", + "source": [ + "func deployDaemonSet(namespace string) error {", + "\tk8sPrivilegedDs.SetDaemonSetClient(clientsholder.GetClientsHolder().K8sClient)", + "", + "\tdsImage := env.params.CertSuiteProbeImage", + "\tif k8sPrivilegedDs.IsDaemonSetReady(DaemonSetName, namespace, dsImage) {", + "\t\treturn nil", + "\t}", + "", + "\tmatchLabels := make(map[string]string)", + "\tmatchLabels[\"name\"] = DaemonSetName", + "\tmatchLabels[\"redhat-best-practices-for-k8s.com/app\"] = DaemonSetName", + "\t_, err := k8sPrivilegedDs.CreateDaemonSet(DaemonSetName, namespace, containerName, dsImage, matchLabels, probePodsTimeout,", + "\t\tconfiguration.GetTestParameters().DaemonsetCPUReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetCPULim,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemLim,", + "\t\tcorev1.PullIfNotPresent,", + "\t)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not deploy certsuite daemonset, err=%v\", err)", + "\t}", + "\terr = k8sPrivilegedDs.WaitDaemonsetReady(namespace, DaemonSetName, probePodsTimeout)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"timed out waiting for certsuite daemonset, err=%v\", err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "filterDPDKRunningPods", + "kind": "function", + "source": [ + "func filterDPDKRunningPods(pods []*Pod) []*Pod {", + "\tvar filteredPods []*Pod", + "\tconst (", + "\t\tdpdkDriver 
= \"vfio-pci\"", + "\t\tfindDeviceSubCommand = \"find /sys -name\"", + "\t)", + "\to := clientsholder.GetClientsHolder()", + "\tfor _, pod := range pods {", + "\t\tif len(pod.MultusPCIs) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tctx := clientsholder.NewContext(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name)", + "\t\tfindCommand := fmt.Sprintf(\"%s '%s'\", findDeviceSubCommand, pod.MultusPCIs[0])", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, findCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe %s, errStr: %s, err: %v\", findCommand, pod.String(), errStr, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif strings.Contains(outStr, dpdkDriver) {", + "\t\t\tfilteredPods = append(filteredPods, pod)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getCatalogSourceBundleCountFromProbeContainer", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + 
"\t\t\tcmd := \"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getMachineConfig", + "kind": "function", + "source": [ + "func getMachineConfig(mcName string, machineConfigs map[string]MachineConfig) (MachineConfig, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\t// Check whether we had already downloaded and parsed that machineConfig resource.", + "\tif mc, exists := machineConfigs[mcName]; exists {", + "\t\treturn mc, nil", + "\t}", + "", + "\tnodeMc, err := client.MachineCfg.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), mcName, metav1.GetOptions{})", + "\tif err != nil {", + "\t\treturn MachineConfig{}, err", + "\t}", + "", + "\tmc := MachineConfig{", + "\t\tMachineConfig: nodeMc,", + "\t}", + "", + "\terr = json.Unmarshal(nodeMc.Spec.Config.Raw, 
\u0026mc.Config)", + "\tif err != nil {", + "\t\treturn MachineConfig{}, fmt.Errorf(\"failed to unmarshal mc's Config field, err: %v\", err)", + "\t}", + "", + "\treturn mc, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getOperatorTargetNamespaces", + "kind": "function", + "source": [ + "func getOperatorTargetNamespaces(namespace string) ([]string, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\tlist, err := client.OlmClient.OperatorsV1().OperatorGroups(namespace).List(", + "\t\tcontext.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif len(list.Items) == 0 {", + "\t\treturn nil, errors.New(\"no OperatorGroup found\")", + "\t}", + "", + "\treturn list.Items[0].Spec.TargetNamespaces, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "GetProcessCPUScheduling", + "kind": "function", + "source": [ + "func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {", + "\tlog.Info(\"Checking the scheduling policy/priority in %v for pid=%d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"chrt -p %d\", pid)", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := crclient.GetNodeProbePodContext(testContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", 0, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\tstdout, stderr, err := ch.ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"command %q failed to run in probe pod %s (node %s): %v (stderr: %v)\",", + "\t\t\tcommand, ctx.GetPodName(), testContainer.NodeName, err, stderr)", + "\t}", + "", + "\tschedulePolicy, 
schedulePriority, err = parseSchedulingPolicyAndPriority(stdout)", + "\tif err != nil {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"error getting the scheduling policy and priority for %v : %v\", testContainer, err)", + "\t}", + "\tlog.Info(\"pid %d in %v has the cpu scheduling policy %s, scheduling priority %d\", pid, testContainer, schedulePolicy, schedulePriority)", + "", + "\treturn schedulePolicy, schedulePriority, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testAutomountServiceToken", + "kind": "function", + "source": [ + "func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q uses the default service account name.\", put.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found with default service account name\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Evaluate the pod's automount service tokens and any attached service accounts", + "\t\tclient := clientsholder.GetClientsHolder()", + "\t\tpodPassed, newMsg := rbac.EvaluateAutomountTokens(client.K8sClient.CoreV1(), put)", + "\t\tif !podPassed {", + "\t\t\tcheck.LogError(\"%s\", newMsg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, newMsg, false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q does not have automount service tokens set to true\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have automount service tokens set to 
true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testOneProcessPerContainer", + "kind": "function", + "source": [ + "func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t// the Istio sidecar container \"istio-proxy\" launches two processes: \"pilot-agent\" and \"envoy\"", + "\t\tif cut.IsIstioProxy() {", + "\t\t\tcheck.LogInfo(\"Skipping \\\"istio-proxy\\\" container\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Debug pod not found for node %q\", cut.NodeName)", + "\t\t\treturn", + "\t\t}", + "\t\tocpContext := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tpid, err := crclient.GetPidFromContainer(cut, ocpContext)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get PID for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get number of processes for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif nbProcesses \u003e 1 {", + 
"\t\t\tcheck.LogError(\"Container %q has more than one process running\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has more than one process running\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has only one process running\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has only one process running\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": "getCrsPerNamespaces", + "kind": "function", + "source": [ + "func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces map[string][]string, err error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, version := range aCrd.Spec.Versions {", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: aCrd.Spec.Group,", + "\t\t\tVersion: version.Name,", + "\t\t\tResource: aCrd.Spec.Names.Plural,", + "\t\t}", + "\t\tlog.Debug(\"Looking for CRs from CRD: %s api version:%s group:%s plural:%s\", aCrd.Name, version.Name, aCrd.Spec.Group, aCrd.Spec.Names.Plural)", + "\t\tcrs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error getting %s: %v\\n\", aCrd.Name, err)", + "\t\t\treturn crdNamespaces, err", + "\t\t}", + "\t\tcrdNamespaces = make(map[string][]string)", + "\t\tfor _, cr := range crs.Items {", + "\t\t\tname := cr.Object[\"metadata\"].(map[string]interface{})[\"name\"]", + "\t\t\tnamespace := cr.Object[\"metadata\"].(map[string]interface{})[\"namespace\"]", + "\t\t\tvar namespaceStr, nameStr string", + "\t\t\tif namespace == nil {", + "\t\t\t\tnamespaceStr = \"\"", + "\t\t\t} else {", + 
"\t\t\t\tnamespaceStr = fmt.Sprintf(\"%s\", namespace)", + "\t\t\t}", + "\t\t\tif name == nil {", + "\t\t\t\tnameStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnameStr = fmt.Sprintf(\"%s\", name)", + "\t\t\t}", + "\t\t\tcrdNamespaces[namespaceStr] = append(crdNamespaces[namespaceStr], nameStr)", + "\t\t}", + "\t}", + "\treturn crdNamespaces, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testHelmVersion", + "kind": "function", + "source": [ + "func testHelmVersion(check *checksdb.Check) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tclients := clientsholder.GetClientsHolder()", + "\t// Get the Tiller pod in the specified namespace", + "\tpodList, err := clients.K8sClient.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{", + "\t\tLabelSelector: \"app=helm,name=tiller\",", + "\t})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Could not get Tiller pod, err=%v\", err)", + "\t}", + "", + "\tif len(podList.Items) == 0 {", + "\t\tcheck.LogInfo(\"Tiller pod not found in any namespaces. Helm version is v3.\")", + "\t\tfor _, helm := range env.HelmChartReleases {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart was installed with helm v3\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.LogError(\"Tiller pod found, Helm version is v2 but v3 required\")", + "\tfor i := range podList.Items {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(podList.Items[i].Namespace, podList.Items[i].Name,", + "\t\t\t\"This pod is a Tiller pod. 
Helm Chart version is v2 but needs to be v3 due to the security risks associated with Tiller\", false))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CordonHelper", + "kind": "function", + "source": [ + "func CordonHelper(name, operation string) error {", + "\tclients := clientsholder.GetClientsHolder()", + "", + "\tlog.Info(\"Performing %s operation on node %s\", operation, name)", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Fetch node object", + "\t\tnode, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tswitch operation {", + "\t\tcase Cordon:", + "\t\t\tnode.Spec.Unschedulable = true", + "\t\tcase Uncordon:", + "\t\t\tnode.Spec.Unschedulable = false", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"cordonHelper: Unsupported operation:%s\", operation)", + "\t\t}", + "\t\t// Update the node", + "\t\t_, err = clients.K8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})", + "\t\treturn err", + "\t})", + "\tif retryErr != nil {", + "\t\tlog.Error(\"can not %s node: %s, err=%v\", operation, name, retryErr)", + "\t}", + "\treturn retryErr", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "deletePod", + "kind": "function", + "source": [ + "func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlog.Debug(\"deleting ns=%s pod=%s with %s mode\", pod.Namespace, pod.Name, mode)", + "\tgracePeriodSeconds := *pod.Spec.TerminationGracePeriodSeconds", + "\t// Create watcher before deleting pod", + "\twatcher, err := clients.K8sClient.CoreV1().Pods(pod.Namespace).Watch(context.TODO(), 
metav1.ListOptions{", + "\t\tFieldSelector: \"metadata.name=\" + pod.Name + \",metadata.namespace=\" + pod.Namespace,", + "\t})", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"waitPodDeleted ns=%s pod=%s, err=%s\", pod.Namespace, pod.Name, err)", + "\t}", + "\t// Actually deleting pod", + "\terr = clients.K8sClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{", + "\t\tGracePeriodSeconds: \u0026gracePeriodSeconds,", + "\t})", + "\tif err != nil {", + "\t\tlog.Error(\"Error deleting %s err: %v\", pod.String(), err)", + "\t\treturn err", + "\t}", + "\tif mode == DeleteBackground {", + "\t\treturn nil", + "\t}", + "\twg.Add(1)", + "\tpodName := pod.Name", + "\tnamespace := pod.Namespace", + "\tgo func() {", + "\t\tdefer wg.Done()", + "\t\twaitPodDeleted(namespace, podName, gracePeriodSeconds, watcher)", + "\t}()", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForStatefulSetReady", + "kind": "function", + "source": [ + "func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log.Logger) bool {", + "\tlogger.Debug(\"Check if statefulset %s:%s is ready\", ns, name)", + "\tclients := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tss, err := provider.GetUpdatedStatefulset(clients.K8sClient.AppsV1(), ns, name)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Error while getting the %s, err: %v\", ss.ToString(), err)", + "\t\t} else if ss.IsStatefulSetReady() {", + "\t\t\tlogger.Info(\"%s is ready\", ss.ToString())", + "\t\t\treturn true", + "\t\t}", + "\t\ttime.Sleep(time.Second)", + "\t}", + "\tlogger.Error(\"Statefulset %s:%s is not ready\", ns, name)", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "isDeploymentReady", + "kind": "function", + "source": [ 
+ "func isDeploymentReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tdep, err := provider.GetUpdatedDeployment(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn dep.IsDeploymentReady(), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "isStatefulSetReady", + "kind": "function", + "source": [ + "func isStatefulSetReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tsts, err := provider.GetUpdatedStatefulset(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn sts.IsStatefulSetReady(), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleCrd", + "kind": "function", + "source": [ + "func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif crScale == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\treplicas := crScale.Spec.Replicas", + "\tname := crScale.GetName()", + "\tnamespace := crScale.GetNamespace()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn 
false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleDeployment", + "kind": "function", + "source": [ + "func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlogger.Info(\"Deployment not using HPA: %s:%s\", deployment.Namespace, deployment.Name)", + "\tvar replicas int32", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t} else {", + "\t\treplicas = 1", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + 
"\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHPACrd", + "kind": "function", + "source": [ + "func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif cr == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\tnamespace := cr.GetNamespace()", + "", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", + "\treplicas := cr.Spec.Replicas", + "\tname := cr.GetName()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + 
"\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, min, hpa.Spec.MaxReplicas)", + "\treturn scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHpaDeployment", + "kind": "function", + "source": [ + "func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(deployment.Namespace)", + "\tvar min int32", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t} else {", + "\t\tmin = 1", + "\t}", + "\treplicas := int32(1)", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, 
deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, min, max)", + "\treturn scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHpaStatefulSet", + "kind": "function", + "source": [ + "func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpaName := hpa.Name", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\thpscaler := 
clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", + "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, max)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, max)", + "\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, min, max, timeout, logger)", + "\treturn pass", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleStatefulSet", + "kind": "function", + "source": [ + "func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\tssClients := clients.K8sClient.AppsV1().StatefulSets(namespace)", + "\tlogger.Debug(\"Scale statefulset not using HPA %s:%s\", namespace, name)", + "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn 
false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "containerHasLoggingOutput", + "kind": "function", + "source": [ + "func containerHasLoggingOutput(cut *provider.Container) (bool, error) {", + "\tocpClient := clientsholder.GetClientsHolder()", + "", + "\t// K8s' API will not return lines that do not have the newline termination char, so", + "\t// We need to ask for the last two lines.", + "\tconst tailLogLines = 2", + "\tnumLogLines := int64(tailLogLines)", + "\tpodLogOptions := corev1.PodLogOptions{TailLines: \u0026numLogLines, Container: cut.Name}", + "\treq := ocpClient.K8sClient.CoreV1().Pods(cut.Namespace).GetLogs(cut.Podname, \u0026podLogOptions)", + "", + "\tpodLogsReaderCloser, err := req.Stream(context.TODO())", + "\tif err != nil {", + "\t\treturn false, fmt.Errorf(\"unable to get log streamer, err: %v\", err)", + "\t}", + "", + "\tdefer podLogsReaderCloser.Close()", + "", + "\tbuf := new(bytes.Buffer)", + "\t_, err = io.Copy(buf, podLogsReaderCloser)", + "\tif err != nil {", + "\t\treturn false, fmt.Errorf(\"unable to get log data, err: %v\", err)", + "\t}", + "", + "\treturn buf.String() != \"\", nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testAPICompatibilityWithNextOCPRelease", + "kind": "function", + "source": [ + "func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tisOCP := provider.IsOCPCluster()", + "\tcheck.LogInfo(\"Is OCP: %v\", isOCP)", + "", + "\tif !isOCP {", + "\t\tcheck.LogInfo(\"The Kubernetes distribution is not OpenShift. 
Skipping API compatibility test.\")", + "\t\treturn", + "\t}", + "", + "\t// Retrieve APIRequestCount using clientsholder", + "\toc := clientsholder.GetClientsHolder()", + "\tapiRequestCounts, err := oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error retrieving APIRequestCount objects: %s\", err)", + "\t\treturn", + "\t}", + "", + "\t// Extract unique service account names from env.ServiceAccounts", + "\tworkloadServiceAccountNames := extractUniqueServiceAccountNames(env)", + "\tcheck.LogInfo(\"Detected %d unique service account names for the workload: %v\", len(workloadServiceAccountNames), workloadServiceAccountNames)", + "", + "\t// Build a map from service accounts to deprecated APIs", + "\tserviceAccountToDeprecatedAPIs := buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)", + "", + "\t// Evaluate API compliance with the next Kubernetes version", + "\tcompliantObjects, nonCompliantObjects := evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)", + "", + "\t// Add test results", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "WaitOperatorReady", + "kind": "function", + "source": [ + "func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to 
take into account that its pods", + "\t\t// could have been deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testContainersFsDiff", + "kind": "function", + "source": [ + "func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "", + "\t\t// If the probe pod is not found, we cannot run the test.", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Check whether or not a container is available to prevent a panic.", + "\t\tif len(probePod.Spec.Containers) == 0 {", + "\t\t\tcheck.LogError(\"Probe Pod %q has no 
containers\", probePod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod has no containers\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tctxt := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tfsDiffTester := cnffsdiff.NewFsDiffTester(check, clientsholder.GetClientsHolder(), ctxt, env.OpenshiftVersion)", + "\t\tfsDiffTester.RunTest(cut.UID)", + "\t\tswitch fsDiffTester.GetResults() {", + "\t\tcase testhelper.SUCCESS:", + "\t\t\tcheck.LogInfo(\"Container %q is not modified\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not modified\", true))", + "\t\t\tcontinue", + "\t\tcase testhelper.FAILURE:", + "\t\t\tcheck.LogError(\"Container %q modified (changed folders: %v, deleted folders: %v\", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is modified\", false).", + "\t\t\t\tAddField(\"ChangedFolders\", strings.Join(fsDiffTester.ChangedFolders, \",\")).", + "\t\t\t\tAddField(\"DeletedFolders\", strings.Join(fsDiffTester.DeletedFolders, \",\")))", + "", + "\t\tcase testhelper.ERROR:", + "\t\t\tcheck.LogError(\"Could not run fs-diff in Container %q, err: %v\", cut, fsDiffTester.Error)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Error while running fs-diff\", false).AddField(testhelper.Error, fsDiffTester.Error.Error()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testHugepages", + "kind": 
"function", + "source": [ + "func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Nodes {", + "\t\tnode := env.Nodes[i]", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tif !node.IsWorkerNode() {", + "\t\t\tcheck.LogInfo(\"Node %q is not a worker node\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Not a worker node\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprobePod, exist := env.ProbePods[nodeName]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Could not find a Probe Pod in node %q.\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\thpTester, err := hugepages.NewTester(\u0026node, probePod, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get node hugepages tester for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Unable to get node hugepages tester\", false))", + "\t\t}", + "", + "\t\tif err := hpTester.Run(); err != nil {", + "\t\t\tcheck.LogError(\"Hugepages check failed for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the hugepages check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the hugepages check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsRedHatRelease", + "kind": "function", + "source": [ + "func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tbaseImageTester := isredhat.NewBaseImageTester(clientsholder.GetClientsHolder(), clientsholder.NewContext(cut.Namespace, cut.Podname, cut.Name))", + "", + "\t\tresult, err := baseImageTester.TestContainerIsRedHatRelease()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not collect release information from Container %q, err=%v\", cut, err)", + "\t\t}", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Container %q has failed the RHEL release check\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Failed the RHEL release check\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has passed the RHEL release check\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Passed the RHEL release check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsSELinuxEnforcing", + "kind": "function", + "source": [ + "func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst (", + "\t\tgetenforceCommand = `chroot /host getenforce`", + "\t\tenforcingString = \"Enforcing\\n\"", + "\t)", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\to := clientsholder.GetClientsHolder()", + 
"\tnodesFailed := 0", + "\tnodesError := 0", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, getenforceCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tcheck.LogError(\"Could not execute command %q in Probe Pod %q, errStr: %q, err: %v\", getenforceCommand, probePod, errStr, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(probePod.Namespace, probePod.Name, \"Failed to execute command\", false))", + "\t\t\tnodesError++", + "\t\t\tcontinue", + "\t\t}", + "\t\tif outStr != enforcingString {", + "\t\t\tcheck.LogError(\"Node %q is not running SELinux, %s command returned: %s\", probePod.Spec.NodeName, getenforceCommand, outStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is not enforced\", false))", + "\t\t\tnodesFailed++", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q is running SELinux\", probePod.Spec.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is enforced\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "getCurrentKernelCmdlineArgs", + "kind": "function", + "source": [ + "func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcurrentKernelCmdlineArgs, errStr, err := o.ExecCommandContainer(ctx, kernelArgscommand)", + "\tif err != nil || 
errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod container %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName].Name, err, errStr)", + "\t}", + "\tcurrentSplitKernelCmdlineArgs := strings.Split(strings.TrimSuffix(currentKernelCmdlineArgs, \"\\n\"), \" \")", + "\treturn arrayhelper.ArgListToMap(currentSplitKernelCmdlineArgs), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "getGrubKernelArgs", + "kind": "function", + "source": [ + "func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tbootConfig, errStr, err := o.ExecCommandContainer(ctx, grubKernelArgsCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\tsplitBootConfig := strings.Split(bootConfig, \"\\n\")", + "\tfilteredBootConfig := arrayhelper.FilterArray(splitBootConfig, func(line string) bool {", + "\t\treturn strings.HasPrefix(line, \"options\")", + "\t})", + "\tif len(filteredBootConfig) != 1 {", + "\t\treturn aMap, fmt.Errorf(\"filteredBootConfig!=1\")", + "\t}", + "\tgrubKernelConfig := filteredBootConfig[0]", + "\tgrubSplitKernelConfig := strings.Split(grubKernelConfig, \" \")", + "\tgrubSplitKernelConfig = grubSplitKernelConfig[1:]", + "\treturn arrayhelper.ArgListToMap(grubSplitKernelConfig), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/sysctlconfig", + "name": "GetSysctlSettings", + "kind": "function", + "source": [ + "func GetSysctlSettings(env *provider.TestEnvironment, nodeName string) 
(map[string]string, error) {", + "\tconst (", + "\t\tsysctlCommand = \"chroot /host sysctl --system\"", + "\t)", + "", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, sysctlCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s in probe pod %s, err=%s, stderr=%s\", sysctlCommand,", + "\t\t\tenv.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\treturn parseSysctlSystemOutput(outStr), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "GetNewClientsHolder", + "qualifiedName": "GetNewClientsHolder", + "exported": true, + "signature": "func(string)(*ClientsHolder)", + "doc": "GetNewClientsHolder Creates a Kubernetes clients holder from the provided kubeconfig\n\nThe function takes a file path to a kubeconfig, uses an internal constructor\nto instantiate a ClientsHolder with all necessary API clients, and logs a\nfatal error if construction fails. 
On success it returns a pointer to the\nfully initialized holder for use by other components.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:245", + "calls": [ + { + "name": "newClientsHolder", + "kind": "function", + "source": [ + "func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:funlen // this is a special function with lots of assignments", + "\tlog.Info(\"Creating k8s go-clients holder.\")", + "", + "\tvar err error", + "\tclientsHolder.RestConfig, err = getClusterRestConfig(filenames...)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get rest.Config: %v\", err)", + "\t}", + "\tclientsHolder.RestConfig.Timeout = DefaultTimeout", + "", + "\tclientsHolder.DynamicClient, err = dynamic.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate dynamic client (unstructured/dynamic): %s\", err)", + "\t}", + "\tclientsHolder.APIExtClient, err = apiextv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiextv1: %s\", err)", + "\t}", + "\tclientsHolder.OlmClient, err = olmClient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.OlmPkgClient, err = olmpkgclient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.K8sClient, err = kubernetes.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8sclient: %s\", err)", + "\t}", + "\t// create the oc client", + "\tclientsHolder.OcpClient, err = clientconfigv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ocClient: %s\", err)", + "\t}", + 
"\tclientsHolder.MachineCfg, err = ocpMachine.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate MachineCfg client: %s\", err)", + "\t}", + "\tclientsHolder.K8sNetworkingClient, err = networkingv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8s networking client: %s\", err)", + "\t}", + "", + "\tdiscoveryClient, err := discovery.NewDiscoveryClientForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate discoveryClient: %s\", err)", + "\t}", + "", + "\tclientsHolder.GroupResources, err = discoveryClient.ServerPreferredResources()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot get list of resources in cluster: %s\", err)", + "\t}", + "", + "\tresolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)", + "\tgr, err := restmapper.GetAPIGroupResources(clientsHolder.K8sClient.Discovery())", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate GetAPIGroupResources: %s\", err)", + "\t}", + "", + "\tmapper := restmapper.NewDiscoveryRESTMapper(gr)", + "\tclientsHolder.ScalingClient, err = scale.NewForConfig(clientsHolder.RestConfig, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ScalesGetter: %s\", err)", + "\t}", + "", + "\tclientsHolder.CNCFNetworkingClient, err = cncfNetworkAttachmentv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate CNCF networking client\")", + "\t}", + "", + "\tclientsHolder.ApiserverClient, err = apiserverscheme.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiserverscheme: %w\", err)", + "\t}", + "", + "\tclientsHolder.ready = true", + "\treturn \u0026clientsHolder, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, 
err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). 
Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder {", + "\t_, err := newClientsHolder(kubeconfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "", + "\treturn \u0026clientsHolder", + "}" + ] + }, + { + "name": "GetTestClientsHolder", + "qualifiedName": "GetTestClientsHolder", + "exported": true, + "signature": "func([]runtime.Object)(*ClientsHolder)", + "doc": "GetTestClientsHolder Creates a mocked client holder for unit tests\n\nThis function accepts a slice of runtime objects that represent Kubernetes\nresources and builds separate slices for each supported client type. 
It then\ninitializes fake clients with these objects, marks the holder as ready, and\nreturns it for use in testing scenarios.\n\nnolint:funlen,gocyclo", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:110", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/kubernetes/fake", + "name": "NewSimpleClientset", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake", + "name": "NewSimpleClientset", + "kind": "function" + }, + { + "pkgPath": "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake", + "name": "NewSimpleClientset", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTestClientsHolder(k8sMockObjects []runtime.Object) *ClientsHolder {", + "\t// Build slices of different objects depending on what client", + "\t// is supposed to expect them.", + "\tvar k8sClientObjects []runtime.Object", + "\tvar k8sExtClientObjects 
[]runtime.Object", + "\tvar k8sPlumbingObjects []runtime.Object", + "", + "\tfor _, v := range k8sMockObjects {", + "\t\t// Based on what type of object is, populate certain object slices", + "\t\t// with what is supported by a certain client.", + "\t\t// Add more items below if/when needed.", + "\t\tswitch v.(type) {", + "\t\t// K8s Client Objects", + "\t\tcase *corev1.ServiceAccount:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *rbacv1.ClusterRole:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *rbacv1.ClusterRoleBinding:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *rbacv1.Role:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *rbacv1.RoleBinding:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *corev1.Pod:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *corev1.Service:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *corev1.Node:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *appsv1.Deployment:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *appsv1.StatefulSet:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *corev1.ResourceQuota:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *corev1.PersistentVolume:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *corev1.PersistentVolumeClaim:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *policyv1.PodDisruptionBudget:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *scalingv1.HorizontalPodAutoscaler:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *storagev1.StorageClass:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "\t\tcase *metav1.APIResourceList:", + "\t\t\tk8sClientObjects = append(k8sClientObjects, v)", + "", + "\t\t// K8s Extension 
Client Objects", + "\t\tcase *apiextv1c.CustomResourceDefinition:", + "\t\t\tk8sExtClientObjects = append(k8sExtClientObjects, v)", + "", + "\t\t// K8sNetworkPlumbing Client Objects", + "\t\tcase *cncfV1.NetworkAttachmentDefinition:", + "\t\t\tk8sPlumbingObjects = append(k8sPlumbingObjects, v)", + "\t\t}", + "\t}", + "", + "\t// Add the objects to their corresponding API Clients", + "\tclientsHolder.K8sClient = k8sFakeClient.NewSimpleClientset(k8sClientObjects...)", + "\tclientsHolder.APIExtClient = apiextv1fake.NewSimpleClientset(k8sExtClientObjects...)", + "\tclientsHolder.CNCFNetworkingClient = cncfNetworkAttachmentFake.NewSimpleClientset(k8sPlumbingObjects...)", + "", + "\tclientsHolder.ready = true", + "\treturn \u0026clientsHolder", + "}" + ] + }, + { + "name": "NewContext", + "qualifiedName": "NewContext", + "exported": true, + "signature": "func(string, string, string)(Context)", + "doc": "NewContext Creates a context for running commands inside a specific pod container\n\nThis function takes the namespace, pod name, and container name of a probe\npod and returns a Context object that holds those values. The returned\nContext is used by other components to target the correct container when\nexecuting shell commands via the client holder. 
No additional processing or\nvalidation occurs; it simply packages the identifiers into the struct.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:470", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetNodeProbePodContext", + "kind": "function", + "source": [ + "func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) {", + "\tprobePod := env.ProbePods[node]", + "\tif probePod == nil {", + "\t\treturn clientsholder.Context{}, fmt.Errorf(\"probe pod not found on node %s\", node)", + "\t}", + "", + "\treturn clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCniPlugins", + "kind": "function", + "source": [ + "func GetCniPlugins() (out map[string][]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string][]interface{})", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, cniPluginsCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cniPluginsCommand, probePod.String())", + "\t\t\tcontinue", + "\t\t}", + "\t\tdecoded := []interface{}{}", + "\t\terr = json.Unmarshal([]byte(outStr), \u0026decoded)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not decode json file because of: %s\", err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = decoded", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "getHWJsonOutput", + 
"kind": "function", + "source": [ + "func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out interface{}, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", cmd, err, errStr)", + "\t}", + "\terr = json.Unmarshal([]byte(outStr), \u0026out)", + "\tif err != nil {", + "\t\treturn out, fmt.Errorf(\"could not decode json file because of: %s\", err)", + "\t}", + "\treturn out, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "getHWTextOutput", + "kind": "function", + "source": [ + "func getHWTextOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out []string, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", lspciCommand, err, errStr)", + "\t}", + "", + "\treturn strings.Split(outStr, \"\\n\"), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.IsHyperThreadNode", + "kind": "function", + "source": [ + "func (node *Node) IsHyperThreadNode(env *TestEnvironment) (bool, error) {", + "\to := clientsholder.GetClientsHolder()", + "\tnodeName := node.Data.Name", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, isHyperThreadCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn false, fmt.Errorf(\"cannot execute %s on 
probe pod %s, err=%s, stderr=%s\", isHyperThreadCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "\tre := regexp.MustCompile(`Thread\\(s\\) per core:\\s+(\\d+)`)", + "\tmatch := re.FindStringSubmatch(cmdValue)", + "\tnum := 0", + "\tif len(match) == expectedValue {", + "\t\tnum, _ = strconv.Atoi(match[1])", + "\t}", + "\treturn num \u003e 1, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "filterDPDKRunningPods", + "kind": "function", + "source": [ + "func filterDPDKRunningPods(pods []*Pod) []*Pod {", + "\tvar filteredPods []*Pod", + "\tconst (", + "\t\tdpdkDriver = \"vfio-pci\"", + "\t\tfindDeviceSubCommand = \"find /sys -name\"", + "\t)", + "\to := clientsholder.GetClientsHolder()", + "\tfor _, pod := range pods {", + "\t\tif len(pod.MultusPCIs) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tctx := clientsholder.NewContext(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name)", + "\t\tfindCommand := fmt.Sprintf(\"%s '%s'\", findDeviceSubCommand, pod.MultusPCIs[0])", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, findCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe %s, errStr: %s, err: %v\", findCommand, pod.String(), errStr, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif strings.Contains(outStr, dpdkDriver) {", + "\t\t\tfilteredPods = append(filteredPods, pod)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getCatalogSourceBundleCountFromProbeContainer", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := 
clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\t\tcmd := \"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testOneProcessPerContainer", + "kind": "function", + "source": [ + "func 
testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t// the Istio sidecar container \"istio-proxy\" launches two processes: \"pilot-agent\" and \"envoy\"", + "\t\tif cut.IsIstioProxy() {", + "\t\t\tcheck.LogInfo(\"Skipping \\\"istio-proxy\\\" container\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Debug pod not found for node %q\", cut.NodeName)", + "\t\t\treturn", + "\t\t}", + "\t\tocpContext := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tpid, err := crclient.GetPidFromContainer(cut, ocpContext)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get PID for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get number of processes for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif nbProcesses \u003e 1 {", + "\t\t\tcheck.LogError(\"Container %q has more than one process running\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has more than one process running\", false))", + "\t\t} else {", + 
"\t\t\tcheck.LogInfo(\"Container %q has only one process running\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has only one process running\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testContainersFsDiff", + "kind": "function", + "source": [ + "func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "", + "\t\t// If the probe pod is not found, we cannot run the test.", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Check whether or not a container is available to prevent a panic.", + "\t\tif len(probePod.Spec.Containers) == 0 {", + "\t\t\tcheck.LogError(\"Probe Pod %q has no containers\", probePod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod has no containers\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tctxt := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tfsDiffTester := cnffsdiff.NewFsDiffTester(check, clientsholder.GetClientsHolder(), ctxt, env.OpenshiftVersion)", + "\t\tfsDiffTester.RunTest(cut.UID)", + "\t\tswitch fsDiffTester.GetResults() {", + 
"\t\tcase testhelper.SUCCESS:", + "\t\t\tcheck.LogInfo(\"Container %q is not modified\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not modified\", true))", + "\t\t\tcontinue", + "\t\tcase testhelper.FAILURE:", + "\t\t\tcheck.LogError(\"Container %q modified (changed folders: %v, deleted folders: %v\", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is modified\", false).", + "\t\t\t\tAddField(\"ChangedFolders\", strings.Join(fsDiffTester.ChangedFolders, \",\")).", + "\t\t\t\tAddField(\"DeletedFolders\", strings.Join(fsDiffTester.DeletedFolders, \",\")))", + "", + "\t\tcase testhelper.ERROR:", + "\t\t\tcheck.LogError(\"Could not run fs-diff in Container %q, err: %v\", cut, fsDiffTester.Error)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Error while running fs-diff\", false).AddField(testhelper.Error, fsDiffTester.Error.Error()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsRedHatRelease", + "kind": "function", + "source": [ + "func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tbaseImageTester := isredhat.NewBaseImageTester(clientsholder.GetClientsHolder(), clientsholder.NewContext(cut.Namespace, cut.Podname, cut.Name))", + "", + "\t\tresult, err := baseImageTester.TestContainerIsRedHatRelease()", + "\t\tif 
err != nil {", + "\t\t\tcheck.LogError(\"Could not collect release information from Container %q, err=%v\", cut, err)", + "\t\t}", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Container %q has failed the RHEL release check\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Failed the RHEL release check\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has passed the RHEL release check\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Passed the RHEL release check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsSELinuxEnforcing", + "kind": "function", + "source": [ + "func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst (", + "\t\tgetenforceCommand = `chroot /host getenforce`", + "\t\tenforcingString = \"Enforcing\\n\"", + "\t)", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\to := clientsholder.GetClientsHolder()", + "\tnodesFailed := 0", + "\tnodesError := 0", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, getenforceCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tcheck.LogError(\"Could not execute command %q in Probe Pod %q, errStr: %q, err: %v\", getenforceCommand, probePod, errStr, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(probePod.Namespace, probePod.Name, \"Failed to execute command\", false))", + "\t\t\tnodesError++", + "\t\t\tcontinue", + 
"\t\t}", + "\t\tif outStr != enforcingString {", + "\t\t\tcheck.LogError(\"Node %q is not running SELinux, %s command returned: %s\", probePod.Spec.NodeName, getenforceCommand, outStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is not enforced\", false))", + "\t\t\tnodesFailed++", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q is running SELinux\", probePod.Spec.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is enforced\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + 
"\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "getCurrentKernelCmdlineArgs", + "kind": "function", + "source": [ + "func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcurrentKernelCmdlineArgs, errStr, err := o.ExecCommandContainer(ctx, kernelArgscommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod container %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName].Name, err, errStr)", + "\t}", + "\tcurrentSplitKernelCmdlineArgs := strings.Split(strings.TrimSuffix(currentKernelCmdlineArgs, \"\\n\"), \" \")", + "\treturn arrayhelper.ArgListToMap(currentSplitKernelCmdlineArgs), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "getGrubKernelArgs", + "kind": "function", + "source": [ + "func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tbootConfig, errStr, err := o.ExecCommandContainer(ctx, grubKernelArgsCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod %s, err=%s, stderr=%s\", grubKernelArgsCommand, 
env.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\tsplitBootConfig := strings.Split(bootConfig, \"\\n\")", + "\tfilteredBootConfig := arrayhelper.FilterArray(splitBootConfig, func(line string) bool {", + "\t\treturn strings.HasPrefix(line, \"options\")", + "\t})", + "\tif len(filteredBootConfig) != 1 {", + "\t\treturn aMap, fmt.Errorf(\"filteredBootConfig!=1\")", + "\t}", + "\tgrubKernelConfig := filteredBootConfig[0]", + "\tgrubSplitKernelConfig := strings.Split(grubKernelConfig, \" \")", + "\tgrubSplitKernelConfig = grubSplitKernelConfig[1:]", + "\treturn arrayhelper.ArgListToMap(grubSplitKernelConfig), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "NewTester", + "kind": "function", + "source": [ + "func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error) {", + "\ttester := \u0026Tester{", + "\t\tnode: node,", + "\t\tcommander: commander,", + "\t\tcontext: clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name),", + "\t}", + "", + "\tlog.Info(\"Getting node %s numa's hugepages values.\", node.Data.Name)", + "\tvar err error", + "\ttester.nodeHugepagesByNuma, err = tester.getNodeNumaHugePages()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get node hugepages, err: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Parsing machineconfig's kernelArguments and systemd's hugepages units.\")", + "\ttester.mcSystemdHugepagesByNuma, err = getMcSystemdUnitsHugepagesConfig(\u0026tester.node.Mc)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get MC systemd hugepages config, err: %v\", err)", + "\t}", + "", + "\treturn tester, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/sysctlconfig", + "name": "GetSysctlSettings", + "kind": "function", + "source": [ + "func GetSysctlSettings(env *provider.TestEnvironment, 
nodeName string) (map[string]string, error) {", + "\tconst (", + "\t\tsysctlCommand = \"chroot /host sysctl --system\"", + "\t)", + "", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, sysctlCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s in probe pod %s, err=%s, stderr=%s\", sysctlCommand,", + "\t\t\tenv.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\treturn parseSysctlSystemOutput(outStr), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "SetTestClientGroupResources", + "qualifiedName": "SetTestClientGroupResources", + "exported": true, + "signature": "func([]*metav1.APIResourceList)()", + "doc": "SetTestClientGroupResources Stores a list of API resource group definitions\n\nThis function receives an array of API resource lists and assigns it to the\ninternal holder used by the client package. It updates the shared state that\nother components reference when interacting with Kubernetes groups. 
No value\nis returned, and the operation replaces any previously stored resources.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:205", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SetTestClientGroupResources(groupResources []*metav1.APIResourceList) {", + "\tclientsHolder.GroupResources = groupResources", + "}" + ] + }, + { + "name": "SetTestK8sClientsHolder", + "qualifiedName": "SetTestK8sClientsHolder", + "exported": true, + "signature": "func(kubernetes.Interface)()", + "doc": "SetTestK8sClientsHolder Stores a Kubernetes client for test usage\n\nThis function assigns the provided Kubernetes interface to an internal holder\nand marks it as ready. It is intended for tests that require a mock or real\nclient without interacting with a live cluster. After execution, other\ncomponents can retrieve the stored client from the holder.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:183", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SetTestK8sClientsHolder(k8sClient kubernetes.Interface) {", + "\tclientsHolder.K8sClient = k8sClient", + "\tclientsHolder.ready = true", + "}" + ] + }, + { + "name": "SetTestK8sDynamicClientsHolder", + "qualifiedName": "SetTestK8sDynamicClientsHolder", + "exported": true, + "signature": "func(dynamic.Interface)()", + "doc": "SetTestK8sDynamicClientsHolder Assigns a test Kubernetes dynamic client to the internal holder\n\nThis function stores the provided dynamic client instance in an internal\nstructure used by tests, marking the holder as ready for use. 
It replaces any\nexisting client reference and enables subsequent code that relies on the\ndynamic client to operate against this test instance.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:194", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SetTestK8sDynamicClientsHolder(dynamicClient dynamic.Interface) {", + "\tclientsHolder.DynamicClient = dynamicClient", + "\tclientsHolder.ready = true", + "}" + ] + }, + { + "name": "SetupFakeOlmClient", + "qualifiedName": "SetupFakeOlmClient", + "exported": true, + "signature": "func([]runtime.Object)()", + "doc": "SetupFakeOlmClient Replaces the real OLM client with a fake for testing\n\nThis function takes a slice of Kubernetes objects that represent mocked OLM\nresources. It constructs a new fake client set containing those objects and\nassigns it to the package's client holder, enabling tests to interact with\nOLM APIs without contacting an actual cluster.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:98", + "calls": [ + { + "pkgPath": "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake", + "name": "NewSimpleClientset", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SetupFakeOlmClient(olmMockObjects []runtime.Object) {", + "\tclientsHolder.OlmClient = olmFakeClient.NewSimpleClientset(olmMockObjects...)", + "}" + ] + }, + { + "name": "createByteArrayKubeConfig", + "qualifiedName": "createByteArrayKubeConfig", + "exported": false, + "signature": "func(*clientcmdapi.Config)([]byte, error)", + "doc": "createByteArrayKubeConfig Converts a Kubernetes configuration into YAML byte array\n\nThe function takes a pointer to a client configuration structure and\nserializes it into its YAML representation using the client-go library. 
It\nreturns the resulting bytes along with any error that occurs during\nserialization, allowing callers to use the data as a kubeconfig file in\nmemory.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:261", + "calls": [ + { + "pkgPath": "k8s.io/client-go/tools/clientcmd", + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "getClusterRestConfig", + "kind": "function", + "source": [ + "func getClusterRestConfig(filenames ...string) (*rest.Config, error) {", + "\trestConfig, err := rest.InClusterConfig()", + "\tif err == nil {", + "\t\tlog.Info(\"CNF Cert Suite is running inside a cluster.\")", + "", + "\t\t// Convert restConfig to clientcmdapi.Config so we can get the kubeconfig \"file\" bytes", + "\t\t// needed by preflight's operator checks.", + "\t\tclientConfig := GetClientConfigFromRestConfig(restConfig)", + "\t\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(clientConfig)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create byte array from kube config reference: %v\", err)", + "\t\t}", + "", + "\t\t// No error: we're inside a cluster.", + "\t\treturn restConfig, nil", + "\t}", + "", + "\tlog.Info(\"Running outside a cluster. 
Parsing kubeconfig file/s %+v\", filenames)", + "\tif len(filenames) == 0 {", + "\t\treturn nil, errors.New(\"no kubeconfig files set\")", + "\t}", + "", + "\t// Get the rest.Config from the kubeconfig file/s.", + "\tprecedence := []string{}", + "\tprecedence = append(precedence, filenames...)", + "", + "\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()", + "\tloadingRules.Precedence = precedence", + "", + "\tkubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(", + "\t\tloadingRules,", + "\t\t\u0026clientcmd.ConfigOverrides{},", + "\t)", + "", + "\t// Save merged config to temporary kubeconfig file.", + "\tkubeRawConfig, err := kubeconfig.RawConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get kube raw config: %w\", err)", + "\t}", + "", + "\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(\u0026kubeRawConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to byte array kube config reference: %w\", err)", + "\t}", + "", + "\trestConfig, err = kubeconfig.ClientConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate rest config: %s\", err)", + "\t}", + "", + "\treturn restConfig, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createByteArrayKubeConfig(kubeConfig *clientcmdapi.Config) ([]byte, error) {", + "\tyamlBytes, err := clientcmd.Write(*kubeConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to generate yaml bytes from kubeconfig: %w\", err)", + "\t}", + "\treturn yamlBytes, nil", + "}" + ] + }, + { + "name": "getClusterRestConfig", + "qualifiedName": "getClusterRestConfig", + "exported": false, + "signature": "func(...string)(*rest.Config, error)", + "doc": "getClusterRestConfig Retrieves a Kubernetes REST configuration from in‑cluster or kubeconfig files\n\nThe function first attempts to obtain an in‑cluster configuration; if\nsuccessful it converts that config into a kubeconfig byte 
array for\ndownstream use and returns the rest.Config. If not running inside a cluster,\nit requires one or more kubeconfig file paths, merges them with precedence\nrules, creates a temporary kubeconfig representation, extracts the REST\nclient configuration from it, and returns that configuration along with any\nerror encountered.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:310", + "calls": [ + { + "pkgPath": "k8s.io/client-go/rest", + "name": "InClusterConfig", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "GetClientConfigFromRestConfig", + "kind": "function", + "source": [ + "func GetClientConfigFromRestConfig(restConfig *rest.Config) *clientcmdapi.Config {", + "\treturn \u0026clientcmdapi.Config{", + "\t\tKind: \"Config\",", + "\t\tAPIVersion: \"v1\",", + "\t\tClusters: map[string]*clientcmdapi.Cluster{", + "\t\t\t\"default-cluster\": {", + "\t\t\t\tServer: restConfig.Host,", + "\t\t\t\tCertificateAuthority: restConfig.CAFile,", + "\t\t\t},", + "\t\t},", + "\t\tContexts: map[string]*clientcmdapi.Context{", + "\t\t\t\"default-context\": {", + "\t\t\t\tCluster: \"default-cluster\",", + "\t\t\t\tAuthInfo: \"default-user\",", + "\t\t\t},", + "\t\t},", + "\t\tCurrentContext: \"default-context\",", + "\t\tAuthInfos: map[string]*clientcmdapi.AuthInfo{", + "\t\t\t\"default-user\": {", + "\t\t\t\tToken: restConfig.BearerToken,", + "\t\t\t},", + "\t\t},", + "\t}", + "}" + ] + }, + { + "name": "createByteArrayKubeConfig", + "kind": "function", + "source": [ + "func createByteArrayKubeConfig(kubeConfig *clientcmdapi.Config) ([]byte, error) {", + "\tyamlBytes, err := clientcmd.Write(*kubeConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to generate yaml 
bytes from kubeconfig: %w\", err)", + "\t}", + "\treturn yamlBytes, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/tools/clientcmd", + "name": "NewDefaultClientConfigLoadingRules", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/tools/clientcmd", + "name": "NewNonInteractiveDeferredLoadingClientConfig", + "kind": "function" + }, + { + "name": "RawConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "createByteArrayKubeConfig", + "kind": "function", + "source": [ + "func createByteArrayKubeConfig(kubeConfig *clientcmdapi.Config) ([]byte, error) {", + "\tyamlBytes, err := clientcmd.Write(*kubeConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to generate yaml bytes from kubeconfig: %w\", err)", + "\t}", + "\treturn yamlBytes, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ClientConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "newClientsHolder", + "kind": "function", + "source": [ + "func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:funlen // this is a special function with lots of assignments", + "\tlog.Info(\"Creating k8s go-clients holder.\")", + "", + "\tvar err error", + "\tclientsHolder.RestConfig, 
err = getClusterRestConfig(filenames...)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get rest.Config: %v\", err)", + "\t}", + "\tclientsHolder.RestConfig.Timeout = DefaultTimeout", + "", + "\tclientsHolder.DynamicClient, err = dynamic.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate dynamic client (unstructured/dynamic): %s\", err)", + "\t}", + "\tclientsHolder.APIExtClient, err = apiextv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiextv1: %s\", err)", + "\t}", + "\tclientsHolder.OlmClient, err = olmClient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.OlmPkgClient, err = olmpkgclient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.K8sClient, err = kubernetes.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8sclient: %s\", err)", + "\t}", + "\t// create the oc client", + "\tclientsHolder.OcpClient, err = clientconfigv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ocClient: %s\", err)", + "\t}", + "\tclientsHolder.MachineCfg, err = ocpMachine.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate MachineCfg client: %s\", err)", + "\t}", + "\tclientsHolder.K8sNetworkingClient, err = networkingv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8s networking client: %s\", err)", + "\t}", + "", + "\tdiscoveryClient, err := discovery.NewDiscoveryClientForConfig(clientsHolder.RestConfig)", + "\tif err != nil 
{", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate discoveryClient: %s\", err)", + "\t}", + "", + "\tclientsHolder.GroupResources, err = discoveryClient.ServerPreferredResources()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot get list of resources in cluster: %s\", err)", + "\t}", + "", + "\tresolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)", + "\tgr, err := restmapper.GetAPIGroupResources(clientsHolder.K8sClient.Discovery())", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate GetAPIGroupResources: %s\", err)", + "\t}", + "", + "\tmapper := restmapper.NewDiscoveryRESTMapper(gr)", + "\tclientsHolder.ScalingClient, err = scale.NewForConfig(clientsHolder.RestConfig, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ScalesGetter: %s\", err)", + "\t}", + "", + "\tclientsHolder.CNCFNetworkingClient, err = cncfNetworkAttachmentv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate CNCF networking client\")", + "\t}", + "", + "\tclientsHolder.ApiserverClient, err = apiserverscheme.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiserverscheme: %w\", err)", + "\t}", + "", + "\tclientsHolder.ready = true", + "\treturn \u0026clientsHolder, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getClusterRestConfig(filenames ...string) (*rest.Config, error) {", + "\trestConfig, err := rest.InClusterConfig()", + "\tif err == nil {", + "\t\tlog.Info(\"CNF Cert Suite is running inside a cluster.\")", + "", + "\t\t// Convert restConfig to clientcmdapi.Config so we can get the kubeconfig \"file\" bytes", + "\t\t// needed by preflight's operator checks.", + "\t\tclientConfig := GetClientConfigFromRestConfig(restConfig)", + "\t\tclientsHolder.KubeConfig, err = 
createByteArrayKubeConfig(clientConfig)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create byte array from kube config reference: %v\", err)", + "\t\t}", + "", + "\t\t// No error: we're inside a cluster.", + "\t\treturn restConfig, nil", + "\t}", + "", + "\tlog.Info(\"Running outside a cluster. Parsing kubeconfig file/s %+v\", filenames)", + "\tif len(filenames) == 0 {", + "\t\treturn nil, errors.New(\"no kubeconfig files set\")", + "\t}", + "", + "\t// Get the rest.Config from the kubeconfig file/s.", + "\tprecedence := []string{}", + "\tprecedence = append(precedence, filenames...)", + "", + "\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()", + "\tloadingRules.Precedence = precedence", + "", + "\tkubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(", + "\t\tloadingRules,", + "\t\t\u0026clientcmd.ConfigOverrides{},", + "\t)", + "", + "\t// Save merged config to temporary kubeconfig file.", + "\tkubeRawConfig, err := kubeconfig.RawConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get kube raw config: %w\", err)", + "\t}", + "", + "\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(\u0026kubeRawConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to byte array kube config reference: %w\", err)", + "\t}", + "", + "\trestConfig, err = kubeconfig.ClientConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate rest config: %s\", err)", + "\t}", + "", + "\treturn restConfig, nil", + "}" + ] + }, + { + "name": "newClientsHolder", + "qualifiedName": "newClientsHolder", + "exported": false, + "signature": "func(...string)(*ClientsHolder, error)", + "doc": "newClientsHolder Creates a holder of Kubernetes client interfaces based on provided kubeconfig files\n\nIt loads a rest configuration from the given kubeconfig paths or in-cluster\nsettings, then initializes numerous typed and dynamic clients for API\nextensions, OLM, OpenShift, 
networking, scaling, and CNCF networking. The\nfunction also retrieves cluster resource listings and prepares a REST mapper\nfor scale operations. Upon successful setup, it marks the holder as ready and\nreturns it; otherwise an error is returned.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:371", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "getClusterRestConfig", + "kind": "function", + "source": [ + "func getClusterRestConfig(filenames ...string) (*rest.Config, error) {", + "\trestConfig, err := rest.InClusterConfig()", + "\tif err == nil {", + "\t\tlog.Info(\"CNF Cert Suite is running inside a cluster.\")", + "", + "\t\t// Convert restConfig to clientcmdapi.Config so we can get the kubeconfig \"file\" bytes", + "\t\t// needed by preflight's operator checks.", + "\t\tclientConfig := GetClientConfigFromRestConfig(restConfig)", + "\t\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(clientConfig)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create byte array from kube config reference: %v\", err)", + "\t\t}", + "", + "\t\t// No error: we're inside a cluster.", + "\t\treturn restConfig, nil", + "\t}", + "", + "\tlog.Info(\"Running outside a cluster. 
Parsing kubeconfig file/s %+v\", filenames)", + "\tif len(filenames) == 0 {", + "\t\treturn nil, errors.New(\"no kubeconfig files set\")", + "\t}", + "", + "\t// Get the rest.Config from the kubeconfig file/s.", + "\tprecedence := []string{}", + "\tprecedence = append(precedence, filenames...)", + "", + "\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()", + "\tloadingRules.Precedence = precedence", + "", + "\tkubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(", + "\t\tloadingRules,", + "\t\t\u0026clientcmd.ConfigOverrides{},", + "\t)", + "", + "\t// Save merged config to temporary kubeconfig file.", + "\tkubeRawConfig, err := kubeconfig.RawConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get kube raw config: %w\", err)", + "\t}", + "", + "\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(\u0026kubeRawConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to byte array kube config reference: %w\", err)", + "\t}", + "", + "\trestConfig, err = kubeconfig.ClientConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate rest config: %s\", err)", + "\t}", + "", + "\treturn restConfig, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/dynamic", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": 
"github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/client/clientset/versioned/typed/operators/v1", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/kubernetes", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/openshift/client-go/machineconfiguration/clientset/versioned", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/kubernetes/typed/networking/v1", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/discovery", + "name": "NewDiscoveryClientForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ServerPreferredResources", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/scale", + "name": "NewDiscoveryScaleKindResolver", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/restmapper", + "name": "GetAPIGroupResources", + "kind": "function" + }, + { + "name": "Discovery", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/restmapper", + "name": "NewDiscoveryRESTMapper", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/scale", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": 
"github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/openshift/client-go/apiserver/clientset/versioned", + "name": "NewForConfig", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetNewClientsHolder", + "kind": "function", + "source": [ + "func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder {", + "\t_, err := newClientsHolder(kubeconfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "", + "\treturn \u0026clientsHolder", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:funlen // this is a special function with lots of assignments", + "\tlog.Info(\"Creating k8s go-clients holder.\")", + "", + "\tvar err error", + "\tclientsHolder.RestConfig, err = getClusterRestConfig(filenames...)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get rest.Config: %v\", err)", + "\t}", + "\tclientsHolder.RestConfig.Timeout = DefaultTimeout", + "", + "\tclientsHolder.DynamicClient, err = 
dynamic.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate dynamic client (unstructured/dynamic): %s\", err)", + "\t}", + "\tclientsHolder.APIExtClient, err = apiextv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiextv1: %s\", err)", + "\t}", + "\tclientsHolder.OlmClient, err = olmClient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.OlmPkgClient, err = olmpkgclient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.K8sClient, err = kubernetes.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8sclient: %s\", err)", + "\t}", + "\t// create the oc client", + "\tclientsHolder.OcpClient, err = clientconfigv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ocClient: %s\", err)", + "\t}", + "\tclientsHolder.MachineCfg, err = ocpMachine.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate MachineCfg client: %s\", err)", + "\t}", + "\tclientsHolder.K8sNetworkingClient, err = networkingv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8s networking client: %s\", err)", + "\t}", + "", + "\tdiscoveryClient, err := discovery.NewDiscoveryClientForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate discoveryClient: %s\", err)", + "\t}", + "", + "\tclientsHolder.GroupResources, err = discoveryClient.ServerPreferredResources()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot get 
list of resources in cluster: %s\", err)", + "\t}", + "", + "\tresolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)", + "\tgr, err := restmapper.GetAPIGroupResources(clientsHolder.K8sClient.Discovery())", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate GetAPIGroupResources: %s\", err)", + "\t}", + "", + "\tmapper := restmapper.NewDiscoveryRESTMapper(gr)", + "\tclientsHolder.ScalingClient, err = scale.NewForConfig(clientsHolder.RestConfig, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ScalesGetter: %s\", err)", + "\t}", + "", + "\tclientsHolder.CNCFNetworkingClient, err = cncfNetworkAttachmentv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate CNCF networking client\")", + "\t}", + "", + "\tclientsHolder.ApiserverClient, err = apiserverscheme.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiserverscheme: %w\", err)", + "\t}", + "", + "\tclientsHolder.ready = true", + "\treturn \u0026clientsHolder, nil", + "}" + ] + } + ], + "globals": [ + { + "name": "_", + "exported": false, + "type": "Command", + "doc": "Ensure, that CommandMock does implement Command.\nIf this is not the case, regenerate this file with moq.", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/command_moq.go:12" + }, + { + "name": "clientsHolder", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:90" + } + ], + "consts": [ + { + "name": "DefaultTimeout", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/clientsholder/clientsholder.go:60" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "crclient", + "files": 1, + "imports": [ + "fmt", + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "regexp", + "strconv", + "strings", + "time" + ], + "structs": [ + { + "name": "Process", + "exported": true, + "doc": "Process Represents a running process inside a container\n\nThis structure holds the identifier, parent identifier, namespace, and\ncommand line arguments for a single operating system process discovered\nwithin a container’s PID namespace. The fields enable callers to\ndistinguish processes by their unique IDs and to trace relationships between\nchild and parent processes during diagnostics.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:40", + "fields": { + "Args": "string", + "PPid": "int", + "Pid": "int", + "PidNs": "int" + }, + "methodNames": [ + "String" + ], + "source": [ + "type Process struct {", + "\tPidNs, Pid, PPid int", + "\tArgs string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "ExecCommandContainerNSEnter", + "qualifiedName": "ExecCommandContainerNSEnter", + "exported": true, + "signature": "func(string, *provider.Container)(string, error)", + "doc": "ExecCommandContainerNSEnter Executes a shell command inside a container’s namespace\n\nThe function determines the PID of the target container, builds an nsenter\ncommand to run in that process’s namespace, and executes it on a probe pod\nwith retry logic. It returns the standard output, standard error, and any\nexecution error. 
If the probe context or PID retrieval fails, it reports an\nappropriate error.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:163", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "name": "GetNodeProbePodContext", + "kind": "function", + "source": [ + "func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) {", + "\tprobePod := env.ProbePods[node]", + "\tif probePod == nil {", + "\t\treturn clientsholder.Context{}, fmt.Errorf(\"probe pod not found on node %s\", node)", + "\t}", + "", + "\treturn clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name), nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "GetPidFromContainer", + "kind": "function", + "source": [ + "func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) {", + "\tvar pidCmd string", + "", + "\tswitch cut.Runtime {", + "\tcase \"docker\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"docker-pullable\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"cri-o\", \"containerd\":", 
+ "\t\tpidCmd = \"chroot /host crictl inspect --output go-template --template '{{.info.pid}}' \" + cut.UID + DevNull", + "\tdefault:", + "\t\tlog.Debug(\"Container runtime %s not supported yet for this test, skipping\", cut.Runtime)", + "\t\treturn 0, fmt.Errorf(\"container runtime %s not supported\", cut.Runtime)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, pidCmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", pidCmd, cut, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" on %s returned %s\", pidCmd, cut, errStr)", + "\t}", + "", + "\treturn strconv.Atoi(strings.TrimSuffix(outStr, \"\\n\"))", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Sleep", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "GetListeningPorts", + "kind": "function", + "source": [ + "func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) {", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s on %s, err: %v\", getListeningPortsCmd, cut, err)", + "\t}", + "", + "\treturn parseListeningPorts(outStr)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "GetSSHDaemonPort", + "kind": "function", + "source": [ + "func GetSSHDaemonPort(cut *provider.Container) (string, error) {", + "\tconst findSSHDaemonPort = \"ss -tpln 
| grep sshd | head -1 | awk '{ print $4 }' | awk -F : '{ print $2 }'\"", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(findSSHDaemonPort, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"failed to execute command %s on %s, err: %v\", findSSHDaemonPort, cut, err)", + "\t}", + "", + "\treturn strings.TrimSpace(outStr), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ExecCommandContainerNSEnter(command string,", + "\taContainer *provider.Container) (outStr, errStr string, err error) {", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(aContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", aContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\t// Get the container PID to build the nsenter command", + "\tcontainerPid, err := GetPidFromContainer(aContainer, ctx)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot get PID from: %s, err: %v\", aContainer, err)", + "\t}", + "", + "\t// Add the container PID and the specific command to run with nsenter", + "\tnsenterCommand := \"nsenter -t \" + strconv.Itoa(containerPid) + \" -n \" + command", + "", + "\t// Run the nsenter command on the probe pod with retry logic", + "\tfor attempt := 1; attempt \u003c= RetryAttempts; attempt++ {", + "\t\toutStr, errStr, err = ch.ExecCommandContainer(ctx, nsenterCommand)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\tif attempt \u003c RetryAttempts {", + "\t\t\ttime.Sleep(RetrySleepSeconds * time.Second)", + "\t\t}", + "\t}", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", command, aContainer, err)", + "\t}", + "", + "\treturn outStr, errStr, err", + "}" + ] + }, + { + "name": "GetContainerPidNamespace", + "qualifiedName": 
"GetContainerPidNamespace", + "exported": true, + "signature": "func(*provider.Container, *provider.TestEnvironment)(string, error)", + "doc": "GetContainerPidNamespace Retrieves the PID namespace identifier for a container\n\nThis function determines the process ID of a target container by executing an\ninspection command on its runtime environment. It then runs a namespace\nlisting command against that PID to extract the namespace name, returning it\nas a string. Errors from context retrieval, PID extraction, or command\nexecution are wrapped and returned with descriptive messages.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:119", + "calls": [ + { + "name": "GetNodeProbePodContext", + "kind": "function", + "source": [ + "func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) {", + "\tprobePod := env.ProbePods[node]", + "\tif probePod == nil {", + "\t\treturn clientsholder.Context{}, fmt.Errorf(\"probe pod not found on node %s\", node)", + "\t}", + "", + "\treturn clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name), nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetPidFromContainer", + "kind": "function", + "source": [ + "func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) {", + "\tvar pidCmd string", + "", + "\tswitch cut.Runtime {", + "\tcase \"docker\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"docker-pullable\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"cri-o\", \"containerd\":", + "\t\tpidCmd = \"chroot /host crictl inspect --output go-template --template '{{.info.pid}}' \" + cut.UID + DevNull", + "\tdefault:", + "\t\tlog.Debug(\"Container runtime %s not supported yet for this test, skipping\", cut.Runtime)", + "\t\treturn 0, fmt.Errorf(\"container runtime %s not supported\", cut.Runtime)", 
+ "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, pidCmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", pidCmd, cut, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" on %s returned %s\", pidCmd, cut, errStr)", + "\t}", + "", + "\treturn strconv.Atoi(strings.TrimSuffix(outStr, \"\\n\"))", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerProcesses", + "kind": "function", + "source": [ + "func GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error) {", + "\tpidNs, err := GetContainerPidNamespace(container, env)", + "\tif err != nil {", + "\t\treturn 
nil, fmt.Errorf(\"could not get the containers' pid namespace, err: %v\", err)", + "\t}", + "", + "\treturn GetPidsFromPidNamespace(pidNs, container)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testSchedulingPolicyInCPUPool", + "kind": "function", + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for 
each processes running in that pid namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + "\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) {", + "\t// Get the container pid", + "\tocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tpid, err := GetPidFromContainer(testContainer, ocpContext)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"unable to get container process id due to: %v\", err)", + "\t}", + "\tlog.Debug(\"Obtained process id for %s is %d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"lsns -p %d -t pid -n\", pid)", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"unable to run nsenter due to : %v\", err)", + "\t}", + "", + "\treturn strings.Fields(stdout)[0], nil", + "}" + ] + }, + { + "name": "GetContainerProcesses", + "qualifiedName": "GetContainerProcesses", + "exported": true, + "signature": "func(*provider.Container, *provider.TestEnvironment)([]*Process, error)", + "doc": "GetContainerProcesses Retrieves all process information from a container's PID namespace\n\nThe function first determines the PID namespace of the given container, then\nqueries that namespace to list every running process. 
It returns a slice of\nProcess structures containing each process's ID, parent ID, command line and\nnamespace identifier, or an error if either step fails.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:147", + "calls": [ + { + "name": "GetContainerPidNamespace", + "kind": "function", + "source": [ + "func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) {", + "\t// Get the container pid", + "\tocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tpid, err := GetPidFromContainer(testContainer, ocpContext)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"unable to get container process id due to: %v\", err)", + "\t}", + "\tlog.Debug(\"Obtained process id for %s is %d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"lsns -p %d -t pid -n\", pid)", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"unable to run nsenter due to : %v\", err)", + "\t}", + "", + "\treturn strings.Fields(stdout)[0], nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetPidsFromPidNamespace", + "kind": "function", + "source": [ + "func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) {", + "\tconst command = \"trap \\\"\\\" SIGURG ; ps -e -o pidns,pid,ppid,args\"", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(container.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", container, err)", + "\t}", + "", + "\tstdout, stderr, err := 
clientsholder.GetClientsHolder().ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"command %q failed to run in probe pod=%s (node=%s): %v\", command, ctx.GetPodName(), container.NodeName, err)", + "\t}", + "", + "\tre := regexp.MustCompile(PsRegex)", + "\tmatches := re.FindAllStringSubmatch(stdout, -1)", + "\t// If we do not find a successful log, we fail", + "\tfor _, v := range matches {", + "\t\t// Matching only the right PidNs", + "\t\tif pidNamespace != v[1] {", + "\t\t\tcontinue", + "\t\t}", + "\t\taPidNs, err := strconv.Atoi(v[1])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[1], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPid, err := strconv.Atoi(v[2])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[2], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPPid, err := strconv.Atoi(v[3])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[3], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tp = append(p, \u0026Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid})", + "\t}", + "\treturn p, nil", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testRtAppsNoExecProbes", + "kind": "function", + "source": [ + "func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcuts := env.GetNonGuaranteedPodContainersWithoutHostPID()", + "\tfor _, cut := range cuts {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !cut.HasExecProbes() {", + "\t\t\tcheck.LogInfo(\"Container %q does not define exec probes\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, 
cut.Podname, cut.Name, \"Container does not define exec probes\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprocesses, err := crclient.GetContainerProcesses(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not determine the processes pids for container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the processes pids for container\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\tnotExecProbeProcesses, compliantObjectsProbes := filterProbeProcesses(processes, cut)", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsProbes...)", + "\t\tallProcessesCompliant := true", + "\t\tfor _, p := range notExecProbeProcesses {", + "\t\t\tcheck.LogInfo(\"Testing process %q\", p)", + "\t\t\tschedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)", + "\t\t\tif err != nil {", + "\t\t\t\t// If the process does not exist anymore it means that it has finished since the time the process list", + "\t\t\t\t// was retrieved. 
In this case, just ignore the error and continue processing the rest of the processes.", + "\t\t\t\tif strings.Contains(err.Error(), noProcessFoundErrMsg) {", + "\t\t\t\t\tcheck.LogWarn(\"Container process %q disappeared\", p)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process disappeared\", true).", + "\t\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogError(\"Could not determine the scheduling policy for container %q (pid=%d), err: %v\", cut, p.Pid, err)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the scheduling policy for container\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif scheduling.PolicyIsRT(schedPolicy) {", + "\t\t\t\tcheck.LogError(\"Container %q defines exec probes while having a RT scheduling policy for process %q\", cut, p)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes while having a RT scheduling policy\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif allProcessesCompliant {", + "\t\t\tcheck.LogInfo(\"Container %q defines exec probes but does not have a RT scheduling policy\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes but does not have a RT scheduling 
policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error) {", + "\tpidNs, err := GetContainerPidNamespace(container, env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not get the containers' pid namespace, err: %v\", err)", + "\t}", + "", + "\treturn GetPidsFromPidNamespace(pidNs, container)", + "}" + ] + }, + { + "name": "GetNodeProbePodContext", + "qualifiedName": "GetNodeProbePodContext", + "exported": true, + "signature": "func(string, *provider.TestEnvironment)(clientsholder.Context, error)", + "doc": "GetNodeProbePodContext creates a context for the first container of a probe pod on a node\n\nThe function looks up the probe pod assigned to the specified node from the\ntest environment. If found, it constructs a clientsholder.Context using that\npod’s namespace, name, and its first container’s name. 
The returned\ncontext is used to execute commands inside the probe pod; if no probe pod\nexists on the node an error is returned.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:70", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "ExecCommandContainerNSEnter", + "kind": "function", + "source": [ + "func ExecCommandContainerNSEnter(command string,", + "\taContainer *provider.Container) (outStr, errStr string, err error) {", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(aContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", aContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\t// Get the container PID to build the nsenter command", + "\tcontainerPid, err := GetPidFromContainer(aContainer, ctx)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot get PID from: %s, err: %v\", aContainer, err)", + "\t}", + "", + "\t// Add the container PID and the specific command to run with nsenter", + "\tnsenterCommand := \"nsenter -t \" + strconv.Itoa(containerPid) + \" -n \" + command", + "", + "\t// Run the nsenter command on the probe pod with retry logic", + "\tfor attempt := 1; attempt \u003c= RetryAttempts; attempt++ {", + "\t\toutStr, errStr, err = ch.ExecCommandContainer(ctx, nsenterCommand)", + "\t\tif err == nil {", + "\t\t\tbreak", + 
"\t\t}", + "\t\tif attempt \u003c RetryAttempts {", + "\t\t\ttime.Sleep(RetrySleepSeconds * time.Second)", + "\t\t}", + "\t}", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", command, aContainer, err)", + "\t}", + "", + "\treturn outStr, errStr, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerPidNamespace", + "kind": "function", + "source": [ + "func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) {", + "\t// Get the container pid", + "\tocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tpid, err := GetPidFromContainer(testContainer, ocpContext)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"unable to get container process id due to: %v\", err)", + "\t}", + "\tlog.Debug(\"Obtained process id for %s is %d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"lsns -p %d -t pid -n\", pid)", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"unable to run nsenter due to : %v\", err)", + "\t}", + "", + "\treturn strings.Fields(stdout)[0], nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidsFromPidNamespace", + "kind": "function", + "source": [ + "func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) {", + "\tconst command = \"trap \\\"\\\" SIGURG ; ps -e -o pidns,pid,ppid,args\"", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(container.NodeName, \u0026env)", + "\tif err != nil {", 
+ "\t\treturn nil, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", container, err)", + "\t}", + "", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"command %q failed to run in probe pod=%s (node=%s): %v\", command, ctx.GetPodName(), container.NodeName, err)", + "\t}", + "", + "\tre := regexp.MustCompile(PsRegex)", + "\tmatches := re.FindAllStringSubmatch(stdout, -1)", + "\t// If we do not find a successful log, we fail", + "\tfor _, v := range matches {", + "\t\t// Matching only the right PidNs", + "\t\tif pidNamespace != v[1] {", + "\t\t\tcontinue", + "\t\t}", + "\t\taPidNs, err := strconv.Atoi(v[1])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[1], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPid, err := strconv.Atoi(v[2])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[2], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPPid, err := strconv.Atoi(v[3])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[3], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tp = append(p, \u0026Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid})", + "\t}", + "\treturn p, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "GetProcessCPUScheduling", + "kind": "function", + "source": [ + "func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {", + "\tlog.Info(\"Checking the scheduling policy/priority in %v for pid=%d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"chrt -p %d\", pid)", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := crclient.GetNodeProbePodContext(testContainer.NodeName, \u0026env)", + "\tif err != nil {", + 
"\t\treturn \"\", 0, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\tstdout, stderr, err := ch.ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"command %q failed to run in probe pod %s (node %s): %v (stderr: %v)\",", + "\t\t\tcommand, ctx.GetPodName(), testContainer.NodeName, err, stderr)", + "\t}", + "", + "\tschedulePolicy, schedulePriority, err = parseSchedulingPolicyAndPriority(stdout)", + "\tif err != nil {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"error getting the scheduling policy and priority for %v : %v\", testContainer, err)", + "\t}", + "\tlog.Info(\"pid %d in %v has the cpu scheduling policy %s, scheduling priority %d\", pid, testContainer, schedulePolicy, schedulePriority)", + "", + "\treturn schedulePolicy, schedulePriority, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) {", + "\tprobePod := env.ProbePods[node]", + "\tif probePod == nil {", + "\t\treturn clientsholder.Context{}, fmt.Errorf(\"probe pod not found on node %s\", node)", + "\t}", + "", + "\treturn clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name), nil", + "}" + ] + }, + { + "name": "GetPidFromContainer", + "qualifiedName": "GetPidFromContainer", + "exported": true, + "signature": "func(*provider.Container, clientsholder.Context)(int, error)", + "doc": "GetPidFromContainer Retrieves the process ID of a container by executing a runtime-specific command\n\nThe function determines which container runtime is in use and builds an\nappropriate shell command to query the container's PID, then runs that\ncommand inside a probe pod context. 
It returns the numeric PID if the command\nsucceeds or an error if execution fails or the runtime is unsupported.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:85", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSuffix", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "ExecCommandContainerNSEnter", + "kind": "function", + "source": [ + "func ExecCommandContainerNSEnter(command string,", + "\taContainer *provider.Container) (outStr, errStr string, err error) {", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(aContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", aContainer, err)", + "\t}", + "", 
+ "\tch := clientsholder.GetClientsHolder()", + "", + "\t// Get the container PID to build the nsenter command", + "\tcontainerPid, err := GetPidFromContainer(aContainer, ctx)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot get PID from: %s, err: %v\", aContainer, err)", + "\t}", + "", + "\t// Add the container PID and the specific command to run with nsenter", + "\tnsenterCommand := \"nsenter -t \" + strconv.Itoa(containerPid) + \" -n \" + command", + "", + "\t// Run the nsenter command on the probe pod with retry logic", + "\tfor attempt := 1; attempt \u003c= RetryAttempts; attempt++ {", + "\t\toutStr, errStr, err = ch.ExecCommandContainer(ctx, nsenterCommand)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\tif attempt \u003c RetryAttempts {", + "\t\t\ttime.Sleep(RetrySleepSeconds * time.Second)", + "\t\t}", + "\t}", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", command, aContainer, err)", + "\t}", + "", + "\treturn outStr, errStr, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerPidNamespace", + "kind": "function", + "source": [ + "func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) {", + "\t// Get the container pid", + "\tocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tpid, err := GetPidFromContainer(testContainer, ocpContext)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"unable to get container process id due to: %v\", err)", + "\t}", + "\tlog.Debug(\"Obtained process id for %s is %d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"lsns -p %d -t pid -n\", pid)", + "\tstdout, stderr, err := 
clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"unable to run nsenter due to : %v\", err)", + "\t}", + "", + "\treturn strings.Fields(stdout)[0], nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testOneProcessPerContainer", + "kind": "function", + "source": [ + "func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t// the Istio sidecar container \"istio-proxy\" launches two processes: \"pilot-agent\" and \"envoy\"", + "\t\tif cut.IsIstioProxy() {", + "\t\t\tcheck.LogInfo(\"Skipping \\\"istio-proxy\\\" container\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Debug pod not found for node %q\", cut.NodeName)", + "\t\t\treturn", + "\t\t}", + "\t\tocpContext := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tpid, err := crclient.GetPidFromContainer(cut, ocpContext)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get PID for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get number of processes for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif nbProcesses \u003e 1 {", + "\t\t\tcheck.LogError(\"Container %q has more than one process running\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has more than one process running\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has only one process running\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has only one process running\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) {", + "\tvar pidCmd string", + "", + "\tswitch cut.Runtime {", + "\tcase \"docker\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"docker-pullable\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"cri-o\", \"containerd\":", + "\t\tpidCmd = \"chroot /host crictl inspect --output go-template --template '{{.info.pid}}' \" + cut.UID + DevNull", + "\tdefault:", + "\t\tlog.Debug(\"Container runtime %s not supported yet for this test, skipping\", cut.Runtime)", + "\t\treturn 0, fmt.Errorf(\"container runtime %s not supported\", cut.Runtime)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, pidCmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", pidCmd, cut, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" on %s returned %s\", pidCmd, cut, errStr)", + "\t}", + "", + "\treturn 
strconv.Atoi(strings.TrimSuffix(outStr, \"\\n\"))", + "}" + ] + }, + { + "name": "GetPidsFromPidNamespace", + "qualifiedName": "GetPidsFromPidNamespace", + "exported": true, + "signature": "func(string, *provider.Container)([]*Process, error)", + "doc": "GetPidsFromPidNamespace Retrieves processes running in a specific PID namespace\n\nThe function runs a ps command inside the probe pod on the container's node\nto list all processes with their namespaces, then filters those whose pidns\nmatches the supplied string. It parses each line of output, converting\nnumeric fields to integers and constructs Process objects for matching\nentries. The resulting slice of Process pointers is returned; if any error\noccurs during execution or parsing, an error value is returned.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:207", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "name": "GetNodeProbePodContext", + "kind": "function", + "source": [ + "func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) {", + "\tprobePod := env.ProbePods[node]", + "\tif probePod == nil {", + "\t\treturn clientsholder.Context{}, fmt.Errorf(\"probe pod not found on node %s\", node)", + "\t}", + "", + "\treturn clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name), nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func 
GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetPodName", + "kind": "function" + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindAllStringSubmatch", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerProcesses", + "kind": "function", + "source": [ + "func GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error) {", + "\tpidNs, err := 
GetContainerPidNamespace(container, env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not get the containers' pid namespace, err: %v\", err)", + "\t}", + "", + "\treturn GetPidsFromPidNamespace(pidNs, container)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testSchedulingPolicyInCPUPool", + "kind": "function", + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, 
schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for each processes running in that pid namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + "\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) {", + "\tconst command = \"trap \\\"\\\" SIGURG ; ps -e -o pidns,pid,ppid,args\"", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(container.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", container, err)", + "\t}", + "", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"command %q failed to run in probe pod=%s (node=%s): %v\", command, ctx.GetPodName(), container.NodeName, err)", + "\t}", + "", + "\tre := regexp.MustCompile(PsRegex)", + "\tmatches := re.FindAllStringSubmatch(stdout, -1)", + "\t// If we do not find a successful log, we fail", + "\tfor _, v := range matches {", + "\t\t// Matching only the right PidNs", + "\t\tif pidNamespace != v[1] {", + "\t\t\tcontinue", + "\t\t}", + "\t\taPidNs, err := strconv.Atoi(v[1])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[1], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPid, err := strconv.Atoi(v[2])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[2], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPPid, err := strconv.Atoi(v[3])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not 
convert string %s to integer, err=%s\", v[3], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tp = append(p, \u0026Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid})", + "\t}", + "\treturn p, nil", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "Process.String", + "exported": true, + "receiver": "Process", + "signature": "func()(string)", + "doc": "Process.String Formats the process details into a readable string\n\nThis method creates a human‑readable representation of a process by\ncombining its command line arguments and identifiers. It uses string\nformatting to include the executable name, process ID, parent process ID, and\nPID namespace number in a single line. The resulting string is returned for\nlogging or debugging purposes.", + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:59", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Process) String() string {", + "\treturn fmt.Sprintf(\"cmd: %s, pid: %d, ppid: %d, pidNs: %d\", p.Args, p.Pid, p.PPid, p.PidNs)", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "DevNull", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:46" + }, + { + "name": "DockerInspectPID", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:47" + }, + { + "name": "PsRegex", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:31" + }, + { + "name": "RetryAttempts", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:48" + }, + { + "name": "RetrySleepSeconds", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/crclient/crclient.go:49" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/internal/datautil", + "name": "datautil", + 
"files": 1, + "imports": null, + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "IsMapSubset", + "qualifiedName": "IsMapSubset", + "exported": true, + "signature": "func(map[K]V, map[K]V)(bool)", + "doc": "IsMapSubset Determines if one map contains all key-value pairs of another\n\nIt compares two generic maps, returning true only when every entry in the\nsecond map exists identically in the first. The function first checks that\nthe second map is not larger than the first for efficiency. It then iterates\nthrough each key-value pair, verifying presence and equality; if any mismatch\noccurs, it returns false.", + "position": "/Users/deliedit/dev/certsuite/internal/datautil/data_util.go:10", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsMapSubset[K, V comparable](m, s map[K]V) bool {", + "\tif len(s) \u003e len(m) {", + "\t\treturn false", + "\t}", + "\tfor ks, vs := range s {", + "\t\tif vm, found := m[ks]; !found || vm != vs {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "log", + "files": 3, + "imports": [ + "context", + "fmt", + "io", + "log/slog", + "os", + "path/filepath", + "runtime", + "strings", + "sync", + "time" + ], + "structs": [ + { + "name": "CustomHandler", + "exported": true, + "doc": "CustomHandler Formats and writes structured log entries to an output stream\n\nThe handler collects attributes and optional context information such as\nlevel, time, source file, and message. It serializes these into a single line\nusing a custom attribute formatting routine before writing them atomically to\nthe configured writer. 
The handler supports adding default attributes via\nWithAttrs while preserving thread safety with a mutex.", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:28", + "fields": { + "attrs": "[]slog.Attr", + "mu": "*sync.Mutex", + "opts": "slog.HandlerOptions", + "out": "io.Writer" + }, + "methodNames": [ + "Enabled", + "Handle", + "WithAttrs", + "WithGroup", + "appendAttr" + ], + "source": [ + "type CustomHandler struct {", + "\topts slog.HandlerOptions", + "\tattrs []slog.Attr", + "\tmu *sync.Mutex", + "\tout io.Writer", + "}" + ] + }, + { + "name": "Logger", + "exported": true, + "doc": "Logger Encapsulates a structured logger with convenience methods\n\nThis type wraps an slog.Logger to provide simple debug, info, warn, error,\nfatal, and context‑aware logging functions. It also offers a With method\nthat attaches key/value pairs to the underlying logger, returning a new\nLogger instance for fluent chaining.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:34", + "fields": { + "l": "*slog.Logger" + }, + "methodNames": [ + "Debug", + "Error", + "Fatal", + "Info", + "Warn", + "With" + ], + "source": [ + "type Logger struct {", + "\tl *slog.Logger", + "}" + ] + }, + { + "name": "MultiHandler", + "exported": true, + "doc": "MultiHandler combines multiple logging handlers into one\n\nIt holds a slice of slog.Handler values and forwards each logging call to\nevery handler in the slice. For enabled checks, it returns true if any\nunderlying handler is enabled for the given level. When handling a record, it\nclones the record before passing it to each handler, stopping early only if\nan error occurs. 
Attribute and group additions are propagated by creating new\nhandlers with the specified attributes or groups.", + "position": "/Users/deliedit/dev/certsuite/internal/log/multi_handler.go:16", + "fields": { + "handlers": "[]slog.Handler" + }, + "methodNames": [ + "Enabled", + "Handle", + "WithAttrs", + "WithGroup" + ], + "source": [ + "type MultiHandler struct {", + "\thandlers []slog.Handler", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "CloseGlobalLogFile", + "qualifiedName": "CloseGlobalLogFile", + "exported": true, + "signature": "func()(error)", + "doc": "CloseGlobalLogFile Closes the globally opened log file\n\nThe function invokes the Close method on the global log file handle and\nreturns any error that occurs during closure. It does not take any arguments\nand only provides an error result indicating success or failure of the\noperation.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:75", + "calls": [ + { + "name": "Close", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Shutdown", + "kind": "function", + "source": [ + "func Shutdown() {", + "\terr := log.CloseGlobalLogFile()", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not close the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CloseGlobalLogFile() error {", + "\treturn globalLogFile.Close()", + "}" + ] + }, + { + "name": "CreateGlobalLogFile", + "qualifiedName": "CreateGlobalLogFile", + "exported": true, + "signature": "func(string, string)(error)", + "doc": "CreateGlobalLogFile Creates or replaces the global log file for test output\n\nThe function removes any existing log file in the specified directory, then\nopens a new one with read/write permissions. 
It configures the logger to\nwrite to this file at the requested level and stores the file handle\nglobally. Errors during removal or opening are returned as formatted\nmessages.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:51", + "calls": [ + { + "pkgPath": "os", + "name": "Remove", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "IsNotExist", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "OpenFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "SetupLogger", + "kind": "function", + "source": [ + "func SetupLogger(logWriter io.Writer, level string) {", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not parse log level, err: %v. Defaulting to DEBUG.\", err)", + "\t\tglobalLogLevel = slog.LevelInfo", + "\t} else {", + "\t\tglobalLogLevel = logLevel", + "\t}", + "", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tglobalLogger = \u0026Logger{", + "\t\tl: slog.New(NewCustomHandler(logWriter, \u0026opts)),", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := 
checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + 
"\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar 
filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif 
err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CreateGlobalLogFile(outputDir, logLevel string) error {", + "\tlogFilePath := outputDir + \"/\" + LogFileName", + "\terr := os.Remove(logFilePath)", + "\tif err != nil \u0026\u0026 !os.IsNotExist(err) {", + "\t\treturn fmt.Errorf(\"could not delete old log file, err: %v\", err)", + "\t}", + "", + "\tlogFile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE, LogFilePermissions)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not open a new log file, err: %v\", err)", + "\t}", + "", + "\tSetupLogger(logFile, logLevel)", + "\tglobalLogFile = logFile", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "Enabled", + "qualifiedName": "CustomHandler.Enabled", + "exported": true, + "receiver": "CustomHandler", + "signature": "func(context.Context, slog.Level)(bool)", + "doc": "CustomHandler.Enabled Determines if a log level is enabled based on configuration\n\nThe method compares the supplied logging level against the handler's\nconfigured threshold, returning true when the level meets or exceeds that\nthreshold. 
It ignores the context parameter because the decision relies\nsolely on static settings.", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:59", + "calls": [ + { + "name": "Level", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *CustomHandler) Enabled(_ context.Context, level slog.Level) bool {", + "\treturn level \u003e= h.opts.Level.Level()", + "}" + ] + }, + { + "name": "Handle", + "qualifiedName": "CustomHandler.Handle", + "exported": true, + "receiver": "CustomHandler", + "signature": "func(context.Context, slog.Record)(error)", + "doc": "CustomHandler.Handle writes a formatted log line to the output\n\nThis method receives a context and a slog.Record, builds a byte buffer\ncontaining level, time, source file, custom attributes, and message in a\nspecific format, then writes it to an underlying writer. It locks a mutex\nduring the write to ensure thread safety and returns any error from the write\noperation.\n\nnolint:gocritic // r param is heavy but defined in the slog.Handler interface", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:72", + "calls": [ + { + "name": "ReplaceAttr", + "kind": "function" + }, + { + "pkgPath": "log/slog", + "name": "Any", + "kind": "function" + }, + { + "pkgPath": "log/slog", + "name": "Any", + "kind": "function" + }, + { + "name": "CustomHandler.appendAttr", + "kind": "function", + "source": [ + "func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte {", + "\t// Resolve the Attr's value before doing anything else.", + "\ta.Value = a.Value.Resolve()", + "\t// Ignore empty Attrs.", + "\tif a.Equal(slog.Attr{}) {", + "\t\treturn buf", + "\t}", + "\tswitch a.Value.Kind() {", + "\tcase slog.KindString:", + "\t\tif a.Key == slog.MessageKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%s\", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.String())", + "\t\t}", + 
"\tcase slog.KindTime:", + "\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.Time().Format(time.StampMilli))", + "\tdefault:", + "\t\tif a.Key == slog.LevelKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%-5s \", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s: %s] \", a.Key, a.Value)", + "\t\t}", + "\t}", + "\treturn buf", + "}" + ] + }, + { + "name": "IsZero", + "kind": "function" + }, + { + "name": "CustomHandler.appendAttr", + "kind": "function", + "source": [ + "func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte {", + "\t// Resolve the Attr's value before doing anything else.", + "\ta.Value = a.Value.Resolve()", + "\t// Ignore empty Attrs.", + "\tif a.Equal(slog.Attr{}) {", + "\t\treturn buf", + "\t}", + "\tswitch a.Value.Kind() {", + "\tcase slog.KindString:", + "\t\tif a.Key == slog.MessageKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%s\", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.String())", + "\t\t}", + "\tcase slog.KindTime:", + "\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.Time().Format(time.StampMilli))", + "\tdefault:", + "\t\tif a.Key == slog.LevelKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%-5s \", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s: %s] \", a.Key, a.Value)", + "\t\t}", + "\t}", + "\treturn buf", + "}" + ] + }, + { + "pkgPath": "log/slog", + "name": "Time", + "kind": "function" + }, + { + "pkgPath": "runtime", + "name": "CallersFrames", + "kind": "function" + }, + { + "name": "Next", + "kind": "function" + }, + { + "name": "CustomHandler.appendAttr", + "kind": "function", + "source": [ + "func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte {", + "\t// Resolve the Attr's value before doing anything else.", + "\ta.Value = a.Value.Resolve()", + "\t// Ignore empty Attrs.", + "\tif a.Equal(slog.Attr{}) {", + "\t\treturn buf", + "\t}", + "\tswitch a.Value.Kind() {", + "\tcase slog.KindString:", + "\t\tif a.Key == 
slog.MessageKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%s\", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.String())", + "\t\t}", + "\tcase slog.KindTime:", + "\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.Time().Format(time.StampMilli))", + "\tdefault:", + "\t\tif a.Key == slog.LevelKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%-5s \", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s: %s] \", a.Key, a.Value)", + "\t\t}", + "\t}", + "\treturn buf", + "}" + ] + }, + { + "pkgPath": "log/slog", + "name": "String", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Base", + "kind": "function" + }, + { + "name": "CustomHandler.appendAttr", + "kind": "function", + "source": [ + "func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte {", + "\t// Resolve the Attr's value before doing anything else.", + "\ta.Value = a.Value.Resolve()", + "\t// Ignore empty Attrs.", + "\tif a.Equal(slog.Attr{}) {", + "\t\treturn buf", + "\t}", + "\tswitch a.Value.Kind() {", + "\tcase slog.KindString:", + "\t\tif a.Key == slog.MessageKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%s\", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.String())", + "\t\t}", + "\tcase slog.KindTime:", + "\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.Time().Format(time.StampMilli))", + "\tdefault:", + "\t\tif a.Key == slog.LevelKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%-5s \", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s: %s] \", a.Key, a.Value)", + "\t\t}", + "\t}", + "\treturn buf", + "}" + ] + }, + { + "name": "CustomHandler.appendAttr", + "kind": "function", + "source": [ + "func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte {", + "\t// Resolve the Attr's value before doing anything else.", + "\ta.Value = a.Value.Resolve()", + "\t// Ignore empty Attrs.", + 
"\tif a.Equal(slog.Attr{}) {", + "\t\treturn buf", + "\t}", + "\tswitch a.Value.Kind() {", + "\tcase slog.KindString:", + "\t\tif a.Key == slog.MessageKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%s\", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.String())", + "\t\t}", + "\tcase slog.KindTime:", + "\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.Time().Format(time.StampMilli))", + "\tdefault:", + "\t\tif a.Key == slog.LevelKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%-5s \", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s: %s] \", a.Key, a.Value)", + "\t\t}", + "\t}", + "\treturn buf", + "}" + ] + }, + { + "pkgPath": "log/slog", + "name": "String", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *CustomHandler) Handle(_ context.Context, r slog.Record) error {", + "\tvar buf []byte", + "\t// Level", + "\tvar levelAttr slog.Attr", + "\tif h.opts.ReplaceAttr != nil {", + "\t\tlevelAttr = h.opts.ReplaceAttr(nil, slog.Any(slog.LevelKey, r.Level))", + "\t} else {", + "\t\tlevelAttr = slog.Any(slog.LevelKey, r.Level)", + "\t}", + "\tbuf = h.appendAttr(buf, levelAttr)", + "\t// Time", + "\tif !r.Time.IsZero() {", + "\t\tbuf = h.appendAttr(buf, slog.Time(slog.TimeKey, r.Time))", + "\t}", + "\t// Source", + "\tif r.PC != 0 {", + "\t\tfs := runtime.CallersFrames([]uintptr{r.PC})", + "\t\tf, _ := fs.Next()", + "\t\tbuf = h.appendAttr(buf, slog.String(slog.SourceKey, fmt.Sprintf(\"%s: %d\", filepath.Base(f.File), f.Line)))", + "\t}", + "\t// Attributes", + "\tfor _, attr := range h.attrs {", + "\t\tbuf = h.appendAttr(buf, attr)", + "\t}", + "\t// Message", + "\tbuf = h.appendAttr(buf, slog.String(slog.MessageKey, r.Message))", + "\tbuf = append(buf, 
\"\\n\"...)", + "\th.mu.Lock()", + "\tdefer h.mu.Unlock()", + "\t_, err := h.out.Write(buf)", + "\treturn err", + "}" + ] + }, + { + "name": "WithAttrs", + "qualifiedName": "CustomHandler.WithAttrs", + "exported": true, + "receiver": "CustomHandler", + "signature": "func([]slog.Attr)(slog.Handler)", + "doc": "CustomHandler.WithAttrs Creates a handler that includes additional attributes\n\nThe method takes a slice of attributes, merges them with the handler’s\nexisting ones, and returns a new handler instance containing the combined\nset. If no attributes are supplied it simply returns the original handler to\navoid unnecessary copying. The returned handler is a copy of the receiver so\nthat modifications do not affect the original.", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:122", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "copy", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *CustomHandler) WithAttrs(attrs []slog.Attr) slog.Handler {", + "\tif len(attrs) == 0 {", + "\t\treturn h", + "\t}", + "", + "\t// Create a new handler with default attributes", + "\th2 := *h", + "\t// A deep copy of the attributes is required", + "\th2.attrs = make([]slog.Attr, len(h.attrs)+len(attrs))", + "\tcopy(h2.attrs, h.attrs)", + "\th2.attrs = append(h2.attrs, attrs...)", + "", + "\treturn \u0026h2", + "}" + ] + }, + { + "name": "WithGroup", + "qualifiedName": "CustomHandler.WithGroup", + "exported": true, + "receiver": "CustomHandler", + "signature": "func(string)(slog.Handler)", + "doc": "CustomHandler.WithGroup Returns a new handler scoped to a named group\n\nWhen called, this method ignores the provided group name and simply returns a\nnil handler, 
indicating that grouping functionality is not implemented for\nCustomHandler. It satisfies the slog.Handler interface but does not create\nany new handler instance or modify state.", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:111", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *CustomHandler) WithGroup(_ string) slog.Handler {", + "\treturn nil", + "}" + ] + }, + { + "name": "appendAttr", + "qualifiedName": "CustomHandler.appendAttr", + "exported": false, + "receiver": "CustomHandler", + "signature": "func([]byte, slog.Attr)([]byte)", + "doc": "CustomHandler.appendAttr Formats a logging attribute for output\n\nThe function resolves the attribute’s value, skips empty attributes, then\nformats the output based on the kind of value. String values are printed\nplainly or in brackets; time values use a millisecond timestamp; other kinds\ninclude level or key/value pairs with appropriate spacing. The resulting\nbytes are appended to the buffer and returned.", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:144", + "calls": [ + { + "name": "Resolve", + "kind": "function" + }, + { + "name": "Equal", + "kind": "function" + }, + { + "name": "Kind", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Appendf", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Appendf", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Appendf", + "kind": "function" + }, + { + "name": "Format", + "kind": "function" + }, + { + "name": "Time", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Appendf", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Appendf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "CustomHandler.Handle", + "kind": "function", + "source": [ + "func (h *CustomHandler) Handle(_ context.Context, r slog.Record) error {", + "\tvar buf []byte", + "\t// Level", + "\tvar levelAttr slog.Attr", + "\tif h.opts.ReplaceAttr != nil {", + "\t\tlevelAttr = h.opts.ReplaceAttr(nil, slog.Any(slog.LevelKey, r.Level))", + "\t} else {", + "\t\tlevelAttr = slog.Any(slog.LevelKey, r.Level)", + "\t}", + "\tbuf = h.appendAttr(buf, levelAttr)", + "\t// Time", + "\tif !r.Time.IsZero() {", + "\t\tbuf = h.appendAttr(buf, slog.Time(slog.TimeKey, r.Time))", + "\t}", + "\t// Source", + "\tif r.PC != 0 {", + "\t\tfs := runtime.CallersFrames([]uintptr{r.PC})", + "\t\tf, _ := fs.Next()", + "\t\tbuf = h.appendAttr(buf, slog.String(slog.SourceKey, fmt.Sprintf(\"%s: %d\", filepath.Base(f.File), f.Line)))", + "\t}", + "\t// Attributes", + "\tfor _, attr := range h.attrs {", + "\t\tbuf = h.appendAttr(buf, attr)", + "\t}", + "\t// Message", + "\tbuf = h.appendAttr(buf, slog.String(slog.MessageKey, r.Message))", + "\tbuf = append(buf, \"\\n\"...)", + "\th.mu.Lock()", + "\tdefer h.mu.Unlock()", + "\t_, err := h.out.Write(buf)", + "\treturn err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte {", + "\t// Resolve the Attr's value before doing anything else.", + "\ta.Value = a.Value.Resolve()", + "\t// Ignore empty Attrs.", + "\tif a.Equal(slog.Attr{}) {", + "\t\treturn buf", + "\t}", + "\tswitch a.Value.Kind() {", + "\tcase slog.KindString:", + "\t\tif a.Key == slog.MessageKey {", + "\t\t\tbuf = fmt.Appendf(buf, \"%s\", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.String())", + "\t\t}", + "\tcase slog.KindTime:", + "\t\tbuf = fmt.Appendf(buf, \"[%s] \", a.Value.Time().Format(time.StampMilli))", + "\tdefault:", + "\t\tif a.Key == slog.LevelKey {", + "\t\t\tbuf = 
fmt.Appendf(buf, \"%-5s \", a.Value.String())", + "\t\t} else {", + "\t\t\tbuf = fmt.Appendf(buf, \"[%s: %s] \", a.Key, a.Value)", + "\t\t}", + "\t}", + "\treturn buf", + "}" + ] + }, + { + "name": "Debug", + "qualifiedName": "Debug", + "exported": true, + "signature": "func(string, ...any)()", + "doc": "Debug Logs a message at the debug level\n\nThis function forwards its arguments to the internal logging system, tagging\nthem with a debug severity. It accepts a format string followed by any number\nof values, which are passed to the underlying logger for formatting and\noutput. The global logger instance is used, ensuring consistent log\nconfiguration across the application.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:182", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Debug(msg string, args ...any) {", + "\tLogf(globalLogger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Error", + "qualifiedName": "Error", + "exported": true, + "signature": "func(string, ...any)()", + "doc": "Error Logs an error message with optional formatting\n\nThis function accepts a format string and optional arguments, then forwards\nthe call to a lower-level logging 
routine that writes the message at the\nerror severity level. It uses the global logger instance, ensuring\nconsistency across the application. The formatted output is sent to the\nconfigured log handler.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:214", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Error(msg string, args ...any) {", + "\tLogf(globalLogger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Fatal", + "qualifiedName": "Fatal", + "exported": true, + "signature": "func(string, ...any)()", + "doc": "Fatal Logs a fatal error message and terminates the program\n\nThis function writes the supplied formatted message to both the configured\nlogger at the fatal level and directly to standard error. 
After logging, it\nexits the process with status code one, ensuring that the application stops\nimmediately.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:224", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Fatal(msg string, args ...any) {", + "\tLogf(globalLogger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "GetLogger", + "qualifiedName": "GetLogger", + "exported": true, + "signature": "func()(*Logger)", + "doc": "GetLogger Retrieves the package-wide logger instance\n\nThis function provides access to a globally shared Logger object that is used\nthroughout the application for consistent logging behavior. 
It simply returns\nthe reference stored in the internal variable, allowing other packages to\nobtain and use the same logger without creating new instances.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:133", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetLogger() *Logger {", + "\treturn globalLogger", + "}" + ] + }, + { + "name": "GetMultiLogger", + "qualifiedName": "GetMultiLogger", + "exported": true, + "signature": "func(...io.Writer)(*Logger)", + "doc": "GetMultiLogger Creates a logger that writes to multiple destinations\n\nThe function builds a set of slog handlers, including an optional global\nhandler if one is configured, and wraps each supplied writer in a custom\nhandler with the current log level settings. It then combines these handlers\ninto a multi-handler so that every log record is emitted to all specified\nwriters simultaneously. The resulting Logger instance is returned for use\nthroughout the application.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:145", + "calls": [ + { + "name": "Any", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "log/slog", + "name": "StringValue", + "kind": "function" + }, + { + "name": "Handler", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "NewCustomHandler", + "kind": "function", + "source": [ + "func NewCustomHandler(out io.Writer, opts *slog.HandlerOptions) *CustomHandler {", + "\th := \u0026CustomHandler{out: out, mu: \u0026sync.Mutex{}}", + "\tif opts != nil {", + "\t\th.opts = *opts", + "\t}", + "\tif h.opts.Level == nil {", + "\t\th.opts.Level = slog.LevelInfo", + "\t}", + "", + "\treturn h", + "}" + ] + }, + { + "pkgPath": "log/slog", + "name": "New", + "kind": "function" + }, + { + "name": "NewMultiHandler", + "kind": "function", + "source": [ + "func NewMultiHandler(handlers ...slog.Handler) *MultiHandler {", + 
"\treturn \u0026MultiHandler{", + "\t\thandlers: handlers,", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing 
temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). 
Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetMultiLogger(writers ...io.Writer) *Logger {", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tvar handlers []slog.Handler", + "\tif globalLogger != nil {", + "\t\thandlers = []slog.Handler{globalLogger.l.Handler()}", + "\t}", + "", + "\tfor _, writer := range writers {", + 
"\t\thandlers = append(handlers, NewCustomHandler(writer, \u0026opts))", + "\t}", + "", + "\treturn \u0026Logger{l: slog.New(NewMultiHandler(handlers...))}", + "}" + ] + }, + { + "name": "Info", + "qualifiedName": "Info", + "exported": true, + "signature": "func(string, ...any)()", + "doc": "Info Logs a message at the informational level\n\nThis function sends a formatted log entry to the package's global logger with\nan informational severity. It accepts a message string and optional\narguments, which are passed through to formatting before dispatching to the\nunderlying logging system. The call is non‑blocking and does not return any\nvalue.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:193", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Info(msg string, args ...any) {", + "\tLogf(globalLogger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Logf", + "qualifiedName": "Logf", + "exported": true, + "signature": "func(*Logger, string, string, ...any)()", + "doc": "Logf Logs a formatted message at the specified level\n\nThe function accepts a logger, a string representing the log level, a format\nstring, and optional arguments. 
It parses the level, checks if logging is\nenabled for that level, retrieves the caller information, creates a slog\nrecord with a timestamp and formatted message, and passes it to the\nlogger’s handler.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:328", + "calls": [ + { + "pkgPath": "log/slog", + "name": "Default", + "kind": "function" + }, + { + "name": "parseLevel", + "kind": "function", + "source": [ + "func parseLevel(level string) (slog.Level, error) {", + "\tswitch strings.ToLower(level) {", + "\tcase \"debug\":", + "\t\treturn slog.LevelDebug, nil", + "\tcase \"info\":", + "\t\treturn slog.LevelInfo, nil", + "\tcase \"warn\", \"warning\":", + "\t\treturn slog.LevelWarn, nil", + "\tcase \"error\":", + "\t\treturn slog.LevelError, nil", + "\tcase \"fatal\":", + "\t\treturn CustomLevelFatal, nil", + "\t}", + "", + "\treturn 0, fmt.Errorf(\"not a valid slog Level: %q\", level)", + "}" + ] + }, + { + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "MultiHandler.Enabled", + "kind": "function", + "source": [ + "func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool {", + "\tfor i := range h.handlers {", + "\t\tif h.handlers[i].Enabled(ctx, level) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "runtime", + "name": "Callers", + "kind": "function" + }, + { + "pkgPath": "log/slog", + "name": "NewRecord", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "MultiHandler.Handle", + "kind": "function", + "source": [ + "func (h *MultiHandler) 
Handle(ctx context.Context, r slog.Record) error {", + "\tfor i := range h.handlers {", + "\t\tif err := h.handlers[i].Handle(ctx, r.Clone()); err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "Handler", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Debug", + "kind": "function", + "source": [ + "func Debug(msg string, args ...any) {", + "\tLogf(globalLogger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Error", + "kind": "function", + "source": [ + "func Error(msg string, args ...any) {", + "\tLogf(globalLogger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Fatal", + "kind": "function", + "source": [ + "func Fatal(msg string, args ...any) {", + "\tLogf(globalLogger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Info", + "kind": "function", + "source": [ + "func Info(msg string, args ...any) {", + "\tLogf(globalLogger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Warn", + "kind": "function", + "source": [ + "func (logger *Logger) Warn(msg string, args ...any) {", + "\tLogf(logger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.LogDebug", + "kind": "function", + "source": [ + "func (check *Check) LogDebug(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.LogError", + "kind": "function", + "source": [ + "func (check *Check) LogError(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.LogFatal", + "kind": "function", + "source": [ + "func (check *Check) LogFatal(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: 
\"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.LogInfo", + "kind": "function", + "source": [ + "func (check *Check) LogInfo(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.LogWarn", + "kind": "function", + "source": [ + "func (check *Check) LogWarn(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelWarn, msg, args...)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "name": "Debug", + "qualifiedName": "Logger.Debug", + "exported": true, + "receiver": "Logger", + "signature": "func(string, ...any)()", + "doc": "Logger.Debug Logs a formatted message at debug level\n\nThe method calls the generic logging helper Logf, passing the logger instance\nand the debug log level together with the supplied format string and\narguments. 
It formats the message using fmt.Sprintf before emitting it\nthrough the underlying slog handler, only if the current logger is enabled\nfor debug logs.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:237", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "ClientsHolder.ExecCommandContainer", + "kind": "function", + "source": [ + "func (clientsholder *ClientsHolder) ExecCommandContainer(", + "\tctx Context, command string) (stdout, stderr string, err error) {", + "\tcommandStr := []string{\"sh\", \"-c\", command}", + "\tvar buffOut bytes.Buffer", + "\tvar buffErr bytes.Buffer", + "", + "\tlog.Debug(\"execute command on ns=%s, pod=%s container=%s, cmd: %s\", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, \" \"))", + "\treq := clientsholder.K8sClient.CoreV1().RESTClient().", + "\t\tPost().", + "\t\tNamespace(ctx.GetNamespace()).", + "\t\tResource(\"pods\").", + "\t\tName(ctx.GetPodName()).", + "\t\tSubResource(\"exec\").", + "\t\tVersionedParams(\u0026corev1.PodExecOptions{", + "\t\t\tContainer: ctx.GetContainerName(),", + "\t\t\tCommand: commandStr,", + "\t\t\tStdin: false,", + "\t\t\tStdout: 
true,", + "\t\t\tStderr: true,", + "\t\t\tTTY: false,", + "\t\t}, scheme.ParameterCodec)", + "", + "\texec, err := remotecommand.NewSPDYExecutor(clientsholder.RestConfig, \"POST\", req.URL())", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\treturn stdout, stderr, err", + "\t}", + "\terr = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{", + "\t\tStdout: \u0026buffOut,", + "\t\tStderr: \u0026buffErr,", + "\t})", + "\tstdout, stderr = buffOut.String(), buffErr.String()", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tlog.Error(\"%v\", req.URL())", + "\t\tlog.Error(\"command: %s\", command)", + "\t\tlog.Error(\"stderr: %s\", stderr)", + "\t\tlog.Error(\"stdout: %s\", stdout)", + "\t\treturn stdout, stderr, err", + "\t}", + "\treturn stdout, stderr, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerPidNamespace", + "kind": "function", + "source": [ + "func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) {", + "\t// Get the container pid", + "\tocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tpid, err := GetPidFromContainer(testContainer, ocpContext)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"unable to get container process id due to: %v\", err)", + "\t}", + "\tlog.Debug(\"Obtained process id for %s is %d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"lsns -p %d -t pid -n\", pid)", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"unable to run nsenter due to : %v\", err)", + "\t}", + "", + "\treturn strings.Fields(stdout)[0], nil", + "}" + ] + }, + { 
+ "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidFromContainer", + "kind": "function", + "source": [ + "func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) {", + "\tvar pidCmd string", + "", + "\tswitch cut.Runtime {", + "\tcase \"docker\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"docker-pullable\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"cri-o\", \"containerd\":", + "\t\tpidCmd = \"chroot /host crictl inspect --output go-template --template '{{.info.pid}}' \" + cut.UID + DevNull", + "\tdefault:", + "\t\tlog.Debug(\"Container runtime %s not supported yet for this test, skipping\", cut.Runtime)", + "\t\treturn 0, fmt.Errorf(\"container runtime %s not supported\", cut.Runtime)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, pidCmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", pidCmd, cut, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" on %s returned %s\", pidCmd, cut, errStr)", + "\t}", + "", + "\treturn strconv.Atoi(strings.TrimSuffix(outStr, \"\\n\"))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "CompressResultsArtifacts", + "kind": "function", + "source": [ + "func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) {", + "\tzipFileName := generateZipFileName()", + "\tzipFilePath := filepath.Join(outputDir, zipFileName)", + "", + "\tlog.Info(\"Compressing results artifacts into %s\", zipFilePath)", + "\tzipFile, err := os.Create(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed creating tar.gz file %s in dir %s (filepath=%s): %v\",", + "\t\t\tzipFileName, outputDir, zipFilePath, err)", + "\t}", + "", + "\tzipWriter := 
gzip.NewWriter(zipFile)", + "\tdefer zipWriter.Close()", + "", + "\ttarWriter := tar.NewWriter(zipWriter)", + "\tdefer tarWriter.Close()", + "", + "\tfor _, file := range filePaths {", + "\t\tlog.Debug(\"Zipping file %s\", file)", + "", + "\t\ttarHeader, err := getFileTarHeader(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\terr = tarWriter.WriteHeader(tarHeader)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to write tar header for %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf, err := os.Open(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to open file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tif _, err = io.Copy(tarWriter, f); err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to tar file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf.Close()", + "\t}", + "", + "\t// Create fully qualified path to the zip file", + "\tzipFilePath, err = filepath.Abs(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get absolute path for %s: %v\", zipFilePath, err)", + "\t}", + "", + "\t// Return the entire path to the zip file", + "\treturn zipFilePath, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "GetCertIDFromConnectAPI", + "kind": "function", + "source": [ + "func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) {", + "\tlog.Info(\"Getting certification ID from Red Hat Connect API\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tprojectID = strings.ReplaceAll(projectID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectAPIBaseURL = strings.ReplaceAll(connectAPIBaseURL, \"\\\"\", \"\")", + "", + "\t// remove quotes 
from projectID", + "\tprojectIDJSON := fmt.Sprintf(`{ \"projectId\": %q }`, projectID)", + "", + "\t// Convert JSON to bytes", + "\tprojectIDJSONBytes := []byte(projectIDJSON)", + "", + "\t// Create the URL", + "\tcertIDURL := fmt.Sprintf(\"%s/projects/certifications\", connectAPIBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", certIDURL, bytes.NewBuffer(projectIDJSONBytes))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\tlog.Debug(\"Request Body: %s\", req.Body)", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", \"application/json\")", + "\treq.Header.Set(\"Accept\", \"application/json\")", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", certIDURL)", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tres, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer res.Body.Close()", + "", + "\t// Parse the response", + "\tvar certIDResponse CertIDResponse", + "\terr = json.NewDecoder(res.Body).Decode(\u0026certIDResponse)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Certification ID retrieved from the API: %d\", certIDResponse.ID)", + "", + "\t// Return the certification ID", + "\treturn fmt.Sprintf(\"%d\", certIDResponse.ID), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "SendResultsToConnectAPI", + "kind": "function", + "source": [ + "func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error {", + 
"\tlog.Info(\"Sending results to Red Hat Connect\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tcertID = strings.ReplaceAll(certID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectBaseURL = strings.ReplaceAll(connectBaseURL, \"\\\"\", \"\")", + "", + "\tvar buffer bytes.Buffer", + "", + "\t// Create a new multipart writer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\tlog.Debug(\"Creating form file for %s\", zipFile)", + "", + "\tclaimFile, err := os.Open(zipFile)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "", + "\tfw, err := w.CreateFormFile(\"attachment\", zipFile)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form file: %v\", err)", + "\t}", + "", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"type\", \"RhocpBestPracticeTestResult\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"certId\", certID)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"description\", \"CNF Test Results\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create the URL", + "\tconnectAPIURL := fmt.Sprintf(\"%s/attachments/upload\", connectBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", connectAPIURL, \u0026buffer)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + 
"\t// Create a client", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API upload", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tresponse, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer response.Body.Close()", + "", + "\t// Parse the result of the request", + "\tvar uploadResult UploadResult", + "\terr = json.NewDecoder(response.Body).Decode(\u0026uploadResult)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Download URL: %s\", uploadResult.DownloadURL)", + "\tlog.Info(\"Upload Date: %s\", uploadResult.UploadedDate)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "sendRequest", + "kind": "function", + "source": [ + "func sendRequest(req *http.Request, client *http.Client) (*http.Response, error) {", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", req.URL)", + "", + "\tres, err := client.Do(req)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to send post request: %v\", err)", + "\t}", + "", + "\tif res.StatusCode != http.StatusOK {", + "\t\tlog.Debug(\"Response: %v\", res)", + "\t\treturn nil, fmt.Errorf(\"failed to send post request to the endpoint: %v\", res.Status)", + "\t}", + "", + "\treturn res, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "setProxy", + "kind": "function", + "source": [ + "func setProxy(client *http.Client, proxyURL, proxyPort string) {", + "\tif proxyURL != \"\" \u0026\u0026 proxyPort != \"\" {", + "\t\tlog.Debug(\"Proxy is set. 
Using proxy %s:%s\", proxyURL, proxyPort)", + "\t\tproxyURL := fmt.Sprintf(\"%s:%s\", proxyURL, proxyPort)", + "\t\tparsedURL, err := url.Parse(proxyURL)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse proxy URL: %v\", err)", + "\t\t}", + "\t\tlog.Debug(\"Proxy URL: %s\", parsedURL)", + "\t\tclient.Transport = \u0026http.Transport{Proxy: http.ProxyURL(parsedURL)}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = 
getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != 
nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := 
getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), 
[]string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, 
namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "GetScaleCrUnderTest", + "kind": "function", + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. 
Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findClusterOperators", + "kind": "function", + "source": [ + "func findClusterOperators(client clientconfigv1.ClusterOperatorInterface) ([]configv1.ClusterOperator, error) {", + "\tclusterOperators, err := client.List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\treturn nil, err", + "\t}", + "", + "\tif k8serrors.IsNotFound(err) {", + 
"\t\tlog.Debug(\"ClusterOperator CR not found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\treturn clusterOperators.Items, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findDeploymentsByLabels", + "kind": "function", + "source": [ + "func findDeploymentsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.Deployment {", + "\tallDeployments := []appsv1.Deployment{}", + "\tfor _, ns := range namespaces {", + "\t\tdps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list deployments in ns=%s, err: %v . Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(dps.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any deployments in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(dps.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The deployment is added only once if at least one pod matches one label in the Deployment", + "\t\t\t\tif isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, \u0026dps.Items[i]) {", + "\t\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all deployments in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q without label\", dps.Items[i].Name, ns)", + "\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\tlog.Info(\"Deployment %s found in ns=%s\", dps.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allDeployments) == 0 {", + "\t\tlog.Warn(\"Did not find any deployment in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allDeployments", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findOperatorsByLabels", + "kind": "function", + "source": [ + "func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\t// Helper namespaces map to do quick search of the operator's controller namespace.", + "\tnamespacesMap := map[string]bool{}", + "\tfor _, ns := range namespaces {", + "\t\tnamespacesMap[ns.Name] = true", + "\t}", + "", + "\tcsvs = []*olmv1Alpha.ClusterServiceVersion{}", + "\tvar csvList *olmv1Alpha.ClusterServiceVersionList", + "\tfor _, ns := range namespaces {", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tcsvList = findOperatorsMatchingAtLeastOneLabel(olmClient, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching CSVs in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tcsvList, err = olmClient.ClusterServiceVersions(ns.Name).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing csvs in namespace %q , err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\tfor i := range csvList.Items {", + "\t\t\tcsv := \u0026csvList.Items[i]", + "", + "\t\t\t// Filter out CSV if operator's controller pod/s is/are not running in any configured/test namespace.", + "\t\t\tcontrollerNamespace, found := csv.Annotations[nsAnnotation]", + "\t\t\tif !found {", + "\t\t\t\tlog.Error(\"Failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif namespacesMap[controllerNamespace] {", + "\t\t\t\tcsvs = append(csvs, csv)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q 
(namespace %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findOperatorsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func findOperatorsMatchingAtLeastOneLabel(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespace configuration.Namespace) *olmv1Alpha.ClusterServiceVersionList {", + "\tcsvList := \u0026olmv1Alpha.ClusterServiceVersionList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching CSVs in namespace %q with label %q\", namespace, l)", + "\t\tcsv, err := olmClient.ClusterServiceVersions(namespace.Name).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing csvs in namespace %q with label %q, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tcsvList.Items = append(csvList.Items, csv.Items...)", + "\t}", + "\treturn csvList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findPodsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []labelObject, namespace string) *corev1.PodList {", + "\tallPods := \u0026corev1.PodList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching Pods in namespace %s with label %q\", namespace, l)", + "\t\tpods, err := oc.Pods(namespace).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing pods in ns=%s label=%s, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tallPods.Items = append(allPods.Items, pods.Items...)", + "\t}", + 
"\treturn allPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findStatefulSetsByLabels", + "kind": "function", + "source": [ + "func findStatefulSetsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.StatefulSet {", + "\tallStatefulSets := []appsv1.StatefulSet{}", + "\tfor _, ns := range namespaces {", + "\t\tstatefulSet, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list statefulsets in ns=%s, err: %v . Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(statefulSet.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any statefulSet in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(statefulSet.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The StatefulSet is added only once if at least one pod matches one label in the Statefulset", + "\t\t\t\tif isStatefulSetsMatchingAtLeastOneLabel(labels, ns, \u0026statefulSet.Items[i]) {", + "\t\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all statefulsets in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q without label\", statefulSet.Items[i].Name, ns)", + "\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allStatefulSets) == 0 {", + "\t\tlog.Warn(\"Did not find any statefulset in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allStatefulSets", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": 
"findSubscriptions", + "kind": "function", + "source": [ + "func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription {", + "\tsubscriptions := []olmv1Alpha.Subscription{}", + "\tfor _, ns := range namespaces {", + "\t\tdisplayNs := ns", + "\t\tif ns == \"\" {", + "\t\t\tdisplayNs = \"All Namespaces\"", + "\t\t}", + "\t\tlog.Debug(\"Searching subscriptions in namespace %q\", displayNs)", + "\t\tsubscription, err := olmClient.Subscriptions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing subscriptions in namespace %q\", ns)", + "\t\t\tcontinue", + "\t\t}", + "\t\tsubscriptions = append(subscriptions, subscription.Items...)", + "\t}", + "", + "\tfor i := range subscriptions {", + "\t\tlog.Info(\"Found subscription %q (ns %q)\", subscriptions[i].Name, subscriptions[i].Namespace)", + "\t}", + "\treturn subscriptions", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isDeploymentsPodsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func isDeploymentsPodsMatchingAtLeastOneLabel(labels []labelObject, namespace string, deployment *appsv1.Deployment) bool {", + "\tfor _, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q using label %s=%s\", deployment.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif deployment.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"Deployment %s found in ns=%s\", deployment.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isStatefulSetsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace string, 
statefulSet *appsv1.StatefulSet) bool {", + "\tfor _, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q using label %s=%s\", statefulSet.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif statefulSet.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. 
See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + 
"\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + 
"\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runAfterAllFn", + "kind": "function", + "source": [ + "func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterAll\", group.name)", + "", + "\tif group.afterAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tlastCheck := checks[len(checks)-1]", + "\tzeroRemainingChecks := []*Check{}", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterAll function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterAll function panicked\", \"\\n: \"+stackTrace, group, lastCheck, zeroRemainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterAllFn(group.checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterAll function: 
%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterAll function unexpected error\", err.Error(), group, lastCheck, zeroRemainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runAfterEachFn", + "kind": "function", + "source": [ + "func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterEach for check %s\", group.name, check.ID)", + "", + "\tif group.afterEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runBeforeAllFn", + "kind": "function", + "source": [ + "func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeAll\", group.name)", + "\tif group.beforeAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tfirstCheck := checks[0]", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", 
+ "\t\t\tlog.Error(\"Panic while running beforeAll function:\\n%v\", stackTrace)", + "\t\t\t// Set first check's result as error and skip the remaining ones.", + "\t\t\terr = onFailure(\"beforeAll function panicked\", \"\\n:\"+stackTrace, group, firstCheck, checks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeAllFn(checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeAll function: %v\", err)", + "\t\t// Set first check's result as error and skip the remaining ones.", + "\t\treturn onFailure(\"beforeAll function unexpected error\", err.Error(), group, firstCheck, checks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runBeforeEachFn", + "kind": "function", + "source": [ + "func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeEach for check %s\", group.name, check.ID)", + "\tif group.beforeEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"beforeEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"beforeEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": 
"NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "LoadConfiguration", + "kind": "function", + "source": [ + "func LoadConfiguration(filePath string) (TestConfiguration, error) {", + "\tif confLoaded {", + "\t\tlog.Debug(\"config file already loaded, return previous element\")", + "\t\treturn configuration, nil", + "\t}", + "", + "\tlog.Info(\"Loading config from file: %s\", filePath)", + "\tcontents, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\terr = yaml.Unmarshal(contents, \u0026configuration)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\t// Set default namespace for the probe daemonset pods, in case it was not 
set.", + "\tif configuration.ProbeDaemonSetNamespace == \"\" {", + "\t\tlog.Warn(\"No namespace configured for the probe daemonset. Defaulting to namespace %q\", defaultProbeDaemonSetNamespace)", + "\t\tconfiguration.ProbeDaemonSetNamespace = defaultProbeDaemonSetNamespace", + "\t} else {", + "\t\tlog.Info(\"Namespace for probe daemonset: %s\", configuration.ProbeDaemonSetNamespace)", + "\t}", + "", + "\tconfLoaded = true", + "\treturn configuration, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "AreCPUResourcesWholeUnits", + "kind": "function", + "source": [ + "func AreCPUResourcesWholeUnits(p *Pod) bool {", + "\tisInteger := func(val int64) bool {", + "\t\treturn val%1000 == 0", + "\t}", + "", + "\t// Pods may contain more than one container. All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// Resources must be specified", + "\t\tcpuRequestsMillis := cut.Resources.Requests.Cpu().MilliValue()", + "\t\tcpuLimitsMillis := cut.Resources.Limits.Cpu().MilliValue()", + "", + "\t\tif cpuRequestsMillis == 0 || cpuLimitsMillis == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined requests or limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !isInteger(cpuRequestsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU requests %d (milli) that has to be a whole unit.\", cut.String(), cpuRequestsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t\tif !isInteger(cpuLimitsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU limits %d (milli) that has to be a whole unit.\", cut.String(), cpuLimitsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "AreResourcesIdentical", + "kind": "function", + "source": [ + "func AreResourcesIdentical(p *Pod) bool {", + "\t// Pods may contain more than one 
container. All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// At least limits must be specified (requests default to limits if not specified)", + "\t\tif len(cut.Resources.Limits) == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Gather the values", + "\t\tcpuRequests := cut.Resources.Requests.Cpu()", + "\t\tcpuLimits := cut.Resources.Limits.Cpu()", + "\t\tmemoryRequests := cut.Resources.Requests.Memory()", + "\t\tmemoryLimits := cut.Resources.Limits.Memory()", + "", + "\t\t// Check for mismatches", + "\t\tif !cpuRequests.Equal(*cpuLimits) {", + "\t\t\tlog.Debug(\"%s has CPU requests %f and limits %f that do not match.\", cut.String(), cpuRequests.AsApproximateFloat64(), cpuLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !memoryRequests.Equal(*memoryLimits) {", + "\t\t\tlog.Debug(\"%s has memory requests %f and limits %f that do not match.\", cut.String(), memoryRequests.AsApproximateFloat64(), memoryLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.GetUID", + "kind": "function", + "source": [ + "func (c *Container) GetUID() (string, error) {", + "\tsplit := strings.Split(c.Status.ContainerID, \"://\")", + "\tuid := \"\"", + "\tif len(split) \u003e 0 {", + "\t\tuid = split[len(split)-1]", + "\t}", + "\tif uid == \"\" {", + "\t\tlog.Debug(\"could not find uid of %s/%s/%s\\n\", c.Namespace, c.Podname, c.Name)", + "\t\treturn \"\", errors.New(\"cannot determine container UID\")", + "\t}", + "\tlog.Debug(\"uid of %s/%s/%s=%s\\n\", c.Namespace, c.Podname, c.Name, uid)", + "\treturn uid, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": 
"LoadBalancingDisabled", + "kind": "function", + "source": [ + "func LoadBalancingDisabled(p *Pod) bool {", + "\tconst (", + "\t\tdisableVar = \"disable\"", + "\t)", + "", + "\tcpuLoadBalancingDisabled := false", + "\tirqLoadBalancingDisabled := false", + "", + "\tif v, ok := p.Annotations[\"cpu-load-balancing.crio.io\"]; ok {", + "\t\tif v == disableVar {", + "\t\t\tcpuLoadBalancingDisabled = true", + "\t\t} else {", + "\t\t\tlog.Debug(\"Annotation cpu-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.\")", + "\t\t}", + "\t} else {", + "\t\tlog.Debug(\"Annotation cpu-load-balancing.crio.io is missing.\")", + "\t}", + "", + "\tif v, ok := p.Annotations[\"irq-load-balancing.crio.io\"]; ok {", + "\t\tif v == disableVar {", + "\t\t\tirqLoadBalancingDisabled = true", + "\t\t} else {", + "\t\t\tlog.Debug(\"Annotation irq-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.\")", + "\t\t}", + "\t} else {", + "\t\tlog.Debug(\"Annotation irq-load-balancing.crio.io is missing.\")", + "\t}", + "", + "\t// Both conditions have to be set to 'disable'", + "\tif cpuLoadBalancingDisabled \u0026\u0026 irqLoadBalancingDisabled {", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsCPUIsolationCompliant", + "kind": "function", + "source": [ + "func (p *Pod) IsCPUIsolationCompliant() bool {", + "\tisCPUIsolated := true", + "", + "\tif !LoadBalancingDisabled(p) {", + "\t\tlog.Debug(\"Pod %q has been found to not have annotations set correctly for CPU isolation.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\tif !p.IsRuntimeClassNameSpecified() {", + "\t\tlog.Debug(\"Pod %q has been found to not have runtimeClassName specified.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\treturn isCPUIsolated", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOV", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOV() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(p.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\tisSRIOV, err := isNetworkAttachmentDefinitionConfigTypeSRIOV(nad.Spec.Config)", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to know if network-attachment %s is sriov: %v\", networkName, err)", + "\t\t}", + "", + "\t\tlog.Debug(\"%s: NAD config: %s\", p, nad.Spec.Config)", + "\t\tif isSRIOV {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOVWithMTU", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOVWithMTU() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + 
"\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(", + "\t\t\tp.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\t// If the network-status annotation is not set, let's check the SriovNetwork/SriovNetworkNodePolicy CRs", + "\t\t// to see if the MTU is set there.", + "\t\tlog.Debug(\"Number of SriovNetworks: %d\", len(env.AllSriovNetworks))", + "\t\tlog.Debug(\"Number of SriovNetworkNodePolicies: %d\", len(env.AllSriovNetworkNodePolicies))", + "\t\tif sriovNetworkUsesMTU(env.AllSriovNetworks, env.AllSriovNetworkNodePolicies, nad.Name) {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildContainerImageSource", + "kind": "function", + "source": [ + "func buildContainerImageSource(urlImage, urlImageID string) (source ContainerImageIdentifier) {", + "\tconst regexImageWithTag = `^([^/]*)/*([^@]*):(.*)`", + "\tconst regexImageDigest = `^([^/]*)/(.*)@(.*:.*)`", + "", + "\t// get image repository, Name and tag if present", + "\tre := regexp.MustCompile(regexImageWithTag)", + "\tmatch := re.FindStringSubmatch(urlImage)", + "", + "\tif match != nil {", + "\t\tif match[2] != \"\" {", + "\t\t\tsource.Registry = match[1]", + "\t\t\tsource.Repository = match[2]", + "\t\t\tsource.Tag = 
match[3]", + "\t\t} else {", + "\t\t\tsource.Repository = match[1]", + "\t\t\tsource.Tag = match[3]", + "\t\t}", + "\t}", + "", + "\t// get image Digest based on imageID only", + "\tre = regexp.MustCompile(regexImageDigest)", + "\tmatch = re.FindStringSubmatch(urlImageID)", + "", + "\tif match != nil {", + "\t\tsource.Digest = match[3]", + "\t}", + "", + "\tlog.Debug(\"Parsed image, repo: %s, name:%s, tag: %s, digest: %s\",", + "\t\tsource.Registry,", + "\t\tsource.Repository,", + "\t\tsource.Tag,", + "\t\tsource.Digest)", + "", + "\treturn source", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods 
= make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = 
data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", 
len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := 
\u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getAtLeastOneInstallPlan", + "kind": "function", + "source": [ + "func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (atLeastOneInstallPlan bool) {", + "\tatLeastOneInstallPlan = false", + "\tfor _, installPlan := range 
allInstallPlans {", + "\t\tif installPlan.Namespace != op.SubscriptionNamespace {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If the install plan does not deploys this CSV, check the next one", + "\t\tif !getAtLeastOneCsv(csv, installPlan) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tindexImage, catalogErr := getCatalogSourceImageIndexFromInstallPlan(installPlan, allCatalogSources)", + "\t\tif catalogErr != nil {", + "\t\t\tlog.Debug(\"failed to get installPlan image index for csv %s (ns %s) installPlan %s, err: %v\",", + "\t\t\t\tcsv.Name, csv.Namespace, installPlan.Name, catalogErr)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.InstallPlans = append(op.InstallPlans, CsvInstallPlan{", + "\t\t\tName: installPlan.Name,", + "\t\t\tBundleImage: installPlan.Status.BundleLookups[0].Path,", + "\t\t\tIndexImage: indexImage,", + "\t\t})", + "\t\tatLeastOneInstallPlan = true", + "\t}", + "\treturn atLeastOneInstallPlan", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "isNetworkAttachmentDefinitionConfigTypeSRIOV", + "kind": "function", + "source": [ + "func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error) {", + "\tconst (", + "\t\ttypeSriov = \"sriov\"", + "\t)", + "", + "\ttype CNIConfig struct {", + "\t\tCniVersion string `json:\"cniVersion\"`", + "\t\tName string `json:\"name\"`", + "\t\tType *string `json:\"type,omitempty\"`", + "\t\tPlugins *[]struct {", + "\t\t\tType string `json:\"type\"`", + "\t\t} `json:\"plugins,omitempty\"`", + "\t}", + "", + "\tcniConfig := CNIConfig{}", + "\tif err := json.Unmarshal([]byte(nadConfig), \u0026cniConfig); err != nil {", + "\t\treturn false, fmt.Errorf(\"failed to unmarshal cni config %s: %v\", nadConfig, err)", + "\t}", + "", + "\t// If type is found, it's a single plugin CNI config.", + "\tif cniConfig.Type != nil {", + "\t\tlog.Debug(\"Single plugin config type found: %+v, type=%s\", cniConfig, *cniConfig.Type)", + "\t\treturn 
*cniConfig.Type == typeSriov, nil", + "\t}", + "", + "\tif cniConfig.Plugins == nil {", + "\t\treturn false, fmt.Errorf(\"invalid multi-plugins cni config: %s\", nadConfig)", + "\t}", + "", + "\tlog.Debug(\"CNI plugins: %+v\", *cniConfig.Plugins)", + "\tfor i := range *cniConfig.Plugins {", + "\t\tplugin := (*cniConfig.Plugins)[i]", + "\t\tif plugin.Type == typeSriov {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\t// No sriov plugin type found.", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "isNetworkAttachmentDefinitionSRIOVConfigMTUSet", + "kind": "function", + "source": [ + "func isNetworkAttachmentDefinitionSRIOVConfigMTUSet(nadConfig string) (bool, error) {", + "\tconst (", + "\t\ttypeSriov = \"sriov\"", + "\t)", + "", + "\ttype CNIConfig struct {", + "\t\tCniVersion string `json:\"cniVersion\"`", + "\t\tName string `json:\"name\"`", + "\t\tType *string `json:\"type,omitempty\"`", + "\t\tPlugins *[]struct {", + "\t\t\tType string `json:\"type\"`", + "\t\t\tMTU int `json:\"mtu\"`", + "\t\t} `json:\"plugins,omitempty\"`", + "\t}", + "", + "\tcniConfig := CNIConfig{}", + "\tif err := json.Unmarshal([]byte(nadConfig), \u0026cniConfig); err != nil {", + "\t\treturn false, fmt.Errorf(\"failed to unmarshal cni config %s: %v\", nadConfig, err)", + "\t}", + "", + "\tif cniConfig.Plugins == nil {", + "\t\treturn false, fmt.Errorf(\"invalid multi-plugins cni config: %s\", nadConfig)", + "\t}", + "", + "\tlog.Debug(\"CNI plugins: %+v\", *cniConfig.Plugins)", + "\tfor i := range *cniConfig.Plugins {", + "\t\tplugin := (*cniConfig.Plugins)[i]", + "\t\tif plugin.Type == typeSriov \u0026\u0026 plugin.MTU \u003e 0 {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\t// No sriov plugin type found.", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "sriovNetworkUsesMTU", + "kind": 
"function", + "source": [ + "func sriovNetworkUsesMTU(sriovNetworks, sriovNetworkNodePolicies []unstructured.Unstructured, nadName string) bool {", + "\tfor _, sriovNetwork := range sriovNetworks {", + "\t\tnetworkName := sriovNetwork.GetName()", + "\t\tlog.Debug(\"Checking SriovNetwork %s\", networkName)", + "\t\tif networkName == nadName {", + "\t\t\tlog.Debug(\"SriovNetwork %s found to match the NAD name %s\", networkName, nadName)", + "", + "\t\t\t// Get the ResourceName from the SriovNetwork spec", + "\t\t\tspec, found, err := unstructured.NestedMap(sriovNetwork.Object, \"spec\")", + "\t\t\tif !found || err != nil {", + "\t\t\t\tlog.Debug(\"Failed to get spec from SriovNetwork %s: %v\", networkName, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tresourceName, found, err := unstructured.NestedString(spec, \"resourceName\")", + "\t\t\tif !found || err != nil {", + "\t\t\t\tlog.Debug(\"Failed to get resourceName from SriovNetwork %s: %v\", networkName, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tfor _, nodePolicy := range sriovNetworkNodePolicies {", + "\t\t\t\tpolicyNamespace := nodePolicy.GetNamespace()", + "\t\t\t\tnetworkNamespace := sriovNetwork.GetNamespace()", + "", + "\t\t\t\tlog.Debug(\"Checking SriovNetworkNodePolicy in namespace %s\", policyNamespace)", + "\t\t\t\tif policyNamespace == networkNamespace {", + "\t\t\t\t\t// Get the ResourceName and MTU from the SriovNetworkNodePolicy spec", + "\t\t\t\t\tpolicySpec, found, err := unstructured.NestedMap(nodePolicy.Object, \"spec\")", + "\t\t\t\t\tif !found || err != nil {", + "\t\t\t\t\t\tlog.Debug(\"Failed to get spec from SriovNetworkNodePolicy: %v\", err)", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\tpolicyResourceName, found, err := unstructured.NestedString(policySpec, \"resourceName\")", + "\t\t\t\t\tif !found || err != nil {", + "\t\t\t\t\t\tlog.Debug(\"Failed to get resourceName from SriovNetworkNodePolicy: %v\", err)", + "\t\t\t\t\t\tcontinue", + 
"\t\t\t\t\t}", + "", + "\t\t\t\t\tif policyResourceName == resourceName {", + "\t\t\t\t\t\tmtu, found, err := unstructured.NestedInt64(policySpec, \"mtu\")", + "\t\t\t\t\t\tif found \u0026\u0026 err == nil \u0026\u0026 mtu \u003e 0 {", + "\t\t\t\t\t\t\treturn true", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": "getCrsPerNamespaces", + "kind": "function", + "source": [ + "func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces map[string][]string, err error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, version := range aCrd.Spec.Versions {", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: aCrd.Spec.Group,", + "\t\t\tVersion: version.Name,", + "\t\t\tResource: aCrd.Spec.Names.Plural,", + "\t\t}", + "\t\tlog.Debug(\"Looking for CRs from CRD: %s api version:%s group:%s plural:%s\", aCrd.Name, version.Name, aCrd.Spec.Group, aCrd.Spec.Names.Plural)", + "\t\tcrs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error getting %s: %v\\n\", aCrd.Name, err)", + "\t\t\treturn crdNamespaces, err", + "\t\t}", + "\t\tcrdNamespaces = make(map[string][]string)", + "\t\tfor _, cr := range crs.Items {", + "\t\t\tname := cr.Object[\"metadata\"].(map[string]interface{})[\"name\"]", + "\t\t\tnamespace := cr.Object[\"metadata\"].(map[string]interface{})[\"namespace\"]", + "\t\t\tvar namespaceStr, nameStr string", + "\t\t\tif namespace == nil {", + "\t\t\t\tnamespaceStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnamespaceStr = fmt.Sprintf(\"%s\", namespace)", + "\t\t\t}", + "\t\t\tif name == nil {", + "\t\t\t\tnameStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnameStr = 
fmt.Sprintf(\"%s\", name)", + "\t\t\t}", + "\t\t\tcrdNamespaces[namespaceStr] = append(crdNamespaces[namespaceStr], nameStr)", + "\t\t}", + "\t}", + "\treturn crdNamespaces, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "compareCategory", + "kind": "function", + "source": [ + "func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool {", + "\tresult := true", + "\tlog.Debug(\"Testing if pod belongs to category %s\", \u0026id)", + "\t// AllVolumeAllowed reports whether the volumes in the container are compliant to the SCC (same volume list for all SCCs).", + "\t// True means that all volumes declared in the pod are allowed in the SCC.", + "\t// False means that at least one volume is disallowed", + "\tif refCategory.AllVolumeAllowed == containerSCC.AllVolumeAllowed {", + "\t\tlog.Debug(\"AllVolumeAllowed = %s - OK\", containerSCC.AllVolumeAllowed)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"AllVolumeAllowed = %s but expected \u003e=\u003c=%s - NOK\", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed)", + "\t}", + "\t// RunAsUserPresent reports whether the RunAsUser Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the RunAsUser Field is set.", + "\t// False means that it is not set (nil)", + "\t// The runAsUser range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// runAsUser:", + "\t// type: MustRunAsRange", + "\t// uidRangeMin: 1000", + "\t// uidRangeMax: 2000", + "\tif refCategory.RunAsUserPresent == containerSCC.RunAsUserPresent {", + "\t\tlog.Debug(\"RunAsUserPresent = %s - OK\", containerSCC.RunAsUserPresent)", + "\t} else {", + "\t\tlog.Debug(\"RunAsUserPresent = %s but expected %s - NOK\", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent)", + "\t\tresult = false", + "\t}", + "\t// RunAsNonRoot is true if the RunAsNonRoot field is set to true, false otherwise.", + "\t// if setting a range including the roor UID 0 ( for instance 0-2000), then this option can disallow it.", + "\tif refCategory.RunAsNonRoot \u003e= containerSCC.RunAsNonRoot {", + "\t\tlog.Debug(\"RunAsNonRoot = %s - OK\", containerSCC.RunAsNonRoot)", + "\t} else {", + "\t\tlog.Debug(\"RunAsNonRoot = %s but expected %s - NOK\", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot)", + "\t\tresult = false", + "\t}", + "\t// FsGroupPresent reports whether the FsGroup Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the FsGroup Field is set.", + "\t// False means that it is not set (nil)", + "\t// The FSGroup range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// fsGroup:", + "\t// type: MustRunAs", + "\t// ranges:", + "\t// - min: 1000900000", + "\t// max: 1000900010", + "\tif refCategory.FsGroupPresent == containerSCC.FsGroupPresent {", + "\t\tlog.Debug(\"FsGroupPresent = %s - OK\", containerSCC.FsGroupPresent)", + "\t} else {", + "\t\tlog.Debug(\"FsGroupPresent = %s but expected %s - NOK\", containerSCC.FsGroupPresent, refCategory.FsGroupPresent)", + "\t\tresult = false", + "\t}", + "\t// RequiredDropCapabilitiesPresent is true if the drop DropCapabilities field has at least the set of required drop capabilities ( same required set for all categories ).", + "\t// False means that some required DropCapabilities are missing.", + "\tif refCategory.RequiredDropCapabilitiesPresent == containerSCC.RequiredDropCapabilitiesPresent {", + "\t\tlog.Debug(\"DropCapabilities list - OK\")", + "\t} else {", + "\t\tlog.Debug(\"RequiredDropCapabilitiesPresent = %s but expected %s - NOK\", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent)", + "\t\tlog.Debug(\"its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \")", + "\t\tresult = false", + "\t}", + "\t// HostDirVolumePluginPresent is true if a hostpath volume is configured, false otherwise.", + "\t// It is a deprecated field and is derived from the volume list currently configured in the container.", + "\t// see https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html#use-the-hostpath-volume-plugin", + "\tif refCategory.HostDirVolumePluginPresent == containerSCC.HostDirVolumePluginPresent {", + "\t\tlog.Debug(\"HostDirVolumePluginPresent = %s - OK\", containerSCC.HostDirVolumePluginPresent)", + "\t} else {", + 
"\t\tlog.Debug(\"HostDirVolumePluginPresent = %s but expected %s - NOK\", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent)", + "\t\tresult = false", + "\t}", + "\t// HostIPC is true if the HostIPC field is set to true, false otherwise.", + "\tif refCategory.HostIPC \u003e= containerSCC.HostIPC {", + "\t\tlog.Debug(\"HostIPC = %s - OK\", containerSCC.HostIPC)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostIPC = %s but expected \u003c= %s - NOK\", containerSCC.HostIPC, refCategory.HostIPC)", + "\t}", + "\t// HostNetwork is true if the HostNetwork field is set to true, false otherwise.", + "\tif refCategory.HostNetwork \u003e= containerSCC.HostNetwork {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostNetwork = %s but expected \u003c= %s - NOK\", containerSCC.HostNetwork, refCategory.HostNetwork)", + "\t}", + "\t// HostPID is true if the HostPID field is set to true, false otherwise.", + "\tif refCategory.HostPID \u003e= containerSCC.HostPID {", + "\t\tlog.Debug(\"HostPID = %s - OK\", containerSCC.HostPID)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPID = %s but expected \u003c= %s - NOK\", containerSCC.HostPID, refCategory.HostPID)", + "\t}", + "\t// HostPorts is true if the HostPorts field is set to true, false otherwise.", + "\tif refCategory.HostPorts \u003e= containerSCC.HostPorts {", + "\t\tlog.Debug(\"HostPorts = %s - OK\", containerSCC.HostPorts)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPorts = %s but expected \u003c= %s - NOK\", containerSCC.HostPorts, refCategory.HostPorts)", + "\t}", + "\t// PrivilegeEscalation is true if the PrivilegeEscalation field is set to true, false otherwise.", + "\tif refCategory.PrivilegeEscalation \u003e= containerSCC.PrivilegeEscalation {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = 
false", + "\t\tlog.Debug(\"PrivilegeEscalation = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation)", + "\t}", + "\t// PrivilegedContainer is true if the PrivilegedContainer field is set to true, false otherwise.", + "\tif refCategory.PrivilegedContainer \u003e= containerSCC.PrivilegedContainer {", + "\t\tlog.Debug(\"PrivilegedContainer = %s - OK\", containerSCC.PrivilegedContainer)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"PrivilegedContainer = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer)", + "\t}", + "", + "\t// From the SecurityContextConstraint CRD spec:", + "\t// description: ReadOnlyRootFilesystem when set to true will force containers", + "\t// to run with a read only root file system. If the container specifically", + "\t// requests to run with a non-read only root file system the SCC should", + "\t// deny the pod. If set to false the container may run with a read only", + "\t// root file system if it wishes but it will not be forced to.", + "\t// type: boolean", + "\tif refCategory.ReadOnlyRootFilesystem == NOK {", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s - OK (not enforced by SCC)\", containerSCC.ReadOnlyRootFilesystem)", + "\t} else if containerSCC.ReadOnlyRootFilesystem != OK {", + "\t\tresult = false", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s but expected \u003c= %s - NOK\", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem)", + "\t}", + "\t// SeLinuxContextPresent is true if the SeLinuxContext field is present and set to a value (e.g. not nil), false otherwise.", + "\t// An SELinuxContext strategy of MustRunAs with no level set. 
Admission looks for the openshift.io/sa.scc.mcs annotation to populate the level.", + "\tif refCategory.SeLinuxContextPresent == containerSCC.SeLinuxContextPresent {", + "\t\tlog.Debug(\"SeLinuxContextPresent is not nil - OK\")", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK\", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent)", + "\t}", + "\t// CapabilitiesCategory indicates the lowest SCC category to which the list of capabilities.add in the container can be mapped to.", + "\tif refCategory.CapabilitiesCategory != containerSCC.CapabilitiesCategory {", + "\t\tresult = false", + "\t\tlog.Debug(\"CapabilitiesCategory = %s but expected %s - NOK\", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory)", + "\t} else {", + "\t\tlog.Debug(\"CapabilitiesCategory list is as expected %s - OK\", containerSCC.CapabilitiesCategory)", + "\t}", + "\treturn result", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + 
"", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + 
"\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "deletePod", + "kind": "function", + "source": [ + "func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlog.Debug(\"deleting ns=%s pod=%s with %s mode\", pod.Namespace, pod.Name, mode)", + "\tgracePeriodSeconds := *pod.Spec.TerminationGracePeriodSeconds", + "\t// Create watcher before deleting pod", + "\twatcher, err := clients.K8sClient.CoreV1().Pods(pod.Namespace).Watch(context.TODO(), metav1.ListOptions{", + "\t\tFieldSelector: \"metadata.name=\" + pod.Name + \",metadata.namespace=\" + pod.Namespace,", + "\t})", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"waitPodDeleted 
ns=%s pod=%s, err=%s\", pod.Namespace, pod.Name, err)", + "\t}", + "\t// Actually deleting pod", + "\terr = clients.K8sClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{", + "\t\tGracePeriodSeconds: \u0026gracePeriodSeconds,", + "\t})", + "\tif err != nil {", + "\t\tlog.Error(\"Error deleting %s err: %v\", pod.String(), err)", + "\t\treturn err", + "\t}", + "\tif mode == DeleteBackground {", + "\t\treturn nil", + "\t}", + "\twg.Add(1)", + "\tpodName := pod.Name", + "\tnamespace := pod.Namespace", + "\tgo func() {", + "\t\tdefer wg.Done()", + "\t\twaitPodDeleted(namespace, podName, gracePeriodSeconds, watcher)", + "\t}()", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "waitPodDeleted", + "kind": "function", + "source": [ + "func waitPodDeleted(ns, podName string, timeout int64, watcher watch.Interface) {", + "\tlog.Debug(\"Entering waitPodDeleted ns=%s pod=%s\", ns, podName)", + "\tdefer watcher.Stop()", + "", + "\tfor {", + "\t\tselect {", + "\t\tcase event := \u003c-watcher.ResultChan():", + "\t\t\tif event.Type == watch.Deleted || event.Type == \"\" {", + "\t\t\t\tlog.Debug(\"ns=%s pod=%s deleted\", ns, podName)", + "\t\t\t\treturn", + "\t\t\t}", + "\t\tcase \u003c-time.After(time.Duration(timeout) * time.Second):", + "\t\t\tlog.Info(\"watch for pod deletion timedout after %d seconds\", timeout)", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "getNotReadyDeployments", + "kind": "function", + "source": [ + "func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment {", + "\tnotReadyDeployments := []*provider.Deployment{}", + "\tfor _, dep := range deployments {", + "\t\tready, err := isDeploymentReady(dep.Name, dep.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: 
%v\", dep.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", dep.ToString())", + "\t\t} else {", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t}", + "\t}", + "", + "\treturn notReadyDeployments", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "getNotReadyStatefulSets", + "kind": "function", + "source": [ + "func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet {", + "\tnotReadyStatefulSets := []*provider.StatefulSet{}", + "\tfor _, sts := range statefulSets {", + "\t\tready, err := isStatefulSetReady(sts.Name, sts.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", sts.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", sts.ToString())", + "\t\t} else {", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t}", + "\t}", + "", + "\treturn notReadyStatefulSets", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn 
fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "GetServiceIPVersion", + "kind": "function", + "source": [ + "func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) {", + "\tipver, err := netcommons.GetIPVersion(aService.Spec.ClusterIP)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s cannot get aService clusterIP version\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif aService.Spec.IPFamilyPolicy == nil {", + "\t\terr = fmt.Errorf(\"%s does not have a IPFamilyPolicy configured\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv6 {", + "\t\tlog.Debug(\"%s is single stack ipv6\", ToString(aService))", + "\t\treturn netcommons.IPv6, nil", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv4 {", + "\t\tlog.Debug(\"%s is single stack ipv4\", ToString(aService))", + "\t\treturn netcommons.IPv4, nil", + "\t}", + "\tif (*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyPreferDualStack ||", + 
"\t\t*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyRequireDualStack) \u0026\u0026", + "\t\tlen(aService.Spec.ClusterIPs) \u003c 2 {", + "\t\terr = fmt.Errorf(\"%s is dual stack but has only zero or one ClusterIPs\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "", + "\tres, err := isClusterIPsDualStack(aService.Spec.ClusterIPs)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s, err:%s\", ToString(aService), err)", + "\t\treturn result, err", + "\t}", + "\tif res {", + "\t\tlog.Debug(\"%s is dual-stack\", ToString(aService))", + "\t\treturn netcommons.IPv4v6, nil", + "\t}", + "", + "\terr = fmt.Errorf(\"%s is not compliant, it is not single stack ipv6 or dual stack\", ToString(aService))", + "\treturn result, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", 
+ "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "OperatorInstalledMoreThanOnce", + "kind": "function", + "source": [ + "func OperatorInstalledMoreThanOnce(operator1, operator2 *provider.Operator) bool {", + "\t// Safeguard against nil operators (should not happen)", + "\tif operator1 == nil || operator2 == nil {", + "\t\treturn false", + "\t}", + "", + "\tlog.Debug(\"Comparing 
operator %q with operator %q\", operator1.Name, operator2.Name)", + "", + "\t// Retrieve the version from each CSV", + "\tcsv1Version := operator1.Csv.Spec.Version.String()", + "\tcsv2Version := operator2.Csv.Spec.Version.String()", + "", + "\tlog.Debug(\"CSV1 Version: %s\", csv1Version)", + "\tlog.Debug(\"CSV2 Version: %s\", csv2Version)", + "", + "\t// Strip the version from the CSV name by removing the suffix (which should be the version)", + "\tcsv1Name := strings.TrimSuffix(operator1.Csv.Name, \".v\"+csv1Version)", + "\tcsv2Name := strings.TrimSuffix(operator2.Csv.Name, \".v\"+csv2Version)", + "", + "\tlog.Debug(\"Comparing CSV names %q and %q\", csv1Name, csv2Name)", + "", + "\t// The CSV name should be the same, but the version should be different", + "\t// if the operator is installed more than once.", + "\tif operator1.Csv != nil \u0026\u0026 operator2.Csv != nil \u0026\u0026", + "\t\tcsv1Name == csv2Name \u0026\u0026", + "\t\tcsv1Version != csv2Version {", + "\t\tlog.Error(\"Operator %q is installed more than once\", operator1.Name)", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + 
"\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + 
"\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", 
true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/catalogsource", + "name": "SkipPMBasedOnChannel", + "kind": "function", + "source": [ + "func SkipPMBasedOnChannel(channels []olmpkgv1.PackageChannel, csvName string) bool {", + "\t// This logic is in place because it is possible for an operator to pull from a multiple package manifests.", + "\tskipPMBasedOnChannel := true", + "\tfor c := range channels {", + "\t\tlog.Debug(\"Comparing channel currentCSV %q with current CSV %q\", channels[c].CurrentCSV, csvName)", + "\t\tlog.Debug(\"Number of channel entries %d\", len(channels[c].Entries))", + "\t\tfor _, entry := range channels[c].Entries {", + "\t\t\tlog.Debug(\"Comparing entry name %q with current CSV %q\", entry.Name, csvName)", + "", + "\t\t\tif entry.Name == csvName {", + "\t\t\t\tlog.Debug(\"Skipping package manifest based on channel entry %q\", entry.Name)", + "\t\t\t\tskipPMBasedOnChannel = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !skipPMBasedOnChannel {", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn skipPMBasedOnChannel", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "WaitOperatorReady", + "kind": "function", + "source": [ + "func 
WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to take into account that its pods", + "\t\t// could have been deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "isOperatorPhaseFailedOrUnknown", + "kind": "function", + "source": [ + "func isOperatorPhaseFailedOrUnknown(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tlog.Debug(\"Checking failed status phase for csv %s (ns %s). 
Phase: %v\", csv.Name, csv.Namespace, csv.Status.Phase)", + "\treturn csv.Status.Phase == v1alpha1.CSVPhaseFailed ||", + "\t\tcsv.Status.Phase == v1alpha1.CSVPhaseUnknown", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "isOperatorPhaseSucceeded", + "kind": "function", + "source": [ + "func isOperatorPhaseSucceeded(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tlog.Debug(\"Checking succeeded status phase for csv %s (ns %s). Phase: %v\", csv.Name, csv.Namespace, csv.Status.Phase)", + "\treturn csv.Status.Phase == v1alpha1.CSVPhaseSucceeded", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, 
env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.getNodeNumaHugePages", + "kind": "function", + "source": [ + "func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error) {", + "\t// This command must run inside the node, so we'll need the node's context to run commands inside the probe daemonset pod.", + "\tstdout, stderr, err := tester.commander.ExecCommandContainer(tester.context, cmd)", + "\tlog.Debug(\"getNodeNumaHugePages stdout: %s, stderr: %s\", stdout, stderr)", + "\tif err != nil {", + "\t\treturn hugepagesByNuma{}, err", + "\t}", + "\tif stderr != \"\" {", + "\t\treturn hugepagesByNuma{}, errors.New(stderr)", + "\t}", + "", + "\thugepages = hugepagesByNuma{}", + "\tr := regexp.MustCompile(outputRegex)", + "\tfor _, line := range 
strings.Split(stdout, \"\\n\") {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tvalues := r.FindStringSubmatch(line)", + "\t\tif len(values) != numRegexFields {", + "\t\t\treturn hugepagesByNuma{}, fmt.Errorf(\"failed to parse node's numa hugepages output line:%s (stdout: %s)\", line, stdout)", + "\t\t}", + "", + "\t\tnumaNode, _ := strconv.Atoi(values[1])", + "\t\thpSize, _ := strconv.Atoi(values[2])", + "\t\thpCount, _ := strconv.Atoi(values[3])", + "", + "\t\tif sizeCounts, exists := hugepages[numaNode]; exists {", + "\t\t\tsizeCounts[hpSize] = hpCount", + "\t\t} else {", + "\t\t\thugepages[numaNode] = countBySize{hpSize: hpCount}", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Node %s hugepages: %s\", tester.node.Data.Name, hugepages)", + "\treturn hugepages, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "NodeTainted.GetTainterModules", + "kind": "function", + "source": [ + "func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error) {", + "\t// First, get all the modules that are tainting the kernel in this node.", + "\tallTainters, err := nt.getAllTainterModules()", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get tainter modules: %w\", err)", + "\t}", + "", + "\tfilteredTainters := map[string]string{}", + "\tfor moduleName, moduleTaintsLetters := range allTainters {", + "\t\tmoduleTaints := DecodeKernelTaintsFromLetters(moduleTaintsLetters)", + "\t\tlog.Debug(\"%s: Module %s has taints (%s): %s\", nt.node, moduleName, moduleTaintsLetters, moduleTaints)", + "", + "\t\t// Apply allowlist.", + "\t\tif allowList[moduleName] {", + "\t\t\tlog.Debug(\"%s module %s is tainting the kernel but it has been allowlisted (taints: %v)\",", + "\t\t\t\tnt.node, moduleName, moduleTaints)", + "\t\t} else {", + "\t\t\tfilteredTainters[moduleName] = moduleTaintsLetters", + 
"\t\t}", + "\t}", + "", + "\t// Finally, get all the bits that all the modules have set.", + "\ttaintBits, err = GetTaintedBitsByModules(allTainters)", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get taint bits by modules: %w\", err)", + "\t}", + "", + "\treturn filteredTainters, taintBits, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Error", + "qualifiedName": "Logger.Error", + "exported": true, + "receiver": "Logger", + "signature": "func(string, ...any)()", + "doc": "Logger.Error Logs a formatted message at the error level\n\nThis method receives a format string followed by optional arguments, then\ndelegates to the generic logging helper passing the error severity. It uses\nthe Logger instance if provided; otherwise it falls back to the default\nlogger. 
The resulting entry is emitted immediately without returning any\nvalue.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:268", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite", + "name": "main", + "kind": "function", + "source": [ + "func main() {", + "\trootCmd := newRootCmd()", + "\tif err := rootCmd.Execute(); err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tos.Exit(1)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "NewCommand", + "kind": "function", + "source": [ + "func NewCommand() *cobra.Command {", + "\tclaimCompareFiles.Flags().StringVarP(", + "\t\t\u0026Claim1FilePathFlag, \"claim1\", \"1\", \"\",", + "\t\t\"existing claim1 file. (Required) first file to compare\",", + "\t)", + "\tclaimCompareFiles.Flags().StringVarP(", + "\t\t\u0026Claim2FilePathFlag, \"claim2\", \"2\", \"\",", + "\t\t\"existing claim2 file. 
(Required) second file to compare\",", + "\t)", + "\terr := claimCompareFiles.MarkFlagRequired(\"claim1\")", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to mark flag claim1 as required: %v\", err)", + "\t\treturn nil", + "\t}", + "\terr = claimCompareFiles.MarkFlagRequired(\"claim2\")", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to mark flag claim2 as required: %v\", err)", + "\t\treturn nil", + "\t}", + "", + "\treturn claimCompareFiles", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "addPreflightTestsToCatalog", + "kind": "function", + "source": [ + "func addPreflightTestsToCatalog() {", + "\tconst dummy = \"dummy\"", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\tlog.Error(\"Error creating artifact, failed to add preflight tests to catalog: %v\", err)", + "\t\treturn", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\toptsOperator := []plibOperator.Option{}", + "\toptsContainer := []plibContainer.Option{}", + "\tcheckOperator := plibOperator.NewCheck(dummy, dummy, []byte(\"\"), optsOperator...)", + "\tcheckContainer := plibContainer.NewCheck(dummy, optsContainer...)", + "\t_, checksOperator, err := checkOperator.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight operator tests: %v\", err)", + "\t}", + "\t_, checksContainer, err := checkContainer.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight container tests: %v\", err)", + "\t}", + "", + "\tallChecks := checksOperator", + "\tallChecks = append(allChecks, checksContainer...)", + "", + "\tfor _, c := range allChecks {", + "\t\tremediation := c.Help().Suggestion", + "", + "\t\t// Custom override for specific preflight test remediation", + "\t\tif c.Name() == \"FollowsRestrictedNetworkEnablementGuidelines\" {", + "\t\t\tremediation = \"If consumers of your 
operator may need to do so on a restricted network, implement the guidelines outlined in OCP documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/latest/html/disconnected_environments/olm-restricted-networks\"", + "\t\t}", + "", + "\t\t_ = identifiers.AddCatalogEntry(", + "\t\t\tc.Name(),", + "\t\t\tcommon.PreflightTestKey,", + "\t\t\tc.Metadata().Description,", + "\t\t\tremediation,", + "\t\t\tidentifiers.NoDocumentedProcess,", + "\t\t\tidentifiers.NoDocLink,", + "\t\t\ttrue,", + "\t\t\tmap[string]string{", + "\t\t\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\t\t\tidentifiers.Telco: identifiers.Optional,", + "\t\t\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\t\t\tidentifiers.Extended: identifiers.Optional,", + "\t\t\t},", + "\t\t\tidentifiers.TagCommon)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "outputJS", + "kind": "function", + "source": [ + "func outputJS() {", + "\tout, err := json.MarshalIndent(identifiers.Classification, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Error(\"could not Marshall classification, err=%s\", err)", + "\t\treturn", + "\t}", + "\tfmt.Printf(\"classification= %s \", out)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string, summary catalogSummary) { //nolint:funlen", + "\t// Adds Preflight tests to catalog", + "\taddPreflightTestsToCatalog()", + "", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", 
+ "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"## Test Case list\\n\\n\" +", + "\t\t\"Test Cases are the specifications used to perform a meaningful test. \" +", + "\t\t\"Test cases may run once, or several times against several targets. The Red Hat Best Practices Test Suite for Kubernetes includes \" +", + "\t\t\"a number of normative and informative tests to ensure that workloads follow best practices. \" +", + "\t\t\"Here is the list of available Test Cases:\\n\"", + "", + "\tsummary.testPerScenario = make(map[string]map[string]int)", + "\tsummary.testsPerSuite = make(map[string]int)", + "\tsummary.totalSuites = len(suites)", + "\tfor _, suite := range suites {", + "\t\toutString += fmt.Sprintf(\"\\n### %s\\n\", suite)", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tsummary.testsPerSuite[suite]++", + "\t\t\tsummary.totalTests++", + "\t\t\t// Add the suite to the comma separate list of tags shown. 
The tags are also modified in the:", + "\t\t\t// GetTestIDAndLabels function.", + "\t\t\ttags := strings.ReplaceAll(identifiers.Catalog[k.identifier].Tags, \"\\n\", \" \") + \",\" + k.identifier.Suite", + "", + "\t\t\tkeys := make([]string, 0, len(identifiers.Catalog[k.identifier].CategoryClassification))", + "", + "\t\t\tfor scenario := range identifiers.Catalog[k.identifier].CategoryClassification {", + "\t\t\t\tkeys = append(keys, scenario)", + "\t\t\t\t_, ok := summary.testPerScenario[scenarioIDToText(scenario)]", + "\t\t\t\tif !ok {", + "\t\t\t\t\tchild := make(map[string]int)", + "\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)] = child", + "\t\t\t\t}", + "\t\t\t\tswitch scenario {", + "\t\t\t\tcase identifiers.NonTelco:", + "\t\t\t\t\ttag := identifiers.TagCommon", + "\t\t\t\t\tif identifiers.Catalog[k.identifier].Tags == tag {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\tdefault:", + "\t\t\t\t\ttag := strings.ToLower(scenario)", + "\t\t\t\t\tif strings.Contains(identifiers.Catalog[k.identifier].Tags, tag) {", + "\t\t\t\t\t\tsummary.testPerScenario[scenarioIDToText(scenario)][identifiers.Catalog[k.identifier].CategoryClassification[scenario]]++", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tsort.Strings(keys)", + "\t\t\tclassificationString := \"|**Scenario**|**Optional/Mandatory**|\\n\"", + "\t\t\tfor _, j := range keys {", + "\t\t\t\tclassificationString += \"|\" + scenarioIDToText(j) + \"|\" + identifiers.Catalog[k.identifier].CategoryClassification[j] + \"|\\n\"", + "\t\t\t}", + "", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"\\n#### %s\\n\\n\", k.testName)", + "\t\t\toutString += \"|Property|Description|\\n\"", + "\t\t\toutString += \"|---|---|\\n\"", + "\t\t\toutString += fmt.Sprintf(\"|Unique ID|%s|\\n\", k.identifier.Id)", + "\t\t\toutString += 
fmt.Sprintf(\"|Description|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Suggested Remediation|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Best Practice Reference|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"|Exception Process|%s|\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].ExceptionProcess, \"\\n\", \" \"))", + "", + "\t\t\t// Add impact statement if available - fail if missing", + "\t\t\tif impact, exists := identifiers.ImpactMap[k.identifier.Id]; exists {", + "\t\t\t\toutString += fmt.Sprintf(\"|Impact Statement|%s|\\n\", strings.ReplaceAll(impact, \"\\n\", \" \"))", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Test case %s is missing an impact statement in the ImpactMap\", k.identifier.Id)", + "\t\t\t\tfmt.Printf(\"ERROR: Test case %s is missing an impact statement in the ImpactMap\\n\", k.identifier.Id)", + "\t\t\t\tos.Exit(1)", + "\t\t\t}", + "", + "\t\t\toutString += fmt.Sprintf(\"|Tags|%s|\\n\", tags)", + "\t\t\toutString += classificationString", + "\t\t}", + "\t}", + "", + "\treturn outString, summary", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "ClientsHolder.ExecCommandContainer", + "kind": "function", + "source": [ + "func (clientsholder *ClientsHolder) ExecCommandContainer(", + "\tctx Context, command string) (stdout, stderr string, err error) {", + "\tcommandStr := []string{\"sh\", \"-c\", command}", + "\tvar buffOut bytes.Buffer", + "\tvar buffErr bytes.Buffer", + "", + "\tlog.Debug(\"execute command on ns=%s, pod=%s container=%s, cmd: %s\", ctx.GetNamespace(), ctx.GetPodName(), ctx.GetContainerName(), strings.Join(commandStr, \" \"))", + "\treq := clientsholder.K8sClient.CoreV1().RESTClient().", 
+ "\t\tPost().", + "\t\tNamespace(ctx.GetNamespace()).", + "\t\tResource(\"pods\").", + "\t\tName(ctx.GetPodName()).", + "\t\tSubResource(\"exec\").", + "\t\tVersionedParams(\u0026corev1.PodExecOptions{", + "\t\t\tContainer: ctx.GetContainerName(),", + "\t\t\tCommand: commandStr,", + "\t\t\tStdin: false,", + "\t\t\tStdout: true,", + "\t\t\tStderr: true,", + "\t\t\tTTY: false,", + "\t\t}, scheme.ParameterCodec)", + "", + "\texec, err := remotecommand.NewSPDYExecutor(clientsholder.RestConfig, \"POST\", req.URL())", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\treturn stdout, stderr, err", + "\t}", + "\terr = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{", + "\t\tStdout: \u0026buffOut,", + "\t\tStderr: \u0026buffErr,", + "\t})", + "\tstdout, stderr = buffOut.String(), buffErr.String()", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t\tlog.Error(\"%v\", req.URL())", + "\t\tlog.Error(\"command: %s\", command)", + "\t\tlog.Error(\"stderr: %s\", stderr)", + "\t\tlog.Error(\"stdout: %s\", stdout)", + "\t\treturn stdout, stderr, err", + "\t}", + "\treturn stdout, stderr, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidsFromPidNamespace", + "kind": "function", + "source": [ + "func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) {", + "\tconst command = \"trap \\\"\\\" SIGURG ; ps -e -o pidns,pid,ppid,args\"", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(container.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", container, err)", + "\t}", + "", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"command %q failed to run in probe pod=%s (node=%s): %v\", 
command, ctx.GetPodName(), container.NodeName, err)", + "\t}", + "", + "\tre := regexp.MustCompile(PsRegex)", + "\tmatches := re.FindAllStringSubmatch(stdout, -1)", + "\t// If we do not find a successful log, we fail", + "\tfor _, v := range matches {", + "\t\t// Matching only the right PidNs", + "\t\tif pidNamespace != v[1] {", + "\t\t\tcontinue", + "\t\t}", + "\t\taPidNs, err := strconv.Atoi(v[1])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[1], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPid, err := strconv.Atoi(v[2])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[2], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPPid, err := strconv.Atoi(v[3])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[3], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tp = append(p, \u0026Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid})", + "\t}", + "\treturn p, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "setProxy", + "kind": "function", + "source": [ + "func setProxy(client *http.Client, proxyURL, proxyPort string) {", + "\tif proxyURL != \"\" \u0026\u0026 proxyPort != \"\" {", + "\t\tlog.Debug(\"Proxy is set. 
Using proxy %s:%s\", proxyURL, proxyPort)", + "\t\tproxyURL := fmt.Sprintf(\"%s:%s\", proxyURL, proxyPort)", + "\t\tparsedURL, err := url.Parse(proxyURL)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse proxy URL: %v\", err)", + "\t\t}", + "\t\tlog.Debug(\"Proxy URL: %s\", parsedURL)", + "\t\tclient.Transport = \u0026http.Transport{Proxy: http.ProxyURL(parsedURL)}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "CreateLabels", + "kind": "function", + "source": [ + "func CreateLabels(labelStrings []string) (labelObjects []labelObject) {", + "\tfor _, label := range labelStrings {", + "\t\tr := regexp.MustCompile(labelRegex)", + "", + "\t\tvalues := r.FindStringSubmatch(label)", + "\t\tif len(values) != labelRegexMatches {", + "\t\t\tlog.Error(\"Failed to parse label %q. It will not be used!, \", label)", + "\t\t\tcontinue", + "\t\t}", + "\t\tvar aLabel labelObject", + "\t\taLabel.LabelKey = values[1]", + "\t\taLabel.LabelValue = values[2]", + "\t\tlabelObjects = append(labelObjects, aLabel)", + "\t}", + "\treturn labelObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + 
"\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + 
"\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// 
Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + 
"\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + 
"\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindCrObjectByNameByNamespace", + "kind": "function", + "source": [ + "func FindCrObjectByNameByNamespace(scalesGetter scale.ScalesGetter, ns, name string, groupResourceSchema schema.GroupResource) (*scalingv1.Scale, error) {", + "\tcrScale, err := scalesGetter.Scales(ns).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", ns, name)", + "\t\treturn nil, err", + "\t}", + "\treturn crScale, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindDeploymentByNameByNamespace", + "kind": "function", + "source": [ + "func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.Deployment, error) {", + "\tdp, err := appClient.Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", namespace, name)", + "\t\treturn nil, err", + "\t}", + "\treturn dp, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = 
[]corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindStatefulsetByNameByNamespace", + "kind": "function", + "source": [ + "func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, error) {", + "\tss, err := appClient.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", namespace, name)", + "\t\treturn nil, err", + "\t}", + "\treturn ss, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindTestCrdNames", + "kind": "function", + "source": [ + "func 
FindTestCrdNames(clusterCrds []*apiextv1.CustomResourceDefinition, crdFilters []configuration.CrdFilter) (targetCrds []*apiextv1.CustomResourceDefinition) {", + "\tif len(clusterCrds) == 0 {", + "\t\tlog.Error(\"Cluster does not have any CRDs\")", + "\t\treturn []*apiextv1.CustomResourceDefinition{}", + "\t}", + "\tfor _, crd := range clusterCrds {", + "\t\tfor _, crdFilter := range crdFilters {", + "\t\t\tif strings.HasSuffix(crd.Name, crdFilter.NameSuffix) {", + "\t\t\t\ttargetCrds = append(targetCrds, crd)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn targetCrds", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findAbnormalEvents", + "kind": "function", + "source": [ + "func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (abnormalEvents []corev1.Event) {", + "\tabnormalEvents = []corev1.Event{}", + "\tfor _, ns := range namespaces {", + "\t\tsomeAbnormalEvents, err := oc.Events(ns).List(context.TODO(), metav1.ListOptions{FieldSelector: \"type!=Normal\"})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get event list for namespace %q, err: %v\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tabnormalEvents = append(abnormalEvents, someAbnormalEvents.Items...)", + "\t}", + "\treturn abnormalEvents", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findDeploymentsByLabels", + "kind": "function", + "source": [ + "func findDeploymentsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.Deployment {", + "\tallDeployments := []appsv1.Deployment{}", + "\tfor _, ns := range namespaces {", + "\t\tdps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list deployments in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(dps.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any deployments in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(dps.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The deployment is added only once if at least one pod matches one label in the Deployment", + "\t\t\t\tif isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, \u0026dps.Items[i]) {", + "\t\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all deployments in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q without label\", dps.Items[i].Name, ns)", + "\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\tlog.Info(\"Deployment %s found in ns=%s\", dps.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allDeployments) == 0 {", + "\t\tlog.Warn(\"Did not find any deployment in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allDeployments", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findHpaControllers", + "kind": "function", + "source": [ + "func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scalingv1.HorizontalPodAutoscaler {", + "\tvar m []*scalingv1.HorizontalPodAutoscaler", + "\tfor _, ns := range namespaces {", + "\t\thpas, err := cs.AutoscalingV1().HorizontalPodAutoscalers(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Cannot list HorizontalPodAutoscalers on namespace %q, err: %v\", ns, err)", + "\t\t\treturn m", + "\t\t}", + "\t\tfor i := 0; i \u003c len(hpas.Items); i++ {", + "\t\t\tm = append(m, \u0026hpas.Items[i])", + "\t\t}", + "\t}", + "\tif len(m) == 0 {", + "\t\tlog.Info(\"Cannot find any deployed 
HorizontalPodAutoscaler\")", + "\t}", + "\treturn m", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findOperatorsByLabels", + "kind": "function", + "source": [ + "func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\t// Helper namespaces map to do quick search of the operator's controller namespace.", + "\tnamespacesMap := map[string]bool{}", + "\tfor _, ns := range namespaces {", + "\t\tnamespacesMap[ns.Name] = true", + "\t}", + "", + "\tcsvs = []*olmv1Alpha.ClusterServiceVersion{}", + "\tvar csvList *olmv1Alpha.ClusterServiceVersionList", + "\tfor _, ns := range namespaces {", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tcsvList = findOperatorsMatchingAtLeastOneLabel(olmClient, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching CSVs in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tcsvList, err = olmClient.ClusterServiceVersions(ns.Name).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing csvs in namespace %q , err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\tfor i := range csvList.Items {", + "\t\t\tcsv := \u0026csvList.Items[i]", + "", + "\t\t\t// Filter out CSV if operator's controller pod/s is/are not running in any configured/test namespace.", + "\t\t\tcontrollerNamespace, found := csv.Annotations[nsAnnotation]", + "\t\t\tif !found {", + "\t\t\t\tlog.Error(\"Failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif namespacesMap[controllerNamespace] {", + "\t\t\t\tcsvs = append(csvs, csv)", + 
"\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q (namespace %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findOperatorsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func findOperatorsMatchingAtLeastOneLabel(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespace configuration.Namespace) *olmv1Alpha.ClusterServiceVersionList {", + "\tcsvList := \u0026olmv1Alpha.ClusterServiceVersionList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching CSVs in namespace %q with label %q\", namespace, l)", + "\t\tcsv, err := olmClient.ClusterServiceVersions(namespace.Name).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing csvs in namespace %q with label %q, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tcsvList.Items = append(csvList.Items, csv.Items...)", + "\t}", + "\treturn csvList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findPodsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []labelObject, namespace string) *corev1.PodList {", + "\tallPods := \u0026corev1.PodList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching Pods in namespace %s with label %q\", namespace, l)", + "\t\tpods, err := oc.Pods(namespace).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing pods in ns=%s label=%s, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", 
+ "\t\t}", + "\t\tallPods.Items = append(allPods.Items, pods.Items...)", + "\t}", + "\treturn allPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findStatefulSetsByLabels", + "kind": "function", + "source": [ + "func findStatefulSetsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.StatefulSet {", + "\tallStatefulSets := []appsv1.StatefulSet{}", + "\tfor _, ns := range namespaces {", + "\t\tstatefulSet, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list statefulsets in ns=%s, err: %v . Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(statefulSet.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any statefulSet in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(statefulSet.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The StatefulSet is added only once if at least one pod matches one label in the Statefulset", + "\t\t\t\tif isStatefulSetsMatchingAtLeastOneLabel(labels, ns, \u0026statefulSet.Items[i]) {", + "\t\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all statefulsets in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q without label\", statefulSet.Items[i].Name, ns)", + "\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allStatefulSets) == 0 {", + "\t\tlog.Warn(\"Did not find any statefulset in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allStatefulSets", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findSubscriptions", + "kind": "function", + "source": [ + "func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription {", + "\tsubscriptions := []olmv1Alpha.Subscription{}", + "\tfor _, ns := range namespaces {", + "\t\tdisplayNs := ns", + "\t\tif ns == \"\" {", + "\t\t\tdisplayNs = \"All Namespaces\"", + "\t\t}", + "\t\tlog.Debug(\"Searching subscriptions in namespace %q\", displayNs)", + "\t\tsubscription, err := olmClient.Subscriptions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing subscriptions in namespace %q\", ns)", + "\t\t\tcontinue", + "\t\t}", + "\t\tsubscriptions = append(subscriptions, subscription.Items...)", + "\t}", + "", + "\tfor i := range subscriptions {", + "\t\tlog.Info(\"Found subscription %q (ns %q)\", subscriptions[i].Name, subscriptions[i].Namespace)", + "\t}", + "\treturn subscriptions", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getAllCatalogSources", + "kind": "function", + "source": [ + "func getAllCatalogSources(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.CatalogSource) {", + "\tcatalogSourcesList, err := olmClient.CatalogSources(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get CatalogSources in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range catalogSourcesList.Items {", + "\t\tout = append(out, \u0026catalogSourcesList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getAllInstallPlans", + "kind": "function", + "source": [ + "func getAllInstallPlans(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.InstallPlan) {", + 
"\tinstallPlanList, err := olmClient.InstallPlans(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get installplans in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range installPlanList.Items {", + "\t\tout = append(out, \u0026installPlanList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getAllPackageManifests", + "kind": "function", + "source": [ + "func getAllPackageManifests(olmPkgClient olmpkgclient.PackageManifestInterface) (out []*olmpkgv1.PackageManifest) {", + "\tpackageManifestsList, err := olmPkgClient.List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get Package Manifests in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range packageManifestsList.Items {", + "\t\tout = append(out, \u0026packageManifestsList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getAllStorageClasses", + "kind": "function", + "source": [ + "func getAllStorageClasses(client storagev1typed.StorageV1Interface) ([]storagev1.StorageClass, error) {", + "\tstorageclasslist, err := client.StorageClasses().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Error when listing storage classes, err: %v\", err)", + "\t\treturn nil, err", + "\t}", + "\treturn storageclasslist.Items, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getClusterRoleBindings", + "kind": "function", + "source": [ + "func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.ClusterRoleBinding, error) {", + "\t// Get all of the clusterrolebindings from the cluster", + "\t// These are not namespaced so we want all of them", + 
"\tcrbList, crbErr := client.ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})", + "\tif crbErr != nil {", + "\t\tlog.Error(\"Executing clusterrolebinding command failed with error: %v\", crbErr)", + "\t\treturn nil, crbErr", + "\t}", + "\treturn crbList.Items, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getRoleBindings", + "kind": "function", + "source": [ + "func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, error) {", + "\t// Get all of the rolebindings from all namespaces", + "\troleList, roleErr := client.RoleBindings(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif roleErr != nil {", + "\t\tlog.Error(\"Executing rolebinding command failed with error: %v\", roleErr)", + "\t\treturn nil, roleErr", + "\t}", + "\treturn roleList.Items, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getRoles", + "kind": "function", + "source": [ + "func getRoles(client rbacv1typed.RbacV1Interface) ([]rbacv1.Role, error) {", + "\t// Get all of the roles from all namespaces", + "\troleList, roleErr := client.Roles(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif roleErr != nil {", + "\t\tlog.Error(\"Executing roles command failed with error: %v\", roleErr)", + "\t\treturn nil, roleErr", + "\t}", + "\treturn roleList.Items, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isIstioServiceMeshInstalled", + "kind": "function", + "source": [ + "func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs []string) bool {", + "\t// The Istio namespace must be present", + "\tif !stringhelper.StringInSlice(allNs, istioNamespace, false) {", + "\t\tlog.Info(\"Istio Service Mesh not present (the namespace %q does not exists)\", istioNamespace)", + "\t\treturn false", + "\t}", + "", + "\t// The 
Deployment \"istiod\" must be present in an active service mesh", + "\t_, err := appClient.Deployments(istioNamespace).Get(context.TODO(), istioDeploymentName, metav1.GetOptions{})", + "\tif errors.IsNotFound(err) {", + "\t\tlog.Warn(\"The Istio Deployment %q is missing (but the Istio namespace exists)\", istioDeploymentName)", + "\t\treturn false", + "\t} else if err != nil {", + "\t\tlog.Error(\"Failed getting Deployment %q\", istioDeploymentName)", + "\t\treturn false", + "\t}", + "", + "\tlog.Info(\"Istio Service Mesh detected\")", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. 
See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + 
"\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + 
"\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runAfterAllFn", + "kind": "function", + "source": [ + "func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterAll\", group.name)", + "", + "\tif group.afterAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tlastCheck := checks[len(checks)-1]", + "\tzeroRemainingChecks := []*Check{}", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterAll function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterAll function panicked\", \"\\n: \"+stackTrace, group, lastCheck, zeroRemainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterAllFn(group.checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterAll function: 
%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterAll function unexpected error\", err.Error(), group, lastCheck, zeroRemainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runAfterEachFn", + "kind": "function", + "source": [ + "func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterEach for check %s\", group.name, check.ID)", + "", + "\tif group.afterEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runBeforeAllFn", + "kind": "function", + "source": [ + "func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeAll\", group.name)", + "\tif group.beforeAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tfirstCheck := checks[0]", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", 
+ "\t\t\tlog.Error(\"Panic while running beforeAll function:\\n%v\", stackTrace)", + "\t\t\t// Set first check's result as error and skip the remaining ones.", + "\t\t\terr = onFailure(\"beforeAll function panicked\", \"\\n:\"+stackTrace, group, firstCheck, checks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeAllFn(checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeAll function: %v\", err)", + "\t\t// Set first check's result as error and skip the remaining ones.", + "\t\treturn onFailure(\"beforeAll function unexpected error\", err.Error(), group, firstCheck, checks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runBeforeEachFn", + "kind": "function", + "source": [ + "func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeEach for check %s\", group.name, check.ID)", + "\tif group.beforeEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"beforeEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"beforeEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": 
"GetConfigurationFromClaimFile", + "kind": "function", + "source": [ + "func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvironment, err error) {", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn env, err", + "\t}", + "\tvar aRoot claim.Root", + "\tfmt.Printf(\"%s\", data)", + "\tUnmarshalClaim(data, \u0026aRoot)", + "\tconfigJSON, err := j.Marshal(aRoot.Claim.Configurations)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot convert config to json\")", + "\t}", + "\terr = j.Unmarshal(configJSON, \u0026env)", + "\treturn env, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "MarshalConfigurations", + "kind": "function", + "source": [ + "func MarshalConfigurations(env *provider.TestEnvironment) (configurations []byte, err error) {", + "\tconfig := env", + "\tif config == nil {", + "\t\t*config = provider.GetTestEnvironment()", + "\t}", + "\tconfigurations, err = j.Marshal(\u0026config)", + "\tif err != nil {", + "\t\tlog.Error(\"Error converting configurations to JSON: %v\", err)", + "\t\treturn configurations, err", + "\t}", + "\treturn configurations, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ReadClaimFile", + "kind": "function", + "source": [ + "func ReadClaimFile(claimFileName string) (data []byte, err error) {", + "\tdata, err = os.ReadFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadFile failed with err: %v\", err)", + "\t}", + "\tlog.Info(\"Reading claim file at path: %s\", claimFileName)", + "\treturn data, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) 
{", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "populateXMLFromClaim", + "kind": "function", + "source": [ + "func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) TestSuitesXML {", + "\tconst (", + "\t\tTestSuiteName = \"CNF Certification Test Suite\"", + "\t)", + "", + "\t// Collector all of the Test IDs", + "\tallTestIDs := []string{}", + "\tfor testID := range c.Results {", + "\t\tallTestIDs = append(allTestIDs, c.Results[testID].TestID.Id)", + "\t}", + "", + "\t// Sort the test IDs", + "\tsort.Strings(allTestIDs)", + "", + "\txmlOutput := TestSuitesXML{}", + "\t// \u003ctestsuites\u003e", + "\txmlOutput.Tests = strconv.Itoa(len(c.Results))", + "", + "\t// Count all of the failed tests in the suite", + "\tfailedTests := 0", + "\tfor testID := range c.Results {", + "\t\tif c.Results[testID].State == TestStateFailed {", + "\t\t\tfailedTests++", + "\t\t}", + 
"\t}", + "", + "\t// Count all of the skipped tests in the suite", + "\tskippedTests := 0", + "\tfor testID := range c.Results {", + "\t\tif c.Results[testID].State == TestStateSkipped {", + "\t\t\tskippedTests++", + "\t\t}", + "\t}", + "", + "\txmlOutput.Failures = strconv.Itoa(failedTests)", + "\txmlOutput.Disabled = strconv.Itoa(skippedTests)", + "\txmlOutput.Errors = strconv.Itoa(0)", + "\txmlOutput.Time = strconv.FormatFloat(endTime.Sub(startTime).Seconds(), 'f', 5, 64)", + "", + "\t// \u003ctestsuite\u003e", + "\txmlOutput.Testsuite.Name = TestSuiteName", + "\txmlOutput.Testsuite.Tests = strconv.Itoa(len(c.Results))", + "\t// Counters for failed and skipped tests", + "\txmlOutput.Testsuite.Failures = strconv.Itoa(failedTests)", + "\txmlOutput.Testsuite.Skipped = strconv.Itoa(skippedTests)", + "\txmlOutput.Testsuite.Errors = strconv.Itoa(0)", + "", + "\txmlOutput.Testsuite.Time = strconv.FormatFloat(endTime.Sub(startTime).Seconds(), 'f', 5, 64)", + "\txmlOutput.Testsuite.Timestamp = time.Now().UTC().Format(DateTimeFormatDirective)", + "", + "\t// \u003cproperties\u003e", + "", + "\t// \u003ctestcase\u003e", + "\t// Loop through all of the sorted test IDs", + "\tfor _, testID := range allTestIDs {", + "\t\ttestCase := TestCase{}", + "\t\ttestCase.Name = testID", + "\t\ttestCase.Classname = TestSuiteName", + "\t\ttestCase.Status = c.Results[testID].State", + "", + "\t\t// Clean the time strings to remove the \" m=\" suffix", + "\t\tstart, err := time.Parse(DateTimeFormatDirective, strings.Split(c.Results[testID].StartTime, \" m=\")[0])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse start time: %v\", err)", + "\t\t}", + "\t\tend, err := time.Parse(DateTimeFormatDirective, strings.Split(c.Results[testID].EndTime, \" m=\")[0])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse end time: %v\", err)", + "\t\t}", + "", + "\t\t// Calculate the duration of the test case", + "\t\tdifference := end.Sub(start)", + "\t\ttestCase.Time = 
strconv.FormatFloat(difference.Seconds(), 'f', 10, 64)", + "", + "\t\t// Populate the skipped message if the test case was skipped", + "\t\tif testCase.Status == TestStateSkipped {", + "\t\t\ttestCase.Skipped = \u0026SkippedMessage{}", + "\t\t\ttestCase.Skipped.Text = c.Results[testID].SkipReason", + "\t\t} else {", + "\t\t\ttestCase.Skipped = nil", + "\t\t}", + "", + "\t\t// Populate the failure message if the test case failed", + "\t\tif testCase.Status == TestStateFailed {", + "\t\t\ttestCase.Failure = \u0026FailureMessage{}", + "\t\t\ttestCase.Failure.Text = c.Results[testID].CheckDetails", + "\t\t} else {", + "\t\t\ttestCase.Failure = nil", + "\t\t}", + "", + "\t\t// Append the test case to the test suite", + "\t\txmlOutput.Testsuite.Testcase = append(xmlOutput.Testsuite.Testcase, testCase)", + "\t}", + "", + "\treturn xmlOutput", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "IsRHCOSCompatible", + "kind": "function", + "source": [ + "func IsRHCOSCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\t// Exception for beta versions", + "\tif BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion) {", + "\t\treturn true", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\t// Collect the machine version and the entry version", + "\t\tmv, err := gv.NewVersion(machineVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing machineVersion: %s err: %v\", machineVersion, err)", + "\t\t\treturn false", + "\t\t}", + "\t\tev, err := gv.NewVersion(entry.MinRHCOSVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing MinRHCOSVersion: %s err: %v\", 
entry.MinRHCOSVersion, err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// If the machine version \u003e= the entry version", + "\t\treturn mv.GreaterThanOrEqual(ev)", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCniPlugins", + "kind": "function", + "source": [ + "func GetCniPlugins() (out map[string][]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string][]interface{})", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, cniPluginsCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cniPluginsCommand, probePod.String())", + "\t\t\tcontinue", + "\t\t}", + "\t\tdecoded := []interface{}{}", + "\t\terr = json.Unmarshal([]byte(outStr), \u0026decoded)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not decode json file because of: %s\", err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = decoded", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCsiDriver", + "kind": "function", + "source": [ + "func GetCsiDriver() (out map[string]interface{}) {", + "\to := clientsholder.GetClientsHolder()", + "\tcsiDriver, err := o.K8sClient.StorageV1().CSIDrivers().List(context.TODO(), apimachineryv1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Fail CSIDrivers.list err:%s\", err)", + "\t\treturn out", + "\t}", + "\tscheme := runtime.NewScheme()", + "\terr = storagev1.AddToScheme(scheme)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail AddToScheme err:%s\", err)", + "\t\treturn out", + "\t}", + "\tcodec := 
serializer.NewCodecFactory(scheme).LegacyCodec(storagev1.SchemeGroupVersion)", + "\tdata, err := runtime.Encode(codec, csiDriver)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail to encode Nodes to json, er: %s\", err)", + "\t\treturn out", + "\t}", + "", + "\terr = json.Unmarshal(data, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"failed to marshall nodes json, err: %v\", err)", + "\t\treturn out", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetHwInfoAllNodes", + "kind": "function", + "source": [ + "func GetHwInfoAllNodes() (out map[string]NodeHwInfo) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string]NodeHwInfo)", + "\tfor _, probePod := range env.ProbePods {", + "\t\thw := NodeHwInfo{}", + "\t\tlscpu, err := getHWJsonOutput(probePod, o, lscpuCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lscpu for node %s\", probePod.Spec.NodeName)", + "\t\t} else {", + "\t\t\tvar ok bool", + "\t\t\ttemp, ok := lscpu.(map[string]interface{})", + "\t\t\tif !ok {", + "\t\t\t\tlog.Error(\"problem casting lscpu field for node %s, lscpu=%v\", probePod.Spec.NodeName, lscpu)", + "\t\t\t} else {", + "\t\t\t\thw.Lscpu = temp[\"lscpu\"]", + "\t\t\t}", + "\t\t}", + "\t\thw.IPconfig, err = getHWJsonOutput(probePod, o, ipCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting ip config for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lsblk, err = getHWJsonOutput(probePod, o, lsblkCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lsblk for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lspci, err = getHWTextOutput(probePod, o, lspciCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lspci for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = hw", + "\t}", + "\treturn out", + "}" 
+ ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetNodeJSON", + "kind": "function", + "source": [ + "func GetNodeJSON() (out map[string]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "", + "\tnodesJSON, err := json.Marshal(env.Nodes)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not Marshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\terr = json.Unmarshal(nodesJSON, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not unMarshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels", + "name": "labelsExprParser.Eval", + "kind": "function", + "source": [ + "func (exprParser labelsExprParser) Eval(labels []string) bool {", + "\t// Define a map for fast name/ident checking when visiting nodes.", + "\tlabelsMap := make(map[string]bool)", + "\tfor _, label := range labels {", + "\t\tlabelsMap[strings.ReplaceAll(label, \"-\", \"_\")] = true", + "\t}", + "", + "\t// Visit function to walk the labels expression's AST.", + "\tvar visit func(e ast.Expr) bool", + "\tvisit = func(e ast.Expr) bool {", + "\t\tswitch v := e.(type) {", + "\t\tcase *ast.Ident:", + "\t\t\t// If the expression is an identifier, check if it exists in the wordMap.", + "\t\t\tif _, ok := labelsMap[v.Name]; !ok {", + "\t\t\t\treturn false", + "\t\t\t}", + "\t\t\treturn true", + "\t\tcase *ast.ParenExpr:", + "\t\t\treturn visit(v.X)", + "\t\tcase *ast.UnaryExpr:", + "\t\t\tif v.Op == token.NOT {", + "\t\t\t\treturn !visit(v.X)", + "\t\t\t}", + "\t\tcase *ast.BinaryExpr:", + "\t\t\t// If the expression is a binary expression, evaluate both operands.", + "\t\t\tleft := visit(v.X)", + "\t\t\tright := visit(v.Y)", + "\t\t\tswitch v.Op {", + "\t\t\tcase token.LAND:", + "\t\t\t\treturn left \u0026\u0026 right", + "\t\t\tcase token.LOR:", + "\t\t\t\treturn left || right", + "\t\t\tdefault:", + "\t\t\t\treturn false", + 
"\t\t\t}", + "\t\tdefault:", + "\t\t\tlog.Error(\"Unexpected/not-implemented expr: %v\", v)", + "\t\t\treturn false", + "\t\t}", + "\t\treturn false", + "\t}", + "", + "\treturn visit(exprParser.astRootNode)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// Now that we know the catalog source, we are going to count up all of the relatedImages", + "\t// that are associated with the catalog source. This will give us the number of bundles that", + "\t// are available in the catalog source.", + "", + "\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count", + "\tconst (", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn 0", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\treturn getCatalogSourceBundleCountFromProbeContainer(env, cs)", + "\t\t}", + "", + "\t\t// If we didn't find the bundle count via the probe container, we can attempt to use the package manifests", + "\t}", + "", + "\t// If we didn't find the bundle count via the probe container, we can use the package manifests", + "\t// to get the bundle count", + "\treturn getCatalogSourceBundleCountFromPackageManifests(env, cs)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err 
:= configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + 
"\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal 
pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + 
"\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createNodes", + "kind": "function", + "source": [ + "func createNodes(nodes []corev1.Node) map[string]Node {", + "\twrapperNodes := map[string]Node{}", + "", + "\t// machineConfigs is a helper map to avoid download \u0026 process the same mc twice.", + "\tmachineConfigs := map[string]MachineConfig{}", + "\tfor i := range nodes {", + "\t\tnode := \u0026nodes[i]", + "", + "\t\tif !IsOCPCluster() {", + "\t\t\t// Avoid getting Mc info for non ocp clusters.", + "\t\t\twrapperNodes[node.Name] = Node{Data: node}", + "\t\t\tlog.Warn(\"Non-OCP cluster detected. MachineConfig retrieval for node %q skipped.\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Get Node's machineConfig name", + "\t\tmcName, exists := node.Annotations[\"machineconfiguration.openshift.io/currentConfig\"]", + "\t\tif !exists {", + "\t\t\tlog.Error(\"Failed to get machineConfig name for node %q\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Node %q - mc name %q\", node.Name, mcName)", + "\t\tmc, err := getMachineConfig(mcName, machineConfigs)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get machineConfig %q, err: %v\", mcName, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\twrapperNodes[node.Name] = Node{", + "\t\t\tData: node,", + "\t\t\tMc: mc,", + "\t\t}", + "\t}", + "", + "\treturn wrapperNodes", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators 
:= []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = 
append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "filterDPDKRunningPods", + "kind": "function", + "source": [ + "func filterDPDKRunningPods(pods []*Pod) []*Pod {", + "\tvar filteredPods []*Pod", + "\tconst (", + "\t\tdpdkDriver = \"vfio-pci\"", + "\t\tfindDeviceSubCommand = \"find /sys -name\"", + "\t)", + "\to := clientsholder.GetClientsHolder()", + "\tfor _, pod := range pods {", + "\t\tif len(pod.MultusPCIs) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tctx := clientsholder.NewContext(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name)", + "\t\tfindCommand := fmt.Sprintf(\"%s '%s'\", findDeviceSubCommand, pod.MultusPCIs[0])", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, findCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe %s, errStr: %s, err: %v\", findCommand, pod.String(), errStr, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif strings.Contains(outStr, dpdkDriver) {", + "\t\t\tfilteredPods = append(filteredPods, pod)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getAtLeastOneSubscription", + "kind": "function", + "source": [ + "func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, subscriptions []olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest) (atLeastOneSubscription bool) {", + "\tatLeastOneSubscription = false", + "\tfor s := range subscriptions {", + "\t\tsubscription := \u0026subscriptions[s]", + "\t\tif subscription.Status.InstalledCSV != csv.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.SubscriptionName = subscription.Name", + "\t\top.SubscriptionNamespace = subscription.Namespace", + "\t\top.Package = subscription.Spec.Package", + "\t\top.Org = subscription.Spec.CatalogSource", + 
"\t\top.Channel = subscription.Spec.Channel", + "\t\tatLeastOneSubscription = true", + "", + "\t\t// If the channel is not present in the subscription, get the default channel from the package manifest", + "\t\tif op.Channel == \"\" {", + "\t\t\taPackageManifest := getPackageManifestWithSubscription(subscription, packageManifests)", + "\t\t\tif aPackageManifest != nil {", + "\t\t\t\top.Channel = aPackageManifest.Status.DefaultChannel", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Could not determine the default channel, this operator will always fail certification\")", + "\t\t\t}", + "\t\t}", + "\t\tbreak", + "\t}", + "\treturn atLeastOneSubscription", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getCatalogSourceBundleCountFromProbeContainer", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\t\tcmd := \"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, 
cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "parseSchedulingPolicyAndPriority", + "kind": "function", + "source": [ + "func parseSchedulingPolicyAndPriority(chrtCommandOutput string) (schedPolicy string, schedPriority int, err error) {", + "\t/*\tSample output:", + "\t\tpid 476's current scheduling policy: SCHED_OTHER", + "\t\tpid 476's current scheduling priority: 0*/", + "", + "\tlines := strings.Split(chrtCommandOutput, newLineCharacter)", + "", + "\tfor _, line := range lines {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\ttokens := strings.Fields(line)", + "\t\tlastToken := tokens[len(tokens)-1]", + "", + "\t\tswitch {", + "\t\tcase strings.Contains(line, CurrentSchedulingPolicy):", + "\t\t\tschedPolicy = lastToken", + "\t\tcase strings.Contains(line, CurrentSchedulingPriority):", + "\t\t\tschedPriority, err = strconv.Atoi(lastToken)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error obtained during strconv %v\", err)", 
+ "\t\t\t\treturn schedPolicy, InvalidPriority, err", + "\t\t\t}", + "\t\tdefault:", + "\t\t\treturn schedPolicy, InvalidPriority, fmt.Errorf(\"invalid: %s\", line)", + "\t\t}", + "\t}", + "\treturn schedPolicy, schedPriority, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": "getCrsPerNamespaces", + "kind": "function", + "source": [ + "func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces map[string][]string, err error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, version := range aCrd.Spec.Versions {", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: aCrd.Spec.Group,", + "\t\t\tVersion: version.Name,", + "\t\t\tResource: aCrd.Spec.Names.Plural,", + "\t\t}", + "\t\tlog.Debug(\"Looking for CRs from CRD: %s api version:%s group:%s plural:%s\", aCrd.Name, version.Name, aCrd.Spec.Group, aCrd.Spec.Names.Plural)", + "\t\tcrs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error getting %s: %v\\n\", aCrd.Name, err)", + "\t\t\treturn crdNamespaces, err", + "\t\t}", + "\t\tcrdNamespaces = make(map[string][]string)", + "\t\tfor _, cr := range crs.Items {", + "\t\t\tname := cr.Object[\"metadata\"].(map[string]interface{})[\"name\"]", + "\t\t\tnamespace := cr.Object[\"metadata\"].(map[string]interface{})[\"namespace\"]", + "\t\t\tvar namespaceStr, nameStr string", + "\t\t\tif namespace == nil {", + "\t\t\t\tnamespaceStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnamespaceStr = fmt.Sprintf(\"%s\", namespace)", + "\t\t\t}", + "\t\t\tif name == nil {", + "\t\t\t\tnameStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnameStr = fmt.Sprintf(\"%s\", name)", + "\t\t\t}", + "\t\t\tcrdNamespaces[namespaceStr] = append(crdNamespaces[namespaceStr], nameStr)", + "\t\t}", + "\t}", + "\treturn crdNamespaces, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CordonHelper", + "kind": "function", + "source": [ + "func CordonHelper(name, operation string) error {", + "\tclients := clientsholder.GetClientsHolder()", + "", + "\tlog.Info(\"Performing %s operation on node %s\", operation, name)", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Fetch node object", + "\t\tnode, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tswitch operation {", + "\t\tcase Cordon:", + "\t\t\tnode.Spec.Unschedulable = true", + "\t\tcase Uncordon:", + "\t\t\tnode.Spec.Unschedulable = false", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"cordonHelper: Unsupported operation:%s\", operation)", + "\t\t}", + "\t\t// Update the node", + "\t\t_, err = clients.K8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})", + "\t\treturn err", + "\t})", + "\tif retryErr != nil {", + "\t\tlog.Error(\"can not %s node: %s, err=%v\", operation, name, retryErr)", + "\t}", + "\treturn retryErr", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CountPodsWithDelete", + "kind": "function", + "source": [ + "func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error) {", + "\tcount = 0", + "\tvar wg sync.WaitGroup", + "\tfor _, put := range pods {", + "\t\t_, isDeployment := put.Labels[\"pod-template-hash\"]", + "\t\t_, isStatefulset := put.Labels[\"controller-revision-hash\"]", + "\t\tif put.Spec.NodeName == nodeName \u0026\u0026", + "\t\t\t(isDeployment || isStatefulset) {", + "\t\t\tif skipDaemonPod(put.Pod) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tcount++", + "\t\t\tif mode == NoDelete {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\terr := deletePod(put.Pod, mode, \u0026wg)", + "\t\t\tif 
err != nil {", + "\t\t\t\tlog.Error(\"Error deleting %s\", put)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\twg.Wait()", + "\treturn count, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "deletePod", + "kind": "function", + "source": [ + "func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlog.Debug(\"deleting ns=%s pod=%s with %s mode\", pod.Namespace, pod.Name, mode)", + "\tgracePeriodSeconds := *pod.Spec.TerminationGracePeriodSeconds", + "\t// Create watcher before deleting pod", + "\twatcher, err := clients.K8sClient.CoreV1().Pods(pod.Namespace).Watch(context.TODO(), metav1.ListOptions{", + "\t\tFieldSelector: \"metadata.name=\" + pod.Name + \",metadata.namespace=\" + pod.Namespace,", + "\t})", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"waitPodDeleted ns=%s pod=%s, err=%s\", pod.Namespace, pod.Name, err)", + "\t}", + "\t// Actually deleting pod", + "\terr = clients.K8sClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{", + "\t\tGracePeriodSeconds: \u0026gracePeriodSeconds,", + "\t})", + "\tif err != nil {", + "\t\tlog.Error(\"Error deleting %s err: %v\", pod.String(), err)", + "\t\treturn err", + "\t}", + "\tif mode == DeleteBackground {", + "\t\treturn nil", + "\t}", + "\twg.Add(1)", + "\tpodName := pod.Name", + "\tnamespace := pod.Namespace", + "\tgo func() {", + "\t\tdefer wg.Done()", + "\t\twaitPodDeleted(namespace, podName, gracePeriodSeconds, watcher)", + "\t}()", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "getNotReadyDeployments", + "kind": "function", + "source": [ + "func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment {", + "\tnotReadyDeployments := []*provider.Deployment{}", + "\tfor _, dep := range deployments {", + 
"\t\tready, err := isDeploymentReady(dep.Name, dep.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", dep.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", dep.ToString())", + "\t\t} else {", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t}", + "\t}", + "", + "\treturn notReadyDeployments", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "getNotReadyStatefulSets", + "kind": "function", + "source": [ + "func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet {", + "\tnotReadyStatefulSets := []*provider.StatefulSet{}", + "\tfor _, sts := range statefulSets {", + "\t\tready, err := isStatefulSetReady(sts.Name, sts.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", sts.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", sts.ToString())", + "\t\t} else {", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t}", + "\t}", + "", + "\treturn notReadyStatefulSets", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "OperatorInstalledMoreThanOnce", + "kind": "function", + "source": [ + "func OperatorInstalledMoreThanOnce(operator1, operator2 *provider.Operator) bool {", + "\t// Safeguard against nil operators (should not happen)", + "\tif operator1 == nil || operator2 == nil {", + "\t\treturn false", + "\t}", + "", + "\tlog.Debug(\"Comparing operator %q with operator %q\", operator1.Name, operator2.Name)", + "", + "\t// Retrieve the version from 
each CSV", + "\tcsv1Version := operator1.Csv.Spec.Version.String()", + "\tcsv2Version := operator2.Csv.Spec.Version.String()", + "", + "\tlog.Debug(\"CSV1 Version: %s\", csv1Version)", + "\tlog.Debug(\"CSV2 Version: %s\", csv2Version)", + "", + "\t// Strip the version from the CSV name by removing the suffix (which should be the version)", + "\tcsv1Name := strings.TrimSuffix(operator1.Csv.Name, \".v\"+csv1Version)", + "\tcsv2Name := strings.TrimSuffix(operator2.Csv.Name, \".v\"+csv2Version)", + "", + "\tlog.Debug(\"Comparing CSV names %q and %q\", csv1Name, csv2Name)", + "", + "\t// The CSV name should be the same, but the version should be different", + "\t// if the operator is installed more than once.", + "\tif operator1.Csv != nil \u0026\u0026 operator2.Csv != nil \u0026\u0026", + "\t\tcsv1Name == csv2Name \u0026\u0026", + "\t\tcsv1Version != csv2Version {", + "\t\tlog.Error(\"Operator %q is installed more than once\", operator1.Name)", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, 
err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", 
+ "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", 
catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "WaitOperatorReady", + "kind": "function", + "source": [ + "func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to take into account that its pods", + "\t\t// could have been deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. 
Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + "name": "BaseImageInfo.runCommand", + "kind": "function", + "source": [ + "func (b *BaseImageInfo) runCommand(cmd string) (string, error) {", + "\toutput, outerr, err := b.ClientHolder.ExecCommandContainer(b.OCPContext, cmd)", + "\tif err != nil {", + "\t\tlog.Error(\"can not execute command on container, err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tif outerr != \"\" {", + "\t\tlog.Error(\"Error when running baseimage command, err: %v\", outerr)", + "\t\treturn \"\", errors.New(outerr)", + "\t}", + "\treturn output, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), 
\u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif 
err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" 
+ ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Fatal", + "qualifiedName": "Logger.Fatal", + "exported": true, + "receiver": "Logger", + "signature": "func(string, ...any)()", + "doc": "Logger.Fatal Outputs a fatal error message, writes to stderr and exits the program\n\nThe method logs a formatted fatal message using the Logger’s Logf helper,\nthen prints the same message prefixed with \"FATAL:\" to standard error for\nvisibility. After displaying the message it terminates the process by calling\nos.. No return value is produced because execution stops immediately.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:278", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare", + "name": "claimCompare", + "kind": "function", + "source": [ + "func claimCompare(_ *cobra.Command, _ []string) error {", + "\terr := claimCompareFilesfunc(Claim1FilePathFlag, 
Claim2FilePathFlag)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error comparing claim files: %v\", err)", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetNewClientsHolder", + "kind": "function", + "source": [ + "func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder {", + "\t_, err := newClientsHolder(kubeconfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to 
create k8s clients holder, err: %v\", err)", + "\t}", + "", + "\treturn \u0026clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = 
findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = 
FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + 
"\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + 
"\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = 
config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "GetScaleCrUnderTest", + "kind": "function", + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. 
Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getCrScaleObjects", + "kind": "function", + "source": [ + "func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tvar scaleObjects []ScaleObject", + "\tclients := clientsholder.GetClientsHolder()", + "\tfor _, cr := range crs {", + "\t\tgroupResourceSchema := schema.GroupResource{", + "\t\t\tGroup: crd.Spec.Group,", + "\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t}", + "", + "\t\tname := cr.GetName()", + "\t\tnamespace := cr.GetNamespace()", + "\t\tcrScale, err := clients.ScalingClient.Scales(namespace).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Error while getting the scale of CR=%s (CRD=%s) in namespace %s: %v\", name, crd.Name, namespace, err)", + "\t\t}", + "", + "\t\tscaleObjects = append(scaleObjects, ScaleObject{Scale: crScale, GroupResourceSchema: groupResourceSchema})", + "\t}", + "\treturn 
scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. 
See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, 
filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ClaimBuilder.ToJUnitXML", + "kind": "function", + "source": [ + "func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Time) {", + "\t// Create the JUnit XML file from the claim output.", + "\txmlOutput := populateXMLFromClaim(*c.claimRoot.Claim, startTime, endTime)", + "", + "\t// Write the JUnit XML file.", + "\tpayload, err := xml.MarshalIndent(xmlOutput, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the xml: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Writing JUnit XML file: %s\", outputFile)", + "\terr = os.WriteFile(outputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to write the xml file\")", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "MarshalClaimOutput", + "kind": "function", + "source": [ + "func MarshalClaimOutput(claimRoot *claim.Root) []byte {", + "\tpayload, err := j.MarshalIndent(claimRoot, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the claim: %v\", err)", + "\t}", + "\treturn payload", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "UnmarshalClaim", + "kind": "function", + "source": [ + "func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) {", + "\terr := j.Unmarshal(claimFile, \u0026claimRoot)", + "\tif err != nil {", + "\t\tlog.Fatal(\"error unmarshalling claim file: %v\", err)", + "\t}", + "}" + ] + }, + { 
+ "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "UnmarshalConfigurations", + "kind": "function", + "source": [ + "func UnmarshalConfigurations(configurations []byte, claimConfigurations map[string]interface{}) {", + "\terr := j.Unmarshal(configurations, \u0026claimConfigurations)", + "\tif err != nil {", + "\t\tlog.Fatal(\"error unmarshalling configurations: %v\", err)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "WriteClaimOutput", + "kind": "function", + "source": [ + "func WriteClaimOutput(claimOutputFile string, payload []byte) {", + "\tlog.Info(\"Writing claim data to %s\", claimOutputFile)", + "\terr := os.WriteFile(claimOutputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing claim data:\\n%s\", string(payload))", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. 
Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = 
GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = 
getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", 
+ "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif 
!isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + 
"\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "testPreflightContainers", + "kind": "function", + "source": [ + "func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Using a cache to prevent unnecessary processing of images if we already have the results available", + "\tpreflightImageCache := make(map[string]provider.PreflightResultsDB)", + "", + "\t// Loop through all of the containers, run preflight, and set their results into their respective objects", + "\tfor _, cut := range env.Containers {", + "\t\terr := cut.SetPreflightResults(preflightImageCache, env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on image %q, err: %v\", cut.Image, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight container tests for %d containers\", len(env.Containers))", + "", + "\t// Handle Container-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromContainerResults(env.Containers) {", + "\t\tlog.Info(\"Setting Preflight container test results for %q\", testName)", + "\t\tgeneratePreflightContainerCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Containers)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": 
"testPreflightOperators", + "kind": "function", + "source": [ + "func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Loop through all of the operators, run preflight, and set their results into their respective object", + "\tfor _, op := range env.Operators {", + "\t\t// Note: We are not using a cache here for the operator bundle images because", + "\t\t// in-general you are only going to have an operator installed once in a cluster.", + "\t\terr := op.SetPreflightResults(env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on operator %q, err: %v\", op.Name, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight operator tests for %d operators\", len(env.Operators))", + "", + "\t// Handle Operator-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromOperatorResults(env.Operators) {", + "\t\tlog.Info(\"Setting Preflight operator test results for %q\", testName)", + "\t\tgeneratePreflightOperatorCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Operators)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from 
the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ 
= clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "updateTnf", + "kind": "function", + "source": [ + 
"func updateTnf(tnfConfig []byte, data *RequestedData) []byte {", + "\t// Unmarshal the YAML data into a Config struct", + "\tvar config configuration.TestConfiguration", + "", + "\terr := yaml.Unmarshal(tnfConfig, \u0026config)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error unmarshalling YAML: %v\", err)", + "\t}", + "", + "\t// Modify the configuration", + "\tvar namespace []configuration.Namespace", + "\tfor _, tnamespace := range data.TargetNameSpaces {", + "\t\tnamespace = append(namespace, configuration.Namespace{Name: tnamespace})", + "\t}", + "\tconfig.TargetNameSpaces = namespace", + "", + "\tconfig.PodsUnderTestLabels = data.PodsUnderTestLabels", + "", + "\tconfig.OperatorsUnderTestLabels = data.OperatorsUnderTestLabels", + "", + "\tvar managedDeployments []configuration.ManagedDeploymentsStatefulsets", + "\tfor _, val := range data.ManagedDeployments {", + "\t\tmanagedDeployments = append(managedDeployments, configuration.ManagedDeploymentsStatefulsets{Name: val})", + "\t}", + "\tconfig.ManagedDeployments = managedDeployments", + "", + "\tvar managedStatefulsets []configuration.ManagedDeploymentsStatefulsets", + "\tfor _, val := range data.ManagedDeployments {", + "\t\tmanagedStatefulsets = append(managedStatefulsets, configuration.ManagedDeploymentsStatefulsets{Name: val})", + "\t}", + "\tconfig.ManagedStatefulsets = managedStatefulsets", + "", + "\tvar crdFilter []configuration.CrdFilter", + "\tfor i := range data.TargetCrdFiltersnameSuffix {", + "\t\tval := true", + "\t\tif data.TargetCrdFiltersscalable[i] == \"false\" {", + "\t\t\tval = false", + "\t\t}", + "\t\tcrdFilter = append(crdFilter, configuration.CrdFilter{NameSuffix: data.TargetCrdFiltersnameSuffix[i],", + "\t\t\tScalable: val})", + "\t}", + "\tconfig.CrdFilters = crdFilter", + "", + "\tvar acceptedKernelTaints []configuration.AcceptedKernelTaintsInfo", + "\tfor _, val := range data.AcceptedKernelTaints {", + "\t\tacceptedKernelTaints = append(acceptedKernelTaints, 
configuration.AcceptedKernelTaintsInfo{Module: val})", + "\t}", + "\tconfig.AcceptedKernelTaints = acceptedKernelTaints", + "", + "\tvar skipHelmChartList []configuration.SkipHelmChartList", + "\tfor _, val := range data.SkipHelmChartList {", + "\t\tskipHelmChartList = append(skipHelmChartList, configuration.SkipHelmChartList{Name: val})", + "\t}", + "\tconfig.SkipHelmChartList = skipHelmChartList", + "", + "\tvar skipScalingTestDeployments []configuration.SkipScalingTestDeploymentsInfo", + "\tfor i := range data.SkipScalingTestDeploymentsname {", + "\t\tskipScalingTestDeployments = append(skipScalingTestDeployments, configuration.SkipScalingTestDeploymentsInfo{Name: data.SkipScalingTestDeploymentsname[i],", + "\t\t\tNamespace: data.SkipScalingTestDeploymentsnamespace[i]})", + "\t}", + "\tconfig.SkipScalingTestDeployments = skipScalingTestDeployments", + "", + "\tvar skipScalingTestStatefulSets []configuration.SkipScalingTestStatefulSetsInfo", + "\tfor i := range data.SkipScalingTestStatefulsetsname {", + "\t\tskipScalingTestStatefulSets = append(skipScalingTestStatefulSets, configuration.SkipScalingTestStatefulSetsInfo{Name: data.SkipScalingTestStatefulsetsname[i],", + "\t\t\tNamespace: data.SkipScalingTestStatefulsetsnamespace[i]})", + "\t}", + "\tconfig.SkipScalingTestStatefulSets = skipScalingTestStatefulSets", + "", + "\tconfig.ServicesIgnoreList = data.Servicesignorelist", + "\tconfig.ValidProtocolNames = data.ValidProtocolNames", + "\tif len(data.CollectorAppPassword) \u003e 0 {", + "\t\tconfig.CollectorAppPassword = data.CollectorAppPassword[0]", + "\t}", + "\tif len(data.ExecutedBy) \u003e 0 {", + "\t\tconfig.ExecutedBy = data.ExecutedBy[0]", + "\t}", + "\tif len(data.PartnerName) \u003e 0 {", + "\t\tconfig.PartnerName = data.PartnerName[0]", + "\t}", + "\tif len(data.ProbeDaemonSetNamespace) \u003e 0 {", + "\t\tconfig.ProbeDaemonSetNamespace = data.ProbeDaemonSetNamespace[0]", + "\t}", + "\tif len(data.ConnectAPIKey) \u003e 0 {", + 
"\t\tconfig.ConnectAPIConfig.APIKey = data.ConnectAPIKey[0]", + "\t}", + "\tif len(data.ConnectProjectID) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProjectID = data.ConnectProjectID[0]", + "\t}", + "\tif len(data.ConnectAPIBaseURL) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.BaseURL = data.ConnectAPIBaseURL[0]", + "\t}", + "\tif len(data.ConnectAPIProxyURL) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProxyURL = data.ConnectAPIProxyURL[0]", + "\t}", + "\tif len(data.ConnectAPIProxyPort) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProxyPort = data.ConnectAPIProxyPort[0]", + "\t}", + "", + "\t// Serialize the modified config back to YAML format", + "\tnewData, err := yaml.Marshal(\u0026config)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error marshaling YAML: %v\", err)", + "\t}", + "\treturn newData", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "Info", + "qualifiedName": "Logger.Info", + "exported": true, + "receiver": "Logger", + "signature": "func(string, ...any)()", + "doc": "Logger.Info Logs an informational message\n\nThis method forwards the supplied format string and arguments to the internal\nlogging routine at the info level. It relies on Logf to create a log record\nwith the appropriate severity, ensuring the message is emitted only if the\nlogger’s configuration allows that level. 
No value is returned.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:247", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": 
"getClusterRestConfig", + "kind": "function", + "source": [ + "func getClusterRestConfig(filenames ...string) (*rest.Config, error) {", + "\trestConfig, err := rest.InClusterConfig()", + "\tif err == nil {", + "\t\tlog.Info(\"CNF Cert Suite is running inside a cluster.\")", + "", + "\t\t// Convert restConfig to clientcmdapi.Config so we can get the kubeconfig \"file\" bytes", + "\t\t// needed by preflight's operator checks.", + "\t\tclientConfig := GetClientConfigFromRestConfig(restConfig)", + "\t\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(clientConfig)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create byte array from kube config reference: %v\", err)", + "\t\t}", + "", + "\t\t// No error: we're inside a cluster.", + "\t\treturn restConfig, nil", + "\t}", + "", + "\tlog.Info(\"Running outside a cluster. Parsing kubeconfig file/s %+v\", filenames)", + "\tif len(filenames) == 0 {", + "\t\treturn nil, errors.New(\"no kubeconfig files set\")", + "\t}", + "", + "\t// Get the rest.Config from the kubeconfig file/s.", + "\tprecedence := []string{}", + "\tprecedence = append(precedence, filenames...)", + "", + "\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()", + "\tloadingRules.Precedence = precedence", + "", + "\tkubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(", + "\t\tloadingRules,", + "\t\t\u0026clientcmd.ConfigOverrides{},", + "\t)", + "", + "\t// Save merged config to temporary kubeconfig file.", + "\tkubeRawConfig, err := kubeconfig.RawConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get kube raw config: %w\", err)", + "\t}", + "", + "\tclientsHolder.KubeConfig, err = createByteArrayKubeConfig(\u0026kubeRawConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to byte array kube config reference: %w\", err)", + "\t}", + "", + "\trestConfig, err = kubeconfig.ClientConfig()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot 
instantiate rest config: %s\", err)", + "\t}", + "", + "\treturn restConfig, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "newClientsHolder", + "kind": "function", + "source": [ + "func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:funlen // this is a special function with lots of assignments", + "\tlog.Info(\"Creating k8s go-clients holder.\")", + "", + "\tvar err error", + "\tclientsHolder.RestConfig, err = getClusterRestConfig(filenames...)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get rest.Config: %v\", err)", + "\t}", + "\tclientsHolder.RestConfig.Timeout = DefaultTimeout", + "", + "\tclientsHolder.DynamicClient, err = dynamic.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate dynamic client (unstructured/dynamic): %s\", err)", + "\t}", + "\tclientsHolder.APIExtClient, err = apiextv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiextv1: %s\", err)", + "\t}", + "\tclientsHolder.OlmClient, err = olmClient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.OlmPkgClient, err = olmpkgclient.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate olm clientset: %s\", err)", + "\t}", + "\tclientsHolder.K8sClient, err = kubernetes.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8sclient: %s\", err)", + "\t}", + "\t// create the oc client", + "\tclientsHolder.OcpClient, err = clientconfigv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ocClient: %s\", err)", + "\t}", + "\tclientsHolder.MachineCfg, 
err = ocpMachine.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate MachineCfg client: %s\", err)", + "\t}", + "\tclientsHolder.K8sNetworkingClient, err = networkingv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate k8s networking client: %s\", err)", + "\t}", + "", + "\tdiscoveryClient, err := discovery.NewDiscoveryClientForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate discoveryClient: %s\", err)", + "\t}", + "", + "\tclientsHolder.GroupResources, err = discoveryClient.ServerPreferredResources()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot get list of resources in cluster: %s\", err)", + "\t}", + "", + "\tresolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)", + "\tgr, err := restmapper.GetAPIGroupResources(clientsHolder.K8sClient.Discovery())", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate GetAPIGroupResources: %s\", err)", + "\t}", + "", + "\tmapper := restmapper.NewDiscoveryRESTMapper(gr)", + "\tclientsHolder.ScalingClient, err = scale.NewForConfig(clientsHolder.RestConfig, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate ScalesGetter: %s\", err)", + "\t}", + "", + "\tclientsHolder.CNCFNetworkingClient, err = cncfNetworkAttachmentv1.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate CNCF networking client\")", + "\t}", + "", + "\tclientsHolder.ApiserverClient, err = apiserverscheme.NewForConfig(clientsHolder.RestConfig)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot instantiate apiserverscheme: %w\", err)", + "\t}", + "", + "\tclientsHolder.ready = true", + "\treturn \u0026clientsHolder, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "CompressResultsArtifacts", + "kind": "function", + "source": [ + "func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) {", + "\tzipFileName := generateZipFileName()", + "\tzipFilePath := filepath.Join(outputDir, zipFileName)", + "", + "\tlog.Info(\"Compressing results artifacts into %s\", zipFilePath)", + "\tzipFile, err := os.Create(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed creating tar.gz file %s in dir %s (filepath=%s): %v\",", + "\t\t\tzipFileName, outputDir, zipFilePath, err)", + "\t}", + "", + "\tzipWriter := gzip.NewWriter(zipFile)", + "\tdefer zipWriter.Close()", + "", + "\ttarWriter := tar.NewWriter(zipWriter)", + "\tdefer tarWriter.Close()", + "", + "\tfor _, file := range filePaths {", + "\t\tlog.Debug(\"Zipping file %s\", file)", + "", + "\t\ttarHeader, err := getFileTarHeader(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\terr = tarWriter.WriteHeader(tarHeader)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to write tar header for %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf, err := os.Open(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to open file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tif _, err = io.Copy(tarWriter, f); err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to tar file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf.Close()", + "\t}", + "", + "\t// Create fully qualified path to the zip file", + "\tzipFilePath, err = filepath.Abs(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get absolute path for %s: %v\", zipFilePath, err)", + "\t}", + "", + "\t// Return the entire path to the zip file", + "\treturn zipFilePath, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": 
"GetCertIDFromConnectAPI", + "kind": "function", + "source": [ + "func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) {", + "\tlog.Info(\"Getting certification ID from Red Hat Connect API\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tprojectID = strings.ReplaceAll(projectID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectAPIBaseURL = strings.ReplaceAll(connectAPIBaseURL, \"\\\"\", \"\")", + "", + "\t// remove quotes from projectID", + "\tprojectIDJSON := fmt.Sprintf(`{ \"projectId\": %q }`, projectID)", + "", + "\t// Convert JSON to bytes", + "\tprojectIDJSONBytes := []byte(projectIDJSON)", + "", + "\t// Create the URL", + "\tcertIDURL := fmt.Sprintf(\"%s/projects/certifications\", connectAPIBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", certIDURL, bytes.NewBuffer(projectIDJSONBytes))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\tlog.Debug(\"Request Body: %s\", req.Body)", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", \"application/json\")", + "\treq.Header.Set(\"Accept\", \"application/json\")", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", certIDURL)", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tres, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer res.Body.Close()", + "", + "\t// Parse the response", + "\tvar 
certIDResponse CertIDResponse", + "\terr = json.NewDecoder(res.Body).Decode(\u0026certIDResponse)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Certification ID retrieved from the API: %d\", certIDResponse.ID)", + "", + "\t// Return the certification ID", + "\treturn fmt.Sprintf(\"%d\", certIDResponse.ID), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "SendResultsToConnectAPI", + "kind": "function", + "source": [ + "func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error {", + "\tlog.Info(\"Sending results to Red Hat Connect\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tcertID = strings.ReplaceAll(certID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectBaseURL = strings.ReplaceAll(connectBaseURL, \"\\\"\", \"\")", + "", + "\tvar buffer bytes.Buffer", + "", + "\t// Create a new multipart writer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\tlog.Debug(\"Creating form file for %s\", zipFile)", + "", + "\tclaimFile, err := os.Open(zipFile)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "", + "\tfw, err := w.CreateFormFile(\"attachment\", zipFile)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form file: %v\", err)", + "\t}", + "", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"type\", \"RhocpBestPracticeTestResult\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"certId\", certID)", + "\tif err != nil 
{", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"description\", \"CNF Test Results\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create the URL", + "\tconnectAPIURL := fmt.Sprintf(\"%s/attachments/upload\", connectBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", connectAPIURL, \u0026buffer)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// Create a client", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API upload", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tresponse, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer response.Body.Close()", + "", + "\t// Parse the result of the request", + "\tvar uploadResult UploadResult", + "\terr = json.NewDecoder(response.Body).Decode(\u0026uploadResult)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Download URL: %s\", uploadResult.DownloadURL)", + "\tlog.Info(\"Upload Date: %s\", uploadResult.UploadedDate)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses 
- err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + 
"\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the 
OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + 
"\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov 
network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "GetScaleCrUnderTest", + "kind": "function", + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. 
Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findDeploymentsByLabels", + "kind": "function", + "source": [ + "func findDeploymentsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.Deployment {", + "\tallDeployments := []appsv1.Deployment{}", + "\tfor _, ns := range namespaces {", + "\t\tdps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list deployments in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(dps.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any deployments in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(dps.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The deployment is added only once if at least one pod matches one label in the Deployment", + "\t\t\t\tif isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, \u0026dps.Items[i]) {", + "\t\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all deployments in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q without label\", dps.Items[i].Name, ns)", + "\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\tlog.Info(\"Deployment %s found in ns=%s\", dps.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allDeployments) == 0 {", + "\t\tlog.Warn(\"Did not find any deployment in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allDeployments", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findHpaControllers", + "kind": "function", + "source": [ + "func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scalingv1.HorizontalPodAutoscaler {", + "\tvar m []*scalingv1.HorizontalPodAutoscaler", + "\tfor _, ns := range namespaces {", + "\t\thpas, err := cs.AutoscalingV1().HorizontalPodAutoscalers(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Cannot list HorizontalPodAutoscalers on namespace %q, err: %v\", ns, err)", + "\t\t\treturn m", + "\t\t}", + "\t\tfor i := 0; i \u003c len(hpas.Items); i++ {", + "\t\t\tm = append(m, \u0026hpas.Items[i])", + "\t\t}", + "\t}", + "\tif len(m) == 0 {", + "\t\tlog.Info(\"Cannot find any deployed 
HorizontalPodAutoscaler\")", + "\t}", + "\treturn m", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findOperatorsByLabels", + "kind": "function", + "source": [ + "func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\t// Helper namespaces map to do quick search of the operator's controller namespace.", + "\tnamespacesMap := map[string]bool{}", + "\tfor _, ns := range namespaces {", + "\t\tnamespacesMap[ns.Name] = true", + "\t}", + "", + "\tcsvs = []*olmv1Alpha.ClusterServiceVersion{}", + "\tvar csvList *olmv1Alpha.ClusterServiceVersionList", + "\tfor _, ns := range namespaces {", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tcsvList = findOperatorsMatchingAtLeastOneLabel(olmClient, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching CSVs in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tcsvList, err = olmClient.ClusterServiceVersions(ns.Name).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing csvs in namespace %q , err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\tfor i := range csvList.Items {", + "\t\t\tcsv := \u0026csvList.Items[i]", + "", + "\t\t\t// Filter out CSV if operator's controller pod/s is/are not running in any configured/test namespace.", + "\t\t\tcontrollerNamespace, found := csv.Annotations[nsAnnotation]", + "\t\t\tif !found {", + "\t\t\t\tlog.Error(\"Failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif namespacesMap[controllerNamespace] {", + "\t\t\t\tcsvs = append(csvs, csv)", + 
"\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q (namespace %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findStatefulSetsByLabels", + "kind": "function", + "source": [ + "func findStatefulSetsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.StatefulSet {", + "\tallStatefulSets := []appsv1.StatefulSet{}", + "\tfor _, ns := range namespaces {", + "\t\tstatefulSet, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list statefulsets in ns=%s, err: %v . Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(statefulSet.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any statefulSet in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(statefulSet.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The StatefulSet is added only once if at least one pod matches one label in the Statefulset", + "\t\t\t\tif isStatefulSetsMatchingAtLeastOneLabel(labels, ns, \u0026statefulSet.Items[i]) {", + "\t\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all statefulsets in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q without label\", statefulSet.Items[i].Name, ns)", + "\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allStatefulSets) == 0 {", + "\t\tlog.Warn(\"Did not find any statefulset in the configured namespaces %v\", namespaces)", + 
"\t}", + "\treturn allStatefulSets", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findSubscriptions", + "kind": "function", + "source": [ + "func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription {", + "\tsubscriptions := []olmv1Alpha.Subscription{}", + "\tfor _, ns := range namespaces {", + "\t\tdisplayNs := ns", + "\t\tif ns == \"\" {", + "\t\t\tdisplayNs = \"All Namespaces\"", + "\t\t}", + "\t\tlog.Debug(\"Searching subscriptions in namespace %q\", displayNs)", + "\t\tsubscription, err := olmClient.Subscriptions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing subscriptions in namespace %q\", ns)", + "\t\t\tcontinue", + "\t\t}", + "\t\tsubscriptions = append(subscriptions, subscription.Items...)", + "\t}", + "", + "\tfor i := range subscriptions {", + "\t\tlog.Info(\"Found subscription %q (ns %q)\", subscriptions[i].Name, subscriptions[i].Namespace)", + "\t}", + "\treturn subscriptions", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getAllOperators", + "kind": "function", + "source": [ + "func getAllOperators(olmClient v1alpha1.OperatorsV1alpha1Interface) ([]*olmv1Alpha.ClusterServiceVersion, error) {", + "\tcsvs := []*olmv1Alpha.ClusterServiceVersion{}", + "", + "\tcsvList, err := olmClient.ClusterServiceVersions(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"error when listing CSVs in all namespaces, err: %v\", err)", + "\t}", + "\tfor i := range csvList.Items {", + "\t\tcsvs = append(csvs, \u0026csvList.Items[i])", + "\t}", + "", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q (ns %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getOpenshiftVersion", + "kind": "function", + "source": [ + "func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, err error) {", + "\tvar clusterOperator *configv1.ClusterOperator", + "\tclusterOperator, err = oClient.ClusterOperators().Get(context.TODO(), \"openshift-apiserver\", metav1.GetOptions{})", + "\tif err != nil {", + "\t\tswitch {", + "\t\tcase kerrors.IsNotFound(err):", + "\t\t\tlog.Warn(\"Unable to get ClusterOperator CR from openshift-apiserver. Running in a non-OCP cluster.\")", + "\t\t\treturn NonOpenshiftClusterVersion, nil", + "\t\tdefault:", + "\t\t\treturn \"\", err", + "\t\t}", + "\t}", + "", + "\tfor _, ver := range clusterOperator.Status.Versions {", + "\t\tif ver.Name == tnfCsvTargetLabelName {", + "\t\t\t// openshift-apiserver does not report version,", + "\t\t\t// clusteroperator/openshift-apiserver does, and only version number", + "\t\t\tlog.Info(\"OpenShift Version found: %v\", ver.Version)", + "\t\t\treturn ver.Version, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", errors.New(\"could not get openshift version from clusterOperator\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getOperandPodsFromTestCsvs", + "kind": "function", + "source": [ + "func getOperandPodsFromTestCsvs(testCsvs []*olmv1Alpha.ClusterServiceVersion, pods []corev1.Pod) ([]*corev1.Pod, error) {", + "\t// Helper var to store all the managed crds from the operators under test", + "\t// They map key is \"Kind.group/version\" or \"Kind.APIversion\", which should be the same.", + "\t// e.g.: \"Subscription.operators.coreos.com/v1alpha1\"", + "\tcrds := map[string]*olmv1Alpha.ClusterServiceVersion{}", + "", + "\t// First, iterate on each testCsv to fill the helper crds map.", + "\tfor _, csv := range testCsvs {", + "\t\townedCrds := csv.Spec.CustomResourceDefinitions.Owned", + "\t\tif 
len(ownedCrds) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range ownedCrds {", + "\t\t\tcrd := \u0026ownedCrds[i]", + "", + "\t\t\t_, group, found := strings.Cut(crd.Name, \".\")", + "\t\t\tif !found {", + "\t\t\t\treturn nil, fmt.Errorf(\"failed to parse resources and group from crd name %q\", crd.Name)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"CSV %q owns crd %v\", csv.Name, crd.Kind+\"/\"+group+\"/\"+crd.Version)", + "", + "\t\t\tcrdPath := path.Join(crd.Kind, group, crd.Version)", + "\t\t\tcrds[crdPath] = csv", + "\t\t}", + "\t}", + "", + "\t// Now, iterate on every pod in the list to check whether they're owned by any of the CRs that", + "\t// the csvs are managing.", + "\toperandPods := []*corev1.Pod{}", + "\tfor i := range pods {", + "\t\tpod := \u0026pods[i]", + "\t\towners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get top owners of pod %v/%v: %v\", pod.Namespace, pod.Name, err)", + "\t\t}", + "", + "\t\tfor _, owner := range owners {", + "\t\t\tversionedCrdPath := path.Join(owner.Kind, owner.APIVersion)", + "", + "\t\t\tvar csv *olmv1Alpha.ClusterServiceVersion", + "\t\t\tif csv = crds[versionedCrdPath]; csv == nil {", + "\t\t\t\t// The owner is not a CR or it's not a CR owned by any operator under test", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Pod %v/%v has owner CR %s of CRD %q (CSV %v)\", pod.Namespace, pod.Name,", + "\t\t\t\towner.Name, versionedCrdPath, csv.Name)", + "", + "\t\t\toperandPods = append(operandPods, pod)", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn operandPods, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isDeploymentsPodsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func isDeploymentsPodsMatchingAtLeastOneLabel(labels []labelObject, namespace string, deployment *appsv1.Deployment) bool {", + "\tfor 
_, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q using label %s=%s\", deployment.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif deployment.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"Deployment %s found in ns=%s\", deployment.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isIstioServiceMeshInstalled", + "kind": "function", + "source": [ + "func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs []string) bool {", + "\t// The Istio namespace must be present", + "\tif !stringhelper.StringInSlice(allNs, istioNamespace, false) {", + "\t\tlog.Info(\"Istio Service Mesh not present (the namespace %q does not exists)\", istioNamespace)", + "\t\treturn false", + "\t}", + "", + "\t// The Deployment \"istiod\" must be present in an active service mesh", + "\t_, err := appClient.Deployments(istioNamespace).Get(context.TODO(), istioDeploymentName, metav1.GetOptions{})", + "\tif errors.IsNotFound(err) {", + "\t\tlog.Warn(\"The Istio Deployment %q is missing (but the Istio namespace exists)\", istioDeploymentName)", + "\t\treturn false", + "\t} else if err != nil {", + "\t\tlog.Error(\"Failed getting Deployment %q\", istioDeploymentName)", + "\t\treturn false", + "\t}", + "", + "\tlog.Info(\"Istio Service Mesh detected\")", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isStatefulSetsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace string, statefulSet *appsv1.StatefulSet) bool {", + "\tfor _, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q using 
label %s=%s\", statefulSet.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif statefulSet.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. 
See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, 
filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", 
versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "getK8sClientsConfigFileNames", + "kind": "function", + "source": [ + "func getK8sClientsConfigFileNames() []string {", + "\tparams := configuration.GetTestParameters()", + "\tfileNames := []string{}", + "\tif params.Kubeconfig != \"\" {", + "\t\t// Add the kubeconfig path", + "\t\tfileNames = append(fileNames, params.Kubeconfig)", + "\t}", + "\thomeDir := os.Getenv(\"HOME\")", + "\tif homeDir != \"\" {", + "\t\tkubeConfigFilePath := filepath.Join(homeDir, \".kube\", \"config\")", + "\t\t// Check if the kubeconfig path exists", + "\t\tif _, err := os.Stat(kubeConfigFilePath); err == nil {", + "\t\t\tlog.Info(\"kubeconfig path %s is present\", kubeConfigFilePath)", + "\t\t\t// Only add the kubeconfig to the list of paths if it exists, since it is not added by the user", + "\t\t\tfileNames = append(fileNames, kubeConfigFilePath)", + "\t\t} else {", + "\t\t\tlog.Info(\"kubeconfig path %s is not present\", kubeConfigFilePath)", + "\t\t}", + "\t}", + "", + "\treturn fileNames", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RecordChecksResults", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RecordChecksResults() {", + "\tlog.Info(\"Recording checks results of group %s\", group.name)", + "\tfor _, check := range group.checks {", + "\t\trecordCheckResult(check)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan 
\u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + 
"\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ClaimBuilder.Build", + "kind": "function", + "source": [ + "func (c *ClaimBuilder) Build(outputFile string) {", + "\tendTime := time.Now()", + "", + "\tc.claimRoot.Claim.Metadata.EndTime = endTime.UTC().Format(DateTimeFormatDirective)", + "\tc.claimRoot.Claim.Results = checksdb.GetReconciledResults()", + "", + "\t// Marshal the claim and output to file", + "\tpayload := MarshalClaimOutput(c.claimRoot)", + "\tWriteClaimOutput(outputFile, payload)", + "", + "\tlog.Info(\"Claim file created at %s\", outputFile)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ClaimBuilder.ToJUnitXML", + "kind": "function", + "source": [ + "func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Time) {", + "\t// Create the JUnit XML file from the claim output.", + "\txmlOutput := populateXMLFromClaim(*c.claimRoot.Claim, startTime, endTime)", + "", + "\t// Write the JUnit XML file.", + "\tpayload, err := xml.MarshalIndent(xmlOutput, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the xml: %v\", err)", + "\t}", + "", + 
"\tlog.Info(\"Writing JUnit XML file: %s\", outputFile)", + "\terr = os.WriteFile(outputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to write the xml file\")", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ReadClaimFile", + "kind": "function", + "source": [ + "func ReadClaimFile(claimFileName string) (data []byte, err error) {", + "\tdata, err = os.ReadFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadFile failed with err: %v\", err)", + "\t}", + "\tlog.Info(\"Reading claim file at path: %s\", claimFileName)", + "\treturn data, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "WriteClaimOutput", + "kind": "function", + "source": [ + "func WriteClaimOutput(claimOutputFile string, payload []byte) {", + "\tlog.Info(\"Writing claim data to %s\", claimOutputFile)", + "\terr := os.WriteFile(claimOutputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing claim data:\\n%s\", string(payload))", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "LoadConfiguration", + "kind": "function", + "source": [ + "func LoadConfiguration(filePath string) (TestConfiguration, error) {", + "\tif confLoaded {", + "\t\tlog.Debug(\"config file already loaded, return previous element\")", + "\t\treturn configuration, nil", + "\t}", + "", + "\tlog.Info(\"Loading config from file: %s\", filePath)", + "\tcontents, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\terr = yaml.Unmarshal(contents, \u0026configuration)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\t// Set default namespace for the probe daemonset pods, in case it was not set.", + "\tif configuration.ProbeDaemonSetNamespace == \"\" {", + "\t\tlog.Warn(\"No namespace configured for the probe daemonset. 
Defaulting to namespace %q\", defaultProbeDaemonSetNamespace)", + "\t\tconfiguration.ProbeDaemonSetNamespace = defaultProbeDaemonSetNamespace", + "\t} else {", + "\t\tlog.Info(\"Namespace for probe daemonset: %s\", configuration.ProbeDaemonSetNamespace)", + "\t}", + "", + "\tconfLoaded = true", + "\treturn configuration, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.SetPreflightResults", + "kind": "function", + "source": [ + "func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error {", + "\tlog.Info(\"Running Preflight container test for container %q with image %q\", c, c.Image)", + "", + "\t// Short circuit if the image already exists in the cache", + "\tif _, exists := preflightImageCache[c.Image]; exists {", + "\t\tlog.Info(\"Container image %q exists in the cache. Skipping this run.\", c.Image)", + "\t\tc.PreflightResults = preflightImageCache[c.Image]", + "\t\treturn nil", + "\t}", + "", + "\topts := []plibContainer.Option{}", + "\topts = append(opts, plibContainer.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibContainer.WithInsecureConnection())", + "\t}", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibContainer.NewCheck(c.Image, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr 
!= nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "", + "\t\tresults.TestedImage = c.Image", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the Preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\t// Store the Preflight test results into the container's PreflightResults var and into the cache.", + "\tresultsDB := GetPreflightResultsDB(\u0026results)", + "\tc.PreflightResults = resultsDB", + "\tpreflightImageCache[c.Image] = resultsDB", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "CrScale.IsScaleObjectReady", + "kind": "function", + "source": [ + "func (crScale CrScale) IsScaleObjectReady() bool {", + "\treplicas := (crScale.Spec.Replicas)", + "\tlog.Info(\"replicas is %d status replica is %d\", replicas, crScale.Status.Replicas)", + "\treturn crScale.Status.Replicas == replicas", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// Now that we know the catalog source, we are going to count up all of the relatedImages", + "\t// that are associated with the catalog source. 
This will give us the number of bundles that", + "\t// are available in the catalog source.", + "", + "\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count", + "\tconst (", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn 0", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\treturn getCatalogSourceBundleCountFromProbeContainer(env, cs)", + "\t\t}", + "", + "\t\t// If we didn't find the bundle count via the probe container, we can attempt to use the package manifests", + "\t}", + "", + "\t// If we didn't find the bundle count via the probe container, we can use the package manifests", + "\t// to get the bundle count", + "\treturn getCatalogSourceBundleCountFromPackageManifests(env, cs)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, 
k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. 
Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = 
GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "addOperandPodsToTestPods", + "kind": "function", + "source": [ + "func addOperandPodsToTestPods(operandPods []*Pod, env *TestEnvironment) {", + "\tfor _, operandPod := range operandPods {", + "\t\t// Check whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operandPod.Name, operandPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operand pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operand pod.", + "\t\t\ttestPod.IsOperand = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operand pod %v/%v added to test pod list\", operandPod.Namespace, operandPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operandPod)", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "addOperatorPodsToTestPods", + "kind": "function", + "source": [ + "func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment) {", + "\tfor _, operatorPod := range operatorPods {", + "\t\t// Check whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operatorPod.Name, operatorPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operator pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operator pod.", + "\t\t\ttestPod.IsOperator = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operator pod %v/%v added to test pod list\", operatorPod.Namespace, operatorPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operatorPod)", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", 
+ "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + 
"\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + 
"\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + 
"\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been 
deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createNodes", + "kind": "function", + "source": [ + "func createNodes(nodes []corev1.Node) map[string]Node {", + "\twrapperNodes := map[string]Node{}", + "", + "\t// machineConfigs is a helper map to avoid download \u0026 process the same mc twice.", + "\tmachineConfigs := map[string]MachineConfig{}", + "\tfor i := range nodes {", + "\t\tnode := \u0026nodes[i]", + "", + "\t\tif !IsOCPCluster() {", + "\t\t\t// Avoid getting Mc info for non ocp clusters.", + "\t\t\twrapperNodes[node.Name] = Node{Data: node}", + "\t\t\tlog.Warn(\"Non-OCP cluster detected. MachineConfig retrieval for node %q skipped.\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Get Node's machineConfig name", + "\t\tmcName, exists := node.Annotations[\"machineconfiguration.openshift.io/currentConfig\"]", + "\t\tif !exists {", + "\t\t\tlog.Error(\"Failed to get machineConfig name for node %q\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Node %q - mc name %q\", node.Name, mcName)", + "\t\tmc, err := getMachineConfig(mcName, machineConfigs)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get machineConfig %q, err: %v\", mcName, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\twrapperNodes[node.Name] = Node{", + "\t\t\tData: node,", + "\t\t\tMc: mc,", + "\t\t}", + "\t}", + "", + "\treturn wrapperNodes", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests 
[]*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. 
Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getCatalogSourceBundleCountFromProbeContainer", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + "\t\t\tcontinue", + 
"\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\t\tcmd := \"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getUniqueCsvListByName", + "kind": "function", + "source": [ + "func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Alpha.ClusterServiceVersion {", + "\tuniqueCsvsMap := map[string]*olmv1Alpha.ClusterServiceVersion{}", + "\tfor _, csv := range csvs {", + "\t\tuniqueCsvsMap[csv.Name] = csv", + "\t}", + "", + "\tuniqueCsvsList := []*olmv1Alpha.ClusterServiceVersion{}", + "\tlog.Info(\"Found %d 
unique CSVs\", len(uniqueCsvsMap))", + "\tfor name, csv := range uniqueCsvsMap {", + "\t\tlog.Info(\" CSV: %s\", name)", + "\t\tuniqueCsvsList = append(uniqueCsvsList, csv)", + "\t}", + "", + "\t// Sort by name: (1) creates a deterministic output, (2) makes UT easier.", + "\tsort.Slice(uniqueCsvsList, func(i, j int) bool { return uniqueCsvsList[i].Name \u003c uniqueCsvsList[j].Name })", + "\treturn uniqueCsvsList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "isSkipHelmChart", + "kind": "function", + "source": [ + "func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelmChartList) bool {", + "\tif len(skipHelmChartList) == 0 {", + "\t\treturn false", + "\t}", + "\tfor _, helm := range skipHelmChartList {", + "\t\tif helmName == helm.Name {", + "\t\t\tlog.Info(\"Helm chart with name %s was skipped\", helmName)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "GetProcessCPUScheduling", + "kind": "function", + "source": [ + "func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {", + "\tlog.Info(\"Checking the scheduling policy/priority in %v for pid=%d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"chrt -p %d\", pid)", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := crclient.GetNodeProbePodContext(testContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", 0, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\tstdout, stderr, err := ch.ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"command %q failed to run in probe pod %s (node %s): 
%v (stderr: %v)\",", + "\t\t\tcommand, ctx.GetPodName(), testContainer.NodeName, err, stderr)", + "\t}", + "", + "\tschedulePolicy, schedulePriority, err = parseSchedulingPolicyAndPriority(stdout)", + "\tif err != nil {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"error getting the scheduling policy and priority for %v : %v\", testContainer, err)", + "\t}", + "\tlog.Info(\"pid %d in %v has the cpu scheduling policy %s, scheduling priority %d\", pid, testContainer, schedulePolicy, schedulePriority)", + "", + "\treturn schedulePolicy, schedulePriority, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CordonHelper", + "kind": "function", + "source": [ + "func CordonHelper(name, operation string) error {", + "\tclients := clientsholder.GetClientsHolder()", + "", + "\tlog.Info(\"Performing %s operation on node %s\", operation, name)", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Fetch node object", + "\t\tnode, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tswitch operation {", + "\t\tcase Cordon:", + "\t\t\tnode.Spec.Unschedulable = true", + "\t\tcase Uncordon:", + "\t\t\tnode.Spec.Unschedulable = false", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"cordonHelper: Unsupported operation:%s\", operation)", + "\t\t}", + "\t\t// Update the node", + "\t\t_, err = clients.K8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})", + "\t\treturn err", + "\t})", + "\tif retryErr != nil {", + "\t\tlog.Error(\"can not %s node: %s, err=%v\", operation, name, retryErr)", + "\t}", + "\treturn retryErr", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "waitPodDeleted", + "kind": "function", + "source": [ + "func waitPodDeleted(ns, 
podName string, timeout int64, watcher watch.Interface) {", + "\tlog.Debug(\"Entering waitPodDeleted ns=%s pod=%s\", ns, podName)", + "\tdefer watcher.Stop()", + "", + "\tfor {", + "\t\tselect {", + "\t\tcase event := \u003c-watcher.ResultChan():", + "\t\t\tif event.Type == watch.Deleted || event.Type == \"\" {", + "\t\t\t\tlog.Debug(\"ns=%s pod=%s deleted\", ns, podName)", + "\t\t\t\treturn", + "\t\t\t}", + "\t\tcase \u003c-time.After(time.Duration(timeout) * time.Second):", + "\t\t\tlog.Info(\"watch for pod deletion timedout after %d seconds\", timeout)", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= 
ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q 
for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + 
"\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/clusteroperator", + "name": "IsClusterOperatorAvailable", + "kind": "function", + "source": [ + "func IsClusterOperatorAvailable(co *configv1.ClusterOperator) bool {", + "\t// Loop through the conditions, looking for the 'Available' state.", + "\tfor _, condition := range co.Status.Conditions {", + "\t\tif condition.Type == configv1.OperatorAvailable {", + "\t\t\tlog.Info(\"ClusterOperator %q is in an 'Available' state\", co.Name)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"ClusterOperator %q is not in an 'Available' state\", co.Name)", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "NewTester", + "kind": "function", + "source": [ + "func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error) {", + "\ttester := \u0026Tester{", + "\t\tnode: node,", + "\t\tcommander: commander,", + "\t\tcontext: clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name),", + "\t}", + "", + "\tlog.Info(\"Getting node %s numa's hugepages values.\", node.Data.Name)", + "\tvar err error", + "\ttester.nodeHugepagesByNuma, err = tester.getNodeNumaHugePages()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get node hugepages, err: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Parsing machineconfig's kernelArguments and systemd's hugepages units.\")", + "\ttester.mcSystemdHugepagesByNuma, err = getMcSystemdUnitsHugepagesConfig(\u0026tester.node.Mc)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get MC systemd hugepages config, err: %v\", err)", + "\t}", + "", + "\treturn tester, nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.Run", + "kind": "function", + "source": [ + "func (tester *Tester) Run() error {", + "\tif tester.HasMcSystemdHugepagesUnits() {", + "\t\tlog.Info(\"Comparing MachineConfig Systemd hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithMcSystemd(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig systemd's unit hugepages config with node values, err: %v\", err)", + "\t\t}", + "\t} else {", + "\t\tlog.Info(\"Comparing MC KernelArguments hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithKernelArgs(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig KernelArguments with node ones, err: %v\", err)", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.TestNodeHugepagesWithKernelArgs", + "kind": "function", + "source": [ + "func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) {", + "\tkernelArgsHpCountBySize, _ := getMcHugepagesFromMcKernelArguments(\u0026tester.node.Mc)", + "", + "\t// First, check that all the actual hp sizes across all numas exist in the kernelArguments.", + "\tfor nodeNumaIdx, nodeCountBySize := range tester.nodeHugepagesByNuma {", + "\t\tfor nodeSize, nodeCount := range nodeCountBySize {", + "\t\t\tif _, sizeExistsInKernelArgs := kernelArgsHpCountBySize[nodeSize]; !sizeExistsInKernelArgs \u0026\u0026 nodeCount != 0 {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages size=%d does not appear in kernelArgs, but the count is not zero (%d)\",", + "\t\t\t\t\tnodeNumaIdx, nodeSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// kernelArguments don't have numa info, so we'll add up all numa's hp count", + "\t// for the same size and it should match the values in the kernelArgs.", + 
"\tfor kernelSize, kernelCount := range kernelArgsHpCountBySize {", + "\t\ttotal := 0", + "\t\tfor numaIdx, numaCountBySize := range tester.nodeHugepagesByNuma {", + "\t\t\tnodeCount, sizeExistsInNode := numaCountBySize[kernelSize]", + "\t\t\tif !sizeExistsInNode {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d has no hugepages of kernelArgs' size %d\", numaIdx, kernelSize)", + "\t\t\t}", + "\t\t\ttotal += nodeCount", + "\t\t}", + "", + "\t\tif total == kernelCount {", + "\t\t\tlog.Info(\"kernelArguments' hugepages count:%d, size:%d match total node ones for that size.\", kernelCount, kernelSize)", + "\t\t} else {", + "\t\t\treturn false, fmt.Errorf(\"total hugepages of size %d will not match (node count=%d, expected=%d)\", kernelSize, total, kernelCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.getNodeNumaHugePages", + "kind": "function", + "source": [ + "func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error) {", + "\t// This command must run inside the node, so we'll need the node's context to run commands inside the probe daemonset pod.", + "\tstdout, stderr, err := tester.commander.ExecCommandContainer(tester.context, cmd)", + "\tlog.Debug(\"getNodeNumaHugePages stdout: %s, stderr: %s\", stdout, stderr)", + "\tif err != nil {", + "\t\treturn hugepagesByNuma{}, err", + "\t}", + "\tif stderr != \"\" {", + "\t\treturn hugepagesByNuma{}, errors.New(stderr)", + "\t}", + "", + "\thugepages = hugepagesByNuma{}", + "\tr := regexp.MustCompile(outputRegex)", + "\tfor _, line := range strings.Split(stdout, \"\\n\") {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tvalues := r.FindStringSubmatch(line)", + "\t\tif len(values) != numRegexFields {", + "\t\t\treturn hugepagesByNuma{}, fmt.Errorf(\"failed to parse node's numa hugepages output line:%s (stdout: %s)\", line, 
stdout)", + "\t\t}", + "", + "\t\tnumaNode, _ := strconv.Atoi(values[1])", + "\t\thpSize, _ := strconv.Atoi(values[2])", + "\t\thpCount, _ := strconv.Atoi(values[3])", + "", + "\t\tif sizeCounts, exists := hugepages[numaNode]; exists {", + "\t\t\tsizeCounts[hpSize] = hpCount", + "\t\t} else {", + "\t\t\thugepages[numaNode] = countBySize{hpSize: hpCount}", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Node %s hugepages: %s\", tester.node.Data.Name, hugepages)", + "\treturn hugepages, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "getMcSystemdUnitsHugepagesConfig", + "kind": "function", + "source": [ + "func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepages hugepagesByNuma, err error) {", + "\tconst UnitContentsRegexMatchLen = 4", + "\thugepages = hugepagesByNuma{}", + "", + "\tr := regexp.MustCompile(`(?ms)HUGEPAGES_COUNT=(\\d+).*HUGEPAGES_SIZE=(\\d+).*NUMA_NODE=(\\d+)`)", + "\tfor _, unit := range mc.Config.Systemd.Units {", + "\t\tunit.Name = strings.Trim(unit.Name, \"\\\"\")", + "\t\tif !strings.Contains(unit.Name, \"hugepages-allocation\") {", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Systemd Unit with hugepages info -\u003e name: %s, contents: %s\", unit.Name, unit.Contents)", + "\t\tunit.Contents = strings.Trim(unit.Contents, \"\\\"\")", + "\t\tvalues := r.FindStringSubmatch(unit.Contents)", + "\t\tif len(values) \u003c UnitContentsRegexMatchLen {", + "\t\t\treturn hugepagesByNuma{}, fmt.Errorf(\"unable to get hugepages values from mc (contents=%s)\", unit.Contents)", + "\t\t}", + "", + "\t\tnumaNode, _ := strconv.Atoi(values[3])", + "\t\thpSize, _ := strconv.Atoi(values[2])", + "\t\thpCount, _ := strconv.Atoi(values[1])", + "", + "\t\tif sizeCounts, exists := hugepages[numaNode]; exists {", + "\t\t\tsizeCounts[hpSize] = hpCount", + "\t\t} else {", + "\t\t\thugepages[numaNode] = countBySize{hpSize: hpCount}", + "\t\t}", + "\t}", + "", + "\tif 
len(hugepages) \u003e 0 {", + "\t\tlog.Info(\"Machineconfig's systemd.units hugepages: %v\", hugepages)", + "\t} else {", + "\t\tlog.Info(\"No hugepages found in machineconfig system.units\")", + "\t}", + "", + "\treturn hugepages, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "logMcKernelArgumentsHugepages", + "kind": "function", + "source": [ + "func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"MC KernelArguments hugepages config: default_hugepagesz=%d-kB\", defhugepagesz))", + "\tfor size, count := range hugepagesPerSize {", + "\t\tsb.WriteString(fmt.Sprintf(\", size=%dkB - count=%d\", size, count))", + "\t}", + "\tlog.Info(\"%s\", sb.String())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + "name": "BaseImageInfo.TestContainerIsRedHatRelease", + "kind": "function", + "source": [ + "func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error) {", + "\toutput, err := b.runCommand(`if [ -e /etc/redhat-release ]; then cat /etc/redhat-release; else echo \\\"Unknown Base Image\\\"; fi`)", + "\tlog.Info(\"Output from /etc/redhat-release: %q\", output)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "\treturn IsRHEL(output), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + "name": "IsRHEL", + "kind": "function", + "source": [ + "func IsRHEL(output string) bool {", + "\t// If the 'Unknown Base Image' string appears, return false.", + "\tnotRedHatRegex := regexp.MustCompile(NotRedHatBasedRegex)", + "\tmatchNotRedhat := notRedHatRegex.FindAllString(output, -1)", + "\tif len(matchNotRedhat) \u003e 0 {", + "\t\treturn false", + "\t}", + "", + "\t// /etc/redhat-release exists. 
check if it matches the regex for an official build.", + "\tlog.Info(\"redhat-release was found to be: %s\", output)", + "\tredHatVersionRegex := regexp.MustCompile(VersionRegex)", + "\tmatchVersion := redHatVersionRegex.FindAllString(output, -1)", + "\treturn len(matchVersion) \u003e 0", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "testPreflightContainers", + "kind": "function", + "source": [ + "func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Using a cache to prevent unnecessary processing of images if we already have the results available", + "\tpreflightImageCache := make(map[string]provider.PreflightResultsDB)", + "", + "\t// Loop through all of the containers, run preflight, and set their results into their respective objects", + "\tfor _, cut := range env.Containers {", + "\t\terr := cut.SetPreflightResults(preflightImageCache, env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on image %q, err: %v\", cut.Image, err)", + 
"\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight container tests for %d containers\", len(env.Containers))", + "", + "\t// Handle Container-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromContainerResults(env.Containers) {", + "\t\tlog.Info(\"Setting Preflight container test results for %q\", testName)", + "\t\tgeneratePreflightContainerCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Containers)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "testPreflightOperators", + "kind": "function", + "source": [ + "func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Loop through all of the operators, run preflight, and set their results into their respective object", + "\tfor _, op := range env.Operators {", + "\t\t// Note: We are not using a cache here for the operator bundle images because", + "\t\t// in-general you are only going to have an operator installed once in a cluster.", + "\t\terr := op.SetPreflightResults(env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on operator %q, err: %v\", op.Name, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight operator tests for %d operators\", len(env.Operators))", + "", + "\t// Handle Operator-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromOperatorResults(env.Operators) {", + "\t\tlog.Info(\"Setting Preflight operator test results for %q\", testName)", + "\t\tgeneratePreflightOperatorCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, 
env.Operators)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "StartServer", + "kind": "function", + "source": [ + "func StartServer(outputFolder string) {", + "\tctx := context.TODO()", + "\tserver := \u0026http.Server{", + "\t\tAddr: \":8084\", // Server address", + "\t\tReadTimeout: readTimeoutSeconds * time.Second, // Maximum duration for reading the entire request", + "\t\tBaseContext: func(l net.Listener) context.Context {", + "\t\t\tctx = context.WithValue(ctx, outputFolderCtxKey, outputFolder)", + "\t\t\treturn ctx", + "\t\t},", + "\t}", + "", + "\tinstallReqHandlers()", + "", + "\thttp.HandleFunc(\"/runFunction\", runHandler)", + "", + "\tlog.Info(\"Server is running on :8084...\")", + "\tif err := server.ListenAndServe(); err != nil {", + "\t\tpanic(err)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "logStreamHandler", + "kind": "function", + "source": [ + "func logStreamHandler(w http.ResponseWriter, r *http.Request) {", + "\tconn, err := upgrader.Upgrade(w, r, nil)", + "\tif err != nil {", + "\t\tlog.Info(\"WebSocket upgrade error: %v\", err)", + "\t\treturn", + "\t}", + "\tdefer conn.Close()", + "\t// Create a scanner to read the log file line by line", + "\tfor {", + "\t\tscanner := bufio.NewScanner(buf)", + "\t\tfor scanner.Scan() {", + "\t\t\tline := scanner.Bytes()", + "\t\t\tfmt.Println(string(line))", + "\t\t\tline = append(ansihtml.ConvertToHTML(line), []byte(\"\u003cbr\u003e\")...)", + "", + "\t\t\t// Send each log line to the client", + "\t\t\tif err := conn.WriteMessage(websocket.TextMessage, line); err != nil {", + "\t\t\t\tfmt.Println(err)", + "\t\t\t\treturn", + "\t\t\t}", + "\t\t\ttime.Sleep(logTimeout)", + "\t\t}", + "\t\tif err := scanner.Err(); err != nil {", + "\t\t\tlog.Info(\"Error reading log file: %v\", err)", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels 
filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). 
Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Warn", + "qualifiedName": "Logger.Warn", + "exported": true, + "receiver": "Logger", + "signature": "func(string, ...any)()", + "doc": "Logger.Warn Logs a warning message with optional formatting\n\nThis method takes a format string and an arbitrary number of arguments,\npasses them to the underlying Logf function along with the warning level\nconstant. It records the warning using the logger's handler if the warning\nlevel is enabled for the current context. 
The call does not return any value.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:257", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (logger *Logger) Warn(msg string, args ...any) {", + "\tLogf(logger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "With", + "qualifiedName": "Logger.With", + "exported": true, + "receiver": "Logger", + "signature": "func(...any)(*Logger)", + "doc": "Logger.With Creates a child logger with added contextual fields\n\nThe method accepts any number of key-value pairs or structured arguments and\nforwards them to the underlying logger’s With function. 
It constructs a new\nLogger instance that preserves the original logger while extending its\ncontext, allowing subsequent log entries to include these additional fields.\nThe returned logger can be used independently for further logging calls.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:291", + "calls": [ + { + "name": "Logger.With", + "kind": "function", + "source": [ + "func (logger *Logger) With(args ...any) *Logger {", + "\treturn \u0026Logger{", + "\t\tl: logger.l.With(args...),", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.With", + "kind": "function", + "source": [ + "func (logger *Logger) With(args ...any) *Logger {", + "\treturn \u0026Logger{", + "\t\tl: logger.l.With(args...),", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (logger *Logger) With(args ...any) *Logger {", + "\treturn \u0026Logger{", + "\t\tl: logger.l.With(args...),", + "\t}", + "}" + ] + }, + { + "name": "Enabled", + "qualifiedName": "MultiHandler.Enabled", + "exported": true, + "receiver": "MultiHandler", + "signature": "func(context.Context, slog.Level)(bool)", + "doc": "MultiHandler.Enabled True when any contained handler accepts the log level\n\nThe method iterates over all handlers stored in the MultiHandler and queries\neach one to see if it would handle messages at the specified level. 
If any\nhandler reports enabled, the function immediately returns true; otherwise it\nreturns false after checking all handlers.", + "position": "/Users/deliedit/dev/certsuite/internal/log/multi_handler.go:39", + "calls": [ + { + "name": "MultiHandler.Enabled", + "kind": "function", + "source": [ + "func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool {", + "\tfor i := range h.handlers {", + "\t\tif h.handlers[i].Enabled(ctx, level) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "MultiHandler.Enabled", + "kind": "function", + "source": [ + "func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool {", + "\tfor i := range h.handlers {", + "\t\tif h.handlers[i].Enabled(ctx, level) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool {", + "\tfor i := range h.handlers {", + "\t\tif 
h.handlers[i].Enabled(ctx, level) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "Handle", + "qualifiedName": "MultiHandler.Handle", + "exported": true, + "receiver": "MultiHandler", + "signature": "func(context.Context, slog.Record)(error)", + "doc": "MultiHandler.Handle distributes a log record to all registered handlers\n\nThe method iterates over each handler stored in the MultiHandler, cloning the\nincoming record before passing it to ensure isolation between handlers. If\nany handler returns an error, that error is immediately returned and no\nfurther handlers are invoked. When all handlers succeed, the method completes\nwithout error.\n\nnolint:gocritic", + "position": "/Users/deliedit/dev/certsuite/internal/log/multi_handler.go:58", + "calls": [ + { + "name": "MultiHandler.Handle", + "kind": "function", + "source": [ + "func (h *MultiHandler) Handle(ctx context.Context, r slog.Record) error {", + "\tfor i := range h.handlers {", + "\t\tif err := h.handlers[i].Handle(ctx, r.Clone()); err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "Clone", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = 
logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "MultiHandler.Handle", + "kind": "function", + "source": [ + "func (h *MultiHandler) Handle(ctx context.Context, r slog.Record) error {", + "\tfor i := range h.handlers {", + "\t\tif err := h.handlers[i].Handle(ctx, r.Clone()); err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *MultiHandler) Handle(ctx context.Context, r slog.Record) error {", + "\tfor i := range h.handlers {", + "\t\tif err := h.handlers[i].Handle(ctx, r.Clone()); err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "WithAttrs", + "qualifiedName": "MultiHandler.WithAttrs", + "exported": true, + "receiver": "MultiHandler", + "signature": "func([]slog.Attr)(slog.Handler)", + "doc": "MultiHandler.WithAttrs creates a new handler that adds attributes to all sub-handlers\n\nThis method iterates over each contained handler, invoking its\nattribute-adding function with the supplied slice of attributes. It collects\nthe resulting handlers into a new slice and constructs a fresh multi-handler\nfrom them. 
The returned handler behaves like the original but ensures every\nlog record includes the provided attributes.", + "position": "/Users/deliedit/dev/certsuite/internal/log/multi_handler.go:75", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "MultiHandler.WithAttrs", + "kind": "function", + "source": [ + "func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {", + "\thandlersWithAttrs := make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithAttrs[i] = h.handlers[i].WithAttrs(attrs)", + "\t}", + "\treturn NewMultiHandler(handlersWithAttrs...)", + "}" + ] + }, + { + "name": "NewMultiHandler", + "kind": "function", + "source": [ + "func NewMultiHandler(handlers ...slog.Handler) *MultiHandler {", + "\treturn \u0026MultiHandler{", + "\t\thandlers: handlers,", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "MultiHandler.WithAttrs", + "kind": "function", + "source": [ + "func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {", + "\thandlersWithAttrs := make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithAttrs[i] = h.handlers[i].WithAttrs(attrs)", + "\t}", + "\treturn NewMultiHandler(handlersWithAttrs...)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {", + "\thandlersWithAttrs := make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithAttrs[i] = h.handlers[i].WithAttrs(attrs)", + "\t}", + "\treturn NewMultiHandler(handlersWithAttrs...)", + "}" + ] + }, + { + "name": "WithGroup", + "qualifiedName": "MultiHandler.WithGroup", + "exported": true, + "receiver": "MultiHandler", + "signature": "func(string)(slog.Handler)", + "doc": "MultiHandler.WithGroup Adds a named group to 
all underlying handlers\n\nThis method creates a new slice of slog.Handler by iterating over the\nexisting handlers and invoking each one's WithGroup method with the provided\nname. The resulting handlers are then wrapped into a new MultiHandler\ninstance, which is returned as a slog.Handler. This allows grouping log\nentries consistently across multiple output destinations.", + "position": "/Users/deliedit/dev/certsuite/internal/log/multi_handler.go:90", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "MultiHandler.WithGroup", + "kind": "function", + "source": [ + "func (h *MultiHandler) WithGroup(name string) slog.Handler {", + "\thandlersWithGroup := make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithGroup[i] = h.handlers[i].WithGroup(name)", + "\t}", + "\treturn NewMultiHandler(handlersWithGroup...)", + "}" + ] + }, + { + "name": "NewMultiHandler", + "kind": "function", + "source": [ + "func NewMultiHandler(handlers ...slog.Handler) *MultiHandler {", + "\treturn \u0026MultiHandler{", + "\t\thandlers: handlers,", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "MultiHandler.WithGroup", + "kind": "function", + "source": [ + "func (h *MultiHandler) WithGroup(name string) slog.Handler {", + "\thandlersWithGroup := make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithGroup[i] = h.handlers[i].WithGroup(name)", + "\t}", + "\treturn NewMultiHandler(handlersWithGroup...)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (h *MultiHandler) WithGroup(name string) slog.Handler {", + "\thandlersWithGroup := make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithGroup[i] = h.handlers[i].WithGroup(name)", + "\t}", + "\treturn 
NewMultiHandler(handlersWithGroup...)", + "}" + ] + }, + { + "name": "NewCustomHandler", + "qualifiedName": "NewCustomHandler", + "exported": true, + "signature": "func(io.Writer, *slog.HandlerOptions)(*CustomHandler)", + "doc": "NewCustomHandler Creates a thread‑safe log handler that writes to an io.Writer\n\nThis function constructs a CustomHandler with the supplied writer and\noptional slog.HandlerOptions. If options are nil or lack a level, it defaults\nto slog.LevelInfo. The resulting handler can be used by other components to\nemit structured logs in a concurrency‑safe manner.", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:41", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "GetMultiLogger", + "kind": "function", + "source": [ + "func GetMultiLogger(writers ...io.Writer) *Logger {", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tvar handlers []slog.Handler", + "\tif globalLogger != nil {", + "\t\thandlers = []slog.Handler{globalLogger.l.Handler()}", + "\t}", + "", + "\tfor _, writer := range writers {", + "\t\thandlers = append(handlers, NewCustomHandler(writer, \u0026opts))", + "\t}", + "", + "\treturn \u0026Logger{l: slog.New(NewMultiHandler(handlers...))}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "SetupLogger", + "kind": "function", + "source": [ + "func SetupLogger(logWriter io.Writer, level string) {", + "\tlogLevel, err := 
parseLevel(level)", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not parse log level, err: %v. Defaulting to DEBUG.\", err)", + "\t\tglobalLogLevel = slog.LevelInfo", + "\t} else {", + "\t\tglobalLogLevel = logLevel", + "\t}", + "", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tglobalLogger = \u0026Logger{", + "\t\tl: slog.New(NewCustomHandler(logWriter, \u0026opts)),", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCustomHandler(out io.Writer, opts *slog.HandlerOptions) *CustomHandler {", + "\th := \u0026CustomHandler{out: out, mu: \u0026sync.Mutex{}}", + "\tif opts != nil {", + "\t\th.opts = *opts", + "\t}", + "\tif h.opts.Level == nil {", + "\t\th.opts.Level = slog.LevelInfo", + "\t}", + "", + "\treturn h", + "}" + ] + }, + { + "name": "NewMultiHandler", + "qualifiedName": "NewMultiHandler", + "exported": true, + "signature": "func(...slog.Handler)(*MultiHandler)", + "doc": "NewMultiHandler Creates a composite handler for multiple slog handlers\n\nThis function takes any number of slog.Handler instances and returns a new\nMultiHandler that aggregates them. The returned object holds the provided\nhandlers in order, enabling log records to be dispatched to each underlying\nhandler when emitted. 
No additional processing or filtering is performed; it\nsimply stores the handlers for later use.", + "position": "/Users/deliedit/dev/certsuite/internal/log/multi_handler.go:27", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "GetMultiLogger", + "kind": "function", + "source": [ + "func GetMultiLogger(writers ...io.Writer) *Logger {", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tvar handlers []slog.Handler", + "\tif globalLogger != nil {", + "\t\thandlers = []slog.Handler{globalLogger.l.Handler()}", + "\t}", + "", + "\tfor _, writer := range writers {", + "\t\thandlers = append(handlers, NewCustomHandler(writer, \u0026opts))", + "\t}", + "", + "\treturn \u0026Logger{l: slog.New(NewMultiHandler(handlers...))}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "MultiHandler.WithAttrs", + "kind": "function", + "source": [ + "func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {", + "\thandlersWithAttrs := make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithAttrs[i] = h.handlers[i].WithAttrs(attrs)", + "\t}", + "\treturn NewMultiHandler(handlersWithAttrs...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "MultiHandler.WithGroup", + "kind": "function", + "source": [ + "func (h *MultiHandler) WithGroup(name string) slog.Handler {", + "\thandlersWithGroup := 
make([]slog.Handler, len(h.handlers))", + "\tfor i := range h.handlers {", + "\t\thandlersWithGroup[i] = h.handlers[i].WithGroup(name)", + "\t}", + "\treturn NewMultiHandler(handlersWithGroup...)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewMultiHandler(handlers ...slog.Handler) *MultiHandler {", + "\treturn \u0026MultiHandler{", + "\t\thandlers: handlers,", + "\t}", + "}" + ] + }, + { + "name": "SetLogger", + "qualifiedName": "SetLogger", + "exported": true, + "signature": "func(*Logger)()", + "doc": "SetLogger Sets the package-wide logger instance\n\nThis function assigns the provided Logger to a global variable used\nthroughout the logging package, making it available for all subsequent log\noperations. It performs no validation or side effects beyond the assignment\nand does not return any value.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:123", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file 
name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a 
test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SetLogger(l *Logger) {", + "\tglobalLogger = l", + "}" + ] + }, + { + "name": "SetupLogger", + "qualifiedName": "SetupLogger", + "exported": true, + "signature": "func(io.Writer, string)()", + "doc": "SetupLogger configures global logging with a custom level and writer\n\nThis function parses the supplied log level string, falling back to INFO if\nparsing fails, and sets the global logger to 
write formatted slog entries to\nthe provided io.Writer. It uses a custom handler that replaces standard level\nstrings with user‑defined names when necessary. The resulting Logger\ninstance is stored globally for use throughout the application.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:86", + "calls": [ + { + "name": "parseLevel", + "kind": "function", + "source": [ + "func parseLevel(level string) (slog.Level, error) {", + "\tswitch strings.ToLower(level) {", + "\tcase \"debug\":", + "\t\treturn slog.LevelDebug, nil", + "\tcase \"info\":", + "\t\treturn slog.LevelInfo, nil", + "\tcase \"warn\", \"warning\":", + "\t\treturn slog.LevelWarn, nil", + "\tcase \"error\":", + "\t\treturn slog.LevelError, nil", + "\tcase \"fatal\":", + "\t\treturn CustomLevelFatal, nil", + "\t}", + "", + "\treturn 0, fmt.Errorf(\"not a valid slog Level: %q\", level)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "name": "Any", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "log/slog", + "name": "StringValue", + "kind": "function" + }, + { + "pkgPath": "log/slog", + "name": "New", + "kind": "function" + }, + { + "name": "NewCustomHandler", + "kind": "function", + "source": [ + "func NewCustomHandler(out io.Writer, opts *slog.HandlerOptions) *CustomHandler {", + "\th := \u0026CustomHandler{out: out, mu: \u0026sync.Mutex{}}", + "\tif opts != nil {", + "\t\th.opts = *opts", + "\t}", + "\tif h.opts.Level == nil {", + "\t\th.opts.Level = slog.LevelInfo", + "\t}", + "", + "\treturn h", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "CreateGlobalLogFile", + "kind": "function", + "source": [ + "func CreateGlobalLogFile(outputDir, logLevel string) error {", + "\tlogFilePath := outputDir + \"/\" + LogFileName", + "\terr := os.Remove(logFilePath)", + "\tif err != nil \u0026\u0026 !os.IsNotExist(err) {", 
+ "\t\treturn fmt.Errorf(\"could not delete old log file, err: %v\", err)", + "\t}", + "", + "\tlogFile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE, LogFilePermissions)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not open a new log file, err: %v\", err)", + "\t}", + "", + "\tSetupLogger(logFile, logLevel)", + "\tglobalLogFile = logFile", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SetupLogger(logWriter io.Writer, level string) {", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not parse log level, err: %v. Defaulting to DEBUG.\", err)", + "\t\tglobalLogLevel = slog.LevelInfo", + "\t} else {", + "\t\tglobalLogLevel = logLevel", + "\t}", + "", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tglobalLogger = \u0026Logger{", + "\t\tl: slog.New(NewCustomHandler(logWriter, \u0026opts)),", + "\t}", + "}" + ] + }, + { + "name": "Warn", + "qualifiedName": "Warn", + "exported": true, + "signature": "func(string, ...any)()", + "doc": "Warn Logs a message at warning level\n\nThe function forwards its arguments to Logf, supplying the global logger and\na warning severity indicator. It accepts a format string followed by optional\nvalues, which are interpolated into the log entry. 
The resulting record is\nwritten using slog's handling mechanisms.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:203", + "calls": [ + { + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "GetScaleCrUnderTest", + "kind": "function", + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. 
Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findDeploymentsByLabels", + "kind": "function", + "source": [ + "func findDeploymentsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.Deployment {", + "\tallDeployments := []appsv1.Deployment{}", + "\tfor _, ns := range namespaces {", + "\t\tdps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list deployments in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(dps.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any deployments in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(dps.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The deployment is added only once if at least one pod matches one label in the Deployment", + "\t\t\t\tif isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, \u0026dps.Items[i]) {", + "\t\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all deployments in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q without label\", dps.Items[i].Name, ns)", + "\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\tlog.Info(\"Deployment %s found in ns=%s\", dps.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allDeployments) == 0 {", + "\t\tlog.Warn(\"Did not find any deployment in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allDeployments", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findStatefulSetsByLabels", + "kind": "function", + "source": [ + "func findStatefulSetsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.StatefulSet {", + "\tallStatefulSets := []appsv1.StatefulSet{}", + "\tfor _, ns := range namespaces {", + "\t\tstatefulSet, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list statefulsets in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(statefulSet.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any statefulSet in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(statefulSet.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The StatefulSet is added only once if at least one pod matches one label in the Statefulset", + "\t\t\t\tif isStatefulSetsMatchingAtLeastOneLabel(labels, ns, \u0026statefulSet.Items[i]) {", + "\t\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all statefulsets in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q without label\", statefulSet.Items[i].Name, ns)", + "\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allStatefulSets) == 0 {", + "\t\tlog.Warn(\"Did not find any statefulset in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allStatefulSets", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getOpenshiftVersion", + "kind": "function", + "source": [ + "func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, err error) {", + "\tvar clusterOperator *configv1.ClusterOperator", + "\tclusterOperator, err = oClient.ClusterOperators().Get(context.TODO(), \"openshift-apiserver\", metav1.GetOptions{})", + "\tif err != nil {", + "\t\tswitch {", + "\t\tcase kerrors.IsNotFound(err):", + "\t\t\tlog.Warn(\"Unable to get ClusterOperator CR from openshift-apiserver. 
Running in a non-OCP cluster.\")", + "\t\t\treturn NonOpenshiftClusterVersion, nil", + "\t\tdefault:", + "\t\t\treturn \"\", err", + "\t\t}", + "\t}", + "", + "\tfor _, ver := range clusterOperator.Status.Versions {", + "\t\tif ver.Name == tnfCsvTargetLabelName {", + "\t\t\t// openshift-apiserver does not report version,", + "\t\t\t// clusteroperator/openshift-apiserver does, and only version number", + "\t\t\tlog.Info(\"OpenShift Version found: %v\", ver.Version)", + "\t\t\treturn ver.Version, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", errors.New(\"could not get openshift version from clusterOperator\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isIstioServiceMeshInstalled", + "kind": "function", + "source": [ + "func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs []string) bool {", + "\t// The Istio namespace must be present", + "\tif !stringhelper.StringInSlice(allNs, istioNamespace, false) {", + "\t\tlog.Info(\"Istio Service Mesh not present (the namespace %q does not exists)\", istioNamespace)", + "\t\treturn false", + "\t}", + "", + "\t// The Deployment \"istiod\" must be present in an active service mesh", + "\t_, err := appClient.Deployments(istioNamespace).Get(context.TODO(), istioDeploymentName, metav1.GetOptions{})", + "\tif errors.IsNotFound(err) {", + "\t\tlog.Warn(\"The Istio Deployment %q is missing (but the Istio namespace exists)\", istioDeploymentName)", + "\t\treturn false", + "\t} else if err != nil {", + "\t\tlog.Error(\"Failed getting Deployment %q\", istioDeploymentName)", + "\t\treturn false", + "\t}", + "", + "\tlog.Info(\"Istio Service Mesh detected\")", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := 
configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", 
versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = 
group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runCheck", + "kind": "function", + "source": [ + "func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) {", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\t// Don't do anything in case the check was manually aborted by check.Abort().", + "\t\t\tif msg, ok := r.(AbortPanicMsg); ok {", + "\t\t\t\tlog.Warn(\"Check was manually aborted, msg: %v\", msg)", + "\t\t\t\terr = fmt.Errorf(\"%v\", msg)", + "\t\t\t\treturn", + "\t\t\t}", + "", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "", + "\t\t\tcheck.LogError(\"Panic while running check %s function:\\n%v\", check.ID, stackTrace)", + "\t\t\terr = onFailure(fmt.Sprintf(\"check %s function panic\", check.ID), stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := check.Run(); err != nil {", + "\t\tcheck.LogError(\"Unexpected error while running check %s function: %v\", check.ID, err.Error())", + "\t\treturn 
onFailure(fmt.Sprintf(\"check %s function unexpected error\", check.ID), err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "LoadConfiguration", + "kind": "function", + "source": [ + "func LoadConfiguration(filePath string) (TestConfiguration, error) {", + "\tif confLoaded {", + "\t\tlog.Debug(\"config file already loaded, return previous element\")", + "\t\treturn configuration, nil", + "\t}", + "", + "\tlog.Info(\"Loading config from file: %s\", filePath)", + "\tcontents, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\terr = yaml.Unmarshal(contents, \u0026configuration)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\t// Set default namespace for the probe daemonset pods, in case it was not set.", + "\tif configuration.ProbeDaemonSetNamespace == \"\" {", + "\t\tlog.Warn(\"No namespace configured for the probe daemonset. 
Defaulting to namespace %q\", defaultProbeDaemonSetNamespace)", + "\t\tconfiguration.ProbeDaemonSetNamespace = defaultProbeDaemonSetNamespace", + "\t} else {", + "\t\tlog.Info(\"Namespace for probe daemonset: %s\", configuration.ProbeDaemonSetNamespace)", + "\t}", + "", + "\tconfLoaded = true", + "\treturn configuration, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetAllOperatorGroups", + "kind": "function", + "source": [ + "func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\tlist, err := client.OlmClient.OperatorsV1().OperatorGroups(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\treturn nil, err", + "\t}", + "", + "\tif k8serrors.IsNotFound(err) {", + "\t\tlog.Warn(\"No OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\tif len(list.Items) == 0 {", + "\t\tlog.Warn(\"OperatorGroup API resource found but no OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\t// Collect all OperatorGroup pointers", + "\tvar operatorGroups []*olmv1.OperatorGroup", + "\tfor i := range list.Items {", + "\t\toperatorGroups = append(operatorGroups, \u0026list.Items[i])", + "\t}", + "", + "\treturn operatorGroups, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. 
Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = 
GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.AffinityRequired", + "kind": "function", + "source": [ + "func (p *Pod) AffinityRequired() bool {", + "\tif val, ok := p.Labels[AffinityRequiredKey]; ok {", + "\t\tresult, err := strconv.ParseBool(val)", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"failure to parse bool %v\", val)", + "\t\t\treturn false", + "\t\t}", + "\t\treturn result", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get 
OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = 
make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = 
data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", 
len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createNodes", + "kind": "function", + "source": [ + "func createNodes(nodes []corev1.Node) map[string]Node {", + "\twrapperNodes := map[string]Node{}", + "", + "\t// machineConfigs is a helper map to avoid download \u0026 process the same mc twice.", + "\tmachineConfigs := map[string]MachineConfig{}", + "\tfor i := range nodes {", + "\t\tnode := \u0026nodes[i]", + "", + "\t\tif !IsOCPCluster() {", + "\t\t\t// Avoid getting Mc info for non ocp clusters.", + "\t\t\twrapperNodes[node.Name] = Node{Data: node}", + "\t\t\tlog.Warn(\"Non-OCP cluster detected. 
MachineConfig retrieval for node %q skipped.\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Get Node's machineConfig name", + "\t\tmcName, exists := node.Annotations[\"machineconfiguration.openshift.io/currentConfig\"]", + "\t\tif !exists {", + "\t\t\tlog.Error(\"Failed to get machineConfig name for node %q\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Node %q - mc name %q\", node.Name, mcName)", + "\t\tmc, err := getMachineConfig(mcName, machineConfigs)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get machineConfig %q, err: %v\", mcName, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\twrapperNodes[node.Name] = Node{", + "\t\t\tData: node,", + "\t\t\tMc: mc,", + "\t\t}", + "\t}", + "", + "\treturn wrapperNodes", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = 
csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getAtLeastOneCsv", + "kind": "function", + "source": [ + "func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1Alpha.InstallPlan) (atLeastOneCsv bool) {", + "\tatLeastOneCsv = false", + "\tfor _, csvName := range installPlan.Spec.ClusterServiceVersionNames {", + "\t\tif csv.Name != csvName {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif installPlan.Status.BundleLookups == nil {", + "\t\t\tlog.Warn(\"InstallPlan %s for csv %s (ns %s) does not have bundle 
lookups. It will be skipped.\", installPlan.Name, csv.Name, csv.Namespace)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneCsv = true", + "\t\tbreak", + "\t}", + "\treturn atLeastOneCsv", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getCatalogSourceBundleCountFromProbeContainer", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\t\tcmd := \"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + 
"\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getPodContainers", + "kind": "function", + "source": [ + "func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) {", + "\tfor j := 0; j \u003c len(aPod.Spec.Containers); j++ {", + "\t\tcut := \u0026(aPod.Spec.Containers[j])", + "", + "\t\tvar cutStatus corev1.ContainerStatus", + "\t\t// get Status for current container", + "\t\tfor index := range aPod.Status.ContainerStatuses {", + "\t\t\tif aPod.Status.ContainerStatuses[index].Name == cut.Name {", + "\t\t\t\tcutStatus = aPod.Status.ContainerStatuses[index]", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\taRuntime, uid := GetRuntimeUID(\u0026cutStatus)", + "\t\tcontainer := Container{Podname: aPod.Name, Namespace: aPod.Namespace,", + "\t\t\tNodeName: aPod.Spec.NodeName, Container: cut, Status: cutStatus, Runtime: aRuntime, UID: uid,", + "\t\t\tContainerImageIdentifier: buildContainerImageSource(aPod.Spec.Containers[j].Image, cutStatus.ImageID)}", + "", + "\t\t// Warn if readiness probe did not succeeded yet.", + "\t\tif !cutStatus.Ready {", + "\t\t\tlog.Warn(\"Container %q is not ready yet.\", \u0026container)", + "\t\t}", + "", + "\t\t// Warn if container state is not running.", + "\t\tif state := \u0026cutStatus.State; state.Running == nil {", + "\t\t\treason := \"\"", + "\t\t\tswitch {", + "\t\t\tcase state.Waiting != nil:", + "\t\t\t\treason = \"waiting - \" + state.Waiting.Reason", 
+ "\t\t\tcase state.Terminated != nil:", + "\t\t\t\treason = \"terminated - \" + state.Terminated.Reason", + "\t\t\tdefault:", + "\t\t\t\t// When no state was explicitly set, it's assumed to be in \"waiting state\".", + "\t\t\t\treason = \"waiting state reason unknown\"", + "\t\t\t}", + "", + "\t\t\tlog.Warn(\"Container %q is not running (reason: %s, restarts %d): some test cases might fail.\",", + "\t\t\t\t\u0026container, reason, cutStatus.RestartCount)", + "\t\t}", + "", + "\t\t// Build slices of containers based on whether or not we are \"ignoring\" them or not.", + "\t\tif useIgnoreList \u0026\u0026 container.HasIgnoredContainerName() {", + "\t\t\tcontinue", + "\t\t}", + "\t\tcontainerList = append(containerList, \u0026container)", + "\t}", + "\treturn containerList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.TestNodeHugepagesWithMcSystemd", + "kind": "function", + "source": [ + "func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error) {", + "\t// Iterate through node's actual hugepages to make sure that each node's size that does not exist in the", + "\t// MachineConfig has a value of 0.", + "\tfor nodeNumaIdx, nodeCountBySize := range tester.nodeHugepagesByNuma {", + "\t\t// First, numa index should exist in MC", + "\t\tmcCountBySize, numaExistsInMc := tester.mcSystemdHugepagesByNuma[nodeNumaIdx]", + "\t\tif !numaExistsInMc {", + "\t\t\tlog.Warn(\"Numa %d does not exist in machine config. All hugepage count for all sizes must be zero.\", nodeNumaIdx)", + "\t\t\tfor _, count := range nodeCountBySize {", + "\t\t\t\tif count != 0 {", + "\t\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages config does not exist in node's machineconfig\", nodeNumaIdx)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Second, all sizes must exist in mc. If it does not exist (e.g. 
default 2MB size), its count should be 0.", + "\t\tfor nodeSize, nodeCount := range nodeCountBySize {", + "\t\t\tif _, sizeExistsInMc := mcCountBySize[nodeSize]; !sizeExistsInMc \u0026\u0026 nodeCount != 0 {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages size=%d does not appear in MC, but the count is not zero (%d)\",", + "\t\t\t\t\tnodeNumaIdx, nodeSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// Now, iterate through mc's numas and make sure they exist and have the same sizes and values in the node.", + "\tfor mcNumaIdx, mcCountBySize := range tester.mcSystemdHugepagesByNuma {", + "\t\tnodeCountBySize, numaExistsInNode := tester.nodeHugepagesByNuma[mcNumaIdx]", + "\t\t// First, numa index should exist in the node", + "\t\tif !numaExistsInNode {", + "\t\t\treturn false, fmt.Errorf(\"node does not have numa id %d found in the machine config\", mcNumaIdx)", + "\t\t}", + "", + "\t\t// For this numa, iterate through each of the mc's hugepages sizes and compare with node ones.", + "\t\tfor mcSize, mcCount := range mcCountBySize {", + "\t\t\tnodeCount, nodeSizeExistsInNode := nodeCountBySize[mcSize]", + "\t\t\tif !nodeSizeExistsInNode {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa id %d does not have size %d found in the machine config\",", + "\t\t\t\t\tmcNumaIdx, mcSize)", + "\t\t\t}", + "", + "\t\t\tif nodeCount != mcCount {", + "\t\t\t\treturn false, fmt.Errorf(\"mc numa=%d, hugepages count:%d, size:%d does not match node ones=%d\",", + "\t\t\t\t\tmcNumaIdx, mcCount, mcSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "getMcHugepagesFromMcKernelArguments", + "kind": "function", + "source": [ + "func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tdefhugepagesz = RhelDefaultHugepagesz", + 
"\thugepagesPerSize = map[int]int{}", + "", + "\thugepagesz := 0", + "\tfor _, arg := range mc.Spec.KernelArguments {", + "\t\tkeyValueSlice := strings.Split(arg, \"=\")", + "\t\tif len(keyValueSlice) != KernArgsKeyValueSplitLen {", + "\t\t\t// Some kernel arguments do not come in name=value", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tkey, value := keyValueSlice[0], keyValueSlice[1]", + "\t\tif key == HugepagesParam \u0026\u0026 value != \"\" {", + "\t\t\thugepages, _ := strconv.Atoi(value)", + "\t\t\tif _, sizeFound := hugepagesPerSize[hugepagesz]; sizeFound {", + "\t\t\t\t// hugepagesz was parsed before.", + "\t\t\t\thugepagesPerSize[hugepagesz] = hugepages", + "\t\t\t} else {", + "\t\t\t\t// use RHEL's default size for this count.", + "\t\t\t\thugepagesPerSize[RhelDefaultHugepagesz] = hugepages", + "\t\t\t}", + "\t\t}", + "", + "\t\tif key == HugepageszParam \u0026\u0026 value != \"\" {", + "\t\t\thugepagesz = hugepageSizeToInt(value)", + "\t\t\t// Create new map entry for this size", + "\t\t\thugepagesPerSize[hugepagesz] = 0", + "\t\t}", + "", + "\t\tif key == DefaultHugepagesz \u0026\u0026 value != \"\" {", + "\t\t\tdefhugepagesz = hugepageSizeToInt(value)", + "\t\t\t// In case only default_hugepagesz and hugepages values are provided. The actual value should be", + "\t\t\t// parsed next and this default value overwritten.", + "\t\t\thugepagesPerSize[defhugepagesz] = RhelDefaultHugepages", + "\t\t\thugepagesz = defhugepagesz", + "\t\t}", + "\t}", + "", + "\tif len(hugepagesPerSize) == 0 {", + "\t\thugepagesPerSize[RhelDefaultHugepagesz] = RhelDefaultHugepages", + "\t\tlog.Warn(\"No hugepages size found in node's machineconfig. 
Defaulting to size=%dkB (count=%d)\", RhelDefaultHugepagesz, RhelDefaultHugepages)", + "\t}", + "", + "\tlogMcKernelArgumentsHugepages(hugepagesPerSize, defhugepagesz)", + "\treturn hugepagesPerSize, defhugepagesz", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "ShouldRun", + "kind": "function", + "source": [ + "func ShouldRun(labelsExpr string) bool {", + "\tenv = provider.GetTestEnvironment()", + "\tpreflightAllowedLabels := []string{common.PreflightTestKey, identifiers.TagPreflight}", + "", + "\tif !labelsAllowTestRun(labelsExpr, preflightAllowedLabels) {", + "\t\treturn false", + "\t}", + "", + "\t// Add safeguard against running the preflight tests if the docker config does not exist.", + "\tpreflightDockerConfigFile := configuration.GetTestParameters().PfltDockerconfig", + "\tif preflightDockerConfigFile == \"\" || preflightDockerConfigFile == \"NA\" {", + "\t\tlog.Warn(\"Skipping the preflight suite because the Docker Config file is not provided.\")", + "\t\tenv.SkipPreflight = true", + "\t}", + "", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "parseLevel", + "qualifiedName": "parseLevel", + "exported": false, + "signature": "func(string)(slog.Level, error)", + "doc": "parseLevel Converts a string into a slog logging level\n\nThe function takes a textual log level, normalizes it to lowercase, and\nmatches it against known levels such as debug, info, warn, error, and fatal.\nIf the input corresponds to one of these names, the matching slog.Level\nconstant is returned; otherwise an error is produced indicating the value is\ninvalid.", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:304", + "calls": [ + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": 
"Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "SetupLogger", + "kind": "function", + "source": [ + "func SetupLogger(logWriter io.Writer, level string) {", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not parse log level, err: %v. 
Defaulting to DEBUG.\", err)", + "\t\tglobalLogLevel = slog.LevelInfo", + "\t} else {", + "\t\tglobalLogLevel = logLevel", + "\t}", + "", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tglobalLogger = \u0026Logger{", + "\t\tl: slog.New(NewCustomHandler(logWriter, \u0026opts)),", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func parseLevel(level string) (slog.Level, error) {", + "\tswitch strings.ToLower(level) {", + "\tcase \"debug\":", + "\t\treturn slog.LevelDebug, nil", + "\tcase \"info\":", + "\t\treturn slog.LevelInfo, nil", + "\tcase \"warn\", \"warning\":", + "\t\treturn slog.LevelWarn, nil", + "\tcase \"error\":", + "\t\treturn slog.LevelError, nil", + "\tcase \"fatal\":", + "\t\treturn CustomLevelFatal, nil", + "\t}", + "", + "\treturn 0, fmt.Errorf(\"not a valid slog Level: %q\", level)", + "}" + ] + } + ], + "globals": [ + { + "name": "CustomLevelNames", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:17" + }, + { + "name": "globalLogFile", + "exported": false, + "type": "*os.File", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:41" + }, + { + "name": "globalLogLevel", + "exported": false, + "type": "slog.Level", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:40" + }, + { + "name": "globalLogger", + "exported": false, + "type": "*Logger", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:39" + } + ], + "consts": [ + { + "name": "CustomLevelFatal", + 
"exported": true, + "doc": "Custom log levels", + "position": "/Users/deliedit/dev/certsuite/internal/log/custom_handler.go:15" + }, + { + "name": "LevelDebug", + "exported": true, + "doc": "Log levels", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:21" + }, + { + "name": "LevelError", + "exported": true, + "doc": "Log levels", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:24" + }, + { + "name": "LevelFatal", + "exported": true, + "doc": "Log levels", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:25" + }, + { + "name": "LevelInfo", + "exported": true, + "doc": "Log levels", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:22" + }, + { + "name": "LevelWarn", + "exported": true, + "doc": "Log levels", + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:23" + }, + { + "name": "LogFileName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:15" + }, + { + "name": "LogFilePermissions", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/internal/log/log.go:16" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "results", + "files": 4, + "imports": [ + "archive/tar", + "bytes", + "compress/gzip", + "embed", + "encoding/json", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "io", + "mime/multipart", + "net/http", + "net/url", + "os", + "path/filepath", + "strings", + "time" + ], + "structs": [ + { + "name": "CertIDResponse", + "exported": true, + "doc": "CertIDResponse Represents a certification case response from RHConnect\n\nThis struct holds information returned by the RHConnect API for a\ncertification request, including the internal ID, external case number,\nstatus, level, URL, and whether the partner initiated it. 
It also embeds a\nnested type providing the certification category's identifier and name.", + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:48", + "fields": { + "CaseNumber": "string", + "CertificationLevel": "string", + "CertificationType": "struct{ID int; Name string}", + "HasStartedByPartner": "bool", + "ID": "int", + "RhcertURL": "string", + "Status": "string" + }, + "methodNames": null, + "source": [ + "type CertIDResponse struct {", + "\tID int `json:\"id\"`", + "\tCaseNumber string `json:\"caseNumber\"`", + "\tStatus string `json:\"status\"`", + "\tCertificationLevel string `json:\"certificationLevel\"`", + "\tRhcertURL string `json:\"rhcertUrl\"`", + "\tHasStartedByPartner bool `json:\"hasStartedByPartner\"`", + "\tCertificationType struct {", + "\t\tID int `json:\"id\"`", + "\t\tName string `json:\"name\"`", + "\t} `json:\"certificationType\"`", + "}" + ] + }, + { + "name": "UploadResult", + "exported": true, + "doc": "UploadResult Details the outcome of a file upload operation\n\nThis structure holds information about an uploaded artifact, including its\nunique identifier, type, name, size, MIME type, description, download link,\nuploader, upload timestamp, and related certificate ID. 
It is used to convey\nall relevant metadata back to clients or services that need to reference the\nstored file.", + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:134", + "fields": { + "CertID": "int", + "ContentType": "string", + "Desc": "string", + "DownloadURL": "string", + "Name": "string", + "Size": "int", + "Type": "string", + "UUID": "string", + "UploadedBy": "string", + "UploadedDate": "time.Time" + }, + "methodNames": null, + "source": [ + "type UploadResult struct {", + "\tUUID string `json:\"uuid\"`", + "\tType string `json:\"type\"`", + "\tName string `json:\"name\"`", + "\tSize int `json:\"size\"`", + "\tContentType string `json:\"contentType\"`", + "\tDesc string `json:\"desc\"`", + "\tDownloadURL string `json:\"downloadUrl\"`", + "\tUploadedBy string `json:\"uploadedBy\"`", + "\tUploadedDate time.Time `json:\"uploadedDate\"`", + "\tCertID int `json:\"certId\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "CompressResultsArtifacts", + "qualifiedName": "CompressResultsArtifacts", + "exported": true, + "signature": "func(string, []string)(string, error)", + "doc": "CompressResultsArtifacts Creates a compressed archive of specified files\n\nThe function builds a zip file in the given output directory, including each\npath from the slice. It streams each file into a tar writer wrapped by gzip\nfor compression, handling errors during header creation or file access. 
The\nabsolute path to the resulting archive is returned.", + "position": "/Users/deliedit/dev/certsuite/internal/results/archiver.go:58", + "calls": [ + { + "name": "generateZipFileName", + "kind": "function", + "source": [ + "func generateZipFileName() string {", + "\treturn time.Now().Format(tarGzFileNamePrefixLayout) + \"-\" + tarGzFileNameSuffix", + "}" + ] + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "Create", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "compress/gzip", + "name": "NewWriter", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "archive/tar", + "name": "NewWriter", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "getFileTarHeader", + "kind": "function", + "source": [ + "func getFileTarHeader(file string) (*tar.Header, error) {", + "\tinfo, err := os.Stat(file)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get file info from %s: %v\", file, err)", + "\t}", + "", + "\theader, err := tar.FileInfoHeader(info, info.Name())", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get file info header for %s: %v\", file, err)", + "\t}", + "", + "\treturn header, nil", + "}" + ] + }, + { + "name": "WriteHeader", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + 
"kind": "function" + }, + { + "pkgPath": "os", + "name": "Open", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "io", + "name": "Copy", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Abs", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. 
See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + 
"\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) {", + "\tzipFileName := generateZipFileName()", + "\tzipFilePath := filepath.Join(outputDir, zipFileName)", + "", + "\tlog.Info(\"Compressing results artifacts into %s\", zipFilePath)", + "\tzipFile, err := os.Create(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed creating tar.gz file %s in dir %s (filepath=%s): %v\",", + "\t\t\tzipFileName, outputDir, zipFilePath, err)", + "\t}", + "", + "\tzipWriter := gzip.NewWriter(zipFile)", + "\tdefer zipWriter.Close()", + "", + "\ttarWriter := tar.NewWriter(zipWriter)", + "\tdefer tarWriter.Close()", + "", + "\tfor _, file := range filePaths {", + "\t\tlog.Debug(\"Zipping file %s\", file)", + "", + "\t\ttarHeader, err := getFileTarHeader(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\terr = tarWriter.WriteHeader(tarHeader)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to write tar header for %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf, err := os.Open(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to open file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tif _, err = io.Copy(tarWriter, f); err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to tar file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf.Close()", + "\t}", + "", + "\t// Create fully qualified path to the zip file", + "\tzipFilePath, err = filepath.Abs(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", 
fmt.Errorf(\"failed to get absolute path for %s: %v\", zipFilePath, err)", + "\t}", + "", + "\t// Return the entire path to the zip file", + "\treturn zipFilePath, nil", + "}" + ] + }, + { + "name": "CreateResultsWebFiles", + "qualifiedName": "CreateResultsWebFiles", + "exported": true, + "signature": "func(string, string)([]string, error)", + "doc": "CreateResultsWebFiles Creates HTML web assets for claim data\n\nThe function generates the JavaScript file that exposes the claim JSON\ncontent, writes a static results page, and returns their paths. It accepts an\noutput directory and a claim file name, constructs the necessary files,\nhandles any I/O errors, and collects the created file locations in a slice.\nThe returned slice contains the paths to all web artifacts for later use.", + "position": "/Users/deliedit/dev/certsuite/internal/results/html.go:52", + "calls": [ + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "name": "createClaimJSFile", + "kind": "function", + "source": [ + "func createClaimJSFile(claimFilePath, outputDir string) (filePath string, err error) {", + "\t// Read claim.json content.", + "\tclaimContent, err := os.ReadFile(claimFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to read claim file %s content in %s: %v\", claimFilePath, outputDir, err)", + "\t}", + "", + "\t// Add the content as the value for the js variable.", + "\tjsClaimContent := \"var initialjson = \" + string(claimContent)", + "", + "\tfilePath = filepath.Join(outputDir, jsClaimVarFileName)", + "\terr = os.WriteFile(filePath, []byte(jsClaimContent), writeFilePerms)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to write file %s: %v\", filePath, err)", + "\t}", + "", + "\treturn filePath, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "WriteFile", 
+ "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. 
See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, 
filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CreateResultsWebFiles(outputDir, claimFileName string) (filePaths []string, err error) {", + "\ttype file struct {", + "\t\tPath string", + "\t\tContent []byte", + "\t}", + "", + "\tstaticFiles := []file{", + "\t\t{", + "\t\t\tPath: filepath.Join(outputDir, htmlResultsFileName),", + "\t\t\tContent: htmlResultsFileContent,", + "\t\t},", + "\t}", + "", + "\tclaimFilePath := filepath.Join(outputDir, claimFileName)", + "\tclaimJSFilePath, err := createClaimJSFile(claimFilePath, outputDir)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to create file %s: %v\", jsClaimVarFileName, err)", + "\t}", + "", + "\tfilePaths = []string{claimJSFilePath}", + "\tfor _, f := range staticFiles {", + "\t\terr := os.WriteFile(f.Path, f.Content, writeFilePerms)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create file %s: %v\", f.Path, err)", + "\t\t}", + "", + "\t\t// Add this file path to the slice.", + "\t\tfilePaths = append(filePaths, f.Path)", + "\t}", + "", + "\treturn filePaths, nil", + "}" + ] + }, + { + "name": "GetCertIDFromConnectAPI", + "qualifiedName": "GetCertIDFromConnectAPI", + "exported": true, + "signature": "func(string, string, string, string, string)(string, error)", + "doc": "GetCertIDFromConnectAPI Retrieves a certification identifier from the Red Hat Connect service\n\nThe function builds a JSON payload containing a project ID, sends it as a\nPOST request to the Connect API endpoint for certifications, and decodes the\nreturned JSON to extract the numeric certification ID. 
It supports optional\nproxy configuration, sanitizes input strings, and applies a timeout to the\nHTTP client. The resulting ID is returned as a string; errors are reported if\nany step fails.", + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:69", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "NewRequest", + "kind": "function" + }, + { + "pkgPath": "bytes", + "name": "NewBuffer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": 
"setProxy", + "kind": "function", + "source": [ + "func setProxy(client *http.Client, proxyURL, proxyPort string) {", + "\tif proxyURL != \"\" \u0026\u0026 proxyPort != \"\" {", + "\t\tlog.Debug(\"Proxy is set. Using proxy %s:%s\", proxyURL, proxyPort)", + "\t\tproxyURL := fmt.Sprintf(\"%s:%s\", proxyURL, proxyPort)", + "\t\tparsedURL, err := url.Parse(proxyURL)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse proxy URL: %v\", err)", + "\t\t}", + "\t\tlog.Debug(\"Proxy URL: %s\", parsedURL)", + "\t\tclient.Transport = \u0026http.Transport{Proxy: http.ProxyURL(parsedURL)}", + "\t}", + "}" + ] + }, + { + "name": "sendRequest", + "kind": "function", + "source": [ + "func sendRequest(req *http.Request, client *http.Client) (*http.Response, error) {", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", req.URL)", + "", + "\tres, err := client.Do(req)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to send post request: %v\", err)", + "\t}", + "", + "\tif res.StatusCode != http.StatusOK {", + "\t\tlog.Debug(\"Response: %v\", res)", + "\t\treturn nil, fmt.Errorf(\"failed to send post request to the endpoint: %v\", res.Status)", + "\t}", + "", + "\treturn res, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "name": "Decode", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "NewDecoder", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) {", + "\tlog.Info(\"Getting certification ID from Red Hat Connect API\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tprojectID = strings.ReplaceAll(projectID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectAPIBaseURL = strings.ReplaceAll(connectAPIBaseURL, \"\\\"\", \"\")", + "", + "\t// remove quotes from projectID", + "\tprojectIDJSON := fmt.Sprintf(`{ \"projectId\": %q }`, projectID)", + "", + "\t// Convert JSON to bytes", + "\tprojectIDJSONBytes := []byte(projectIDJSON)", + "", + "\t// Create the URL", + "\tcertIDURL := fmt.Sprintf(\"%s/projects/certifications\", connectAPIBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", certIDURL, bytes.NewBuffer(projectIDJSONBytes))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\tlog.Debug(\"Request Body: %s\", req.Body)", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", \"application/json\")", + "\treq.Header.Set(\"Accept\", \"application/json\")", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", certIDURL)", + "", + "\tclient := \u0026http.Client{", + 
"\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tres, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer res.Body.Close()", + "", + "\t// Parse the response", + "\tvar certIDResponse CertIDResponse", + "\terr = json.NewDecoder(res.Body).Decode(\u0026certIDResponse)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Certification ID retrieved from the API: %d\", certIDResponse.ID)", + "", + "\t// Return the certification ID", + "\treturn fmt.Sprintf(\"%d\", certIDResponse.ID), nil", + "}" + ] + }, + { + "name": "SendResultsToConnectAPI", + "qualifiedName": "SendResultsToConnectAPI", + "exported": true, + "signature": "func(string, string, string, string, string, string)(error)", + "doc": "SendResultsToConnectAPI Uploads a ZIP file of test results to the Red Hat Connect API\n\nThe function takes a zip file path, an API key, base URL, certification ID,\nand optional proxy settings. It builds a multipart/form‑data POST request\ncontaining the file and metadata fields, then sends it with a\ntimeout‑limited HTTP client that can use a proxy if configured. 
On success\nit logs the returned download URL and upload date; otherwise it returns an\nerror describing what failed.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:157", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "mime/multipart", + "name": "NewWriter", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "Open", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "name": "CreateFormFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "io", + "name": "Copy", + "kind": "function" + }, + { + "name": "createFormField", + "kind": "function", + "source": [ + "func createFormField(w *multipart.Writer, field, value string) error {", + "\tfw, err := w.CreateFormField(field)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form field: %v\", err)", + "\t}", + "", + "\t_, err = fw.Write([]byte(value))", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to write field %s: %v\", field, err)", + "\t}", + "", + 
"\treturn nil", + "}" + ] + }, + { + "name": "createFormField", + "kind": "function", + "source": [ + "func createFormField(w *multipart.Writer, field, value string) error {", + "\tfw, err := w.CreateFormField(field)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form field: %v\", err)", + "\t}", + "", + "\t_, err = fw.Write([]byte(value))", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to write field %s: %v\", field, err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "createFormField", + "kind": "function", + "source": [ + "func createFormField(w *multipart.Writer, field, value string) error {", + "\tfw, err := w.CreateFormField(field)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form field: %v\", err)", + "\t}", + "", + "\t_, err = fw.Write([]byte(value))", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to write field %s: %v\", field, err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "NewRequest", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "FormDataContentType", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "setProxy", + "kind": "function", + "source": [ + "func setProxy(client *http.Client, proxyURL, proxyPort string) {", + "\tif proxyURL != \"\" \u0026\u0026 proxyPort != \"\" {", + "\t\tlog.Debug(\"Proxy is set. 
Using proxy %s:%s\", proxyURL, proxyPort)", + "\t\tproxyURL := fmt.Sprintf(\"%s:%s\", proxyURL, proxyPort)", + "\t\tparsedURL, err := url.Parse(proxyURL)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse proxy URL: %v\", err)", + "\t\t}", + "\t\tlog.Debug(\"Proxy URL: %s\", parsedURL)", + "\t\tclient.Transport = \u0026http.Transport{Proxy: http.ProxyURL(parsedURL)}", + "\t}", + "}" + ] + }, + { + "name": "sendRequest", + "kind": "function", + "source": [ + "func sendRequest(req *http.Request, client *http.Client) (*http.Response, error) {", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", req.URL)", + "", + "\tres, err := client.Do(req)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to send post request: %v\", err)", + "\t}", + "", + "\tif res.StatusCode != http.StatusOK {", + "\t\tlog.Debug(\"Response: %v\", res)", + "\t\treturn nil, fmt.Errorf(\"failed to send post request to the endpoint: %v\", res.Status)", + "\t}", + "", + "\treturn res, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "name": "Decode", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "NewDecoder", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": 
"Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error {", + "\tlog.Info(\"Sending results to Red Hat Connect\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tcertID = strings.ReplaceAll(certID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectBaseURL = strings.ReplaceAll(connectBaseURL, \"\\\"\", \"\")", + "", + "\tvar buffer bytes.Buffer", + "", + "\t// Create a new multipart writer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\tlog.Debug(\"Creating form file for %s\", zipFile)", + "", + "\tclaimFile, err := os.Open(zipFile)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "", + "\tfw, err := w.CreateFormFile(\"attachment\", zipFile)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form file: %v\", err)", + "\t}", + "", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"type\", \"RhocpBestPracticeTestResult\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"certId\", certID)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"description\", \"CNF Test Results\")", 
+ "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create the URL", + "\tconnectAPIURL := fmt.Sprintf(\"%s/attachments/upload\", connectBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", connectAPIURL, \u0026buffer)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// Create a client", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API upload", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tresponse, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer response.Body.Close()", + "", + "\t// Parse the result of the request", + "\tvar uploadResult UploadResult", + "\terr = json.NewDecoder(response.Body).Decode(\u0026uploadResult)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Download URL: %s\", uploadResult.DownloadURL)", + "\tlog.Info(\"Upload Date: %s\", uploadResult.UploadedDate)", + "\treturn nil", + "}" + ] + }, + { + "name": "createClaimJSFile", + "qualifiedName": "createClaimJSFile", + "exported": false, + "signature": "func(string, string)(string, error)", + "doc": "createClaimJSFile Creates a JavaScript file containing the claim JSON data\n\nThe function reads the contents of a specified claim.json file, prefixes it\nwith a JavaScript variable declaration, and writes this combined string to a\nnew file in the given output directory. 
It returns the path to the newly\ncreated file or an error if reading or writing fails.", + "position": "/Users/deliedit/dev/certsuite/internal/results/html.go:26", + "calls": [ + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "WriteFile", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "CreateResultsWebFiles", + "kind": "function", + "source": [ + "func CreateResultsWebFiles(outputDir, claimFileName string) (filePaths []string, err error) {", + "\ttype file struct {", + "\t\tPath string", + "\t\tContent []byte", + "\t}", + "", + "\tstaticFiles := []file{", + "\t\t{", + "\t\t\tPath: filepath.Join(outputDir, htmlResultsFileName),", + "\t\t\tContent: htmlResultsFileContent,", + "\t\t},", + "\t}", + "", + "\tclaimFilePath := filepath.Join(outputDir, claimFileName)", + "\tclaimJSFilePath, err := createClaimJSFile(claimFilePath, outputDir)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to create file %s: %v\", jsClaimVarFileName, err)", + "\t}", + "", + "\tfilePaths = []string{claimJSFilePath}", + "\tfor _, f := range staticFiles {", + "\t\terr := os.WriteFile(f.Path, f.Content, writeFilePerms)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create file %s: %v\", f.Path, err)", + "\t\t}", + "", + "\t\t// Add this file path to the slice.", + "\t\tfilePaths = append(filePaths, f.Path)", + "\t}", + "", + "\treturn filePaths, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createClaimJSFile(claimFilePath, outputDir string) (filePath string, err error) {", + "\t// Read claim.json 
content.", + "\tclaimContent, err := os.ReadFile(claimFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to read claim file %s content in %s: %v\", claimFilePath, outputDir, err)", + "\t}", + "", + "\t// Add the content as the value for the js variable.", + "\tjsClaimContent := \"var initialjson = \" + string(claimContent)", + "", + "\tfilePath = filepath.Join(outputDir, jsClaimVarFileName)", + "\terr = os.WriteFile(filePath, []byte(jsClaimContent), writeFilePerms)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to write file %s: %v\", filePath, err)", + "\t}", + "", + "\treturn filePath, nil", + "}" + ] + }, + { + "name": "createFormField", + "qualifiedName": "createFormField", + "exported": false, + "signature": "func(*multipart.Writer, string, string)(error)", + "doc": "createFormField Creates a single form field in a multipart payload\n\nThe function accepts a multipart writer, a field name, and its value. It uses\nthe writer to create the field and writes the provided string into it. 
Errors\nduring creation or writing are wrapped with context and returned.", + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:28", + "calls": [ + { + "name": "CreateFormField", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "SendResultsToConnectAPI", + "kind": "function", + "source": [ + "func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error {", + "\tlog.Info(\"Sending results to Red Hat Connect\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tcertID = strings.ReplaceAll(certID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectBaseURL = strings.ReplaceAll(connectBaseURL, \"\\\"\", \"\")", + "", + "\tvar buffer bytes.Buffer", + "", + "\t// Create a new multipart writer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\tlog.Debug(\"Creating form file for %s\", zipFile)", + "", + "\tclaimFile, err := os.Open(zipFile)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "", + "\tfw, err := w.CreateFormFile(\"attachment\", zipFile)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form file: %v\", err)", + "\t}", + "", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"type\", \"RhocpBestPracticeTestResult\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"certId\", certID)", + 
"\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"description\", \"CNF Test Results\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create the URL", + "\tconnectAPIURL := fmt.Sprintf(\"%s/attachments/upload\", connectBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", connectAPIURL, \u0026buffer)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// Create a client", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API upload", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tresponse, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer response.Body.Close()", + "", + "\t// Parse the result of the request", + "\tvar uploadResult UploadResult", + "\terr = json.NewDecoder(response.Body).Decode(\u0026uploadResult)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Download URL: %s\", uploadResult.DownloadURL)", + "\tlog.Info(\"Upload Date: %s\", uploadResult.UploadedDate)", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createFormField(w *multipart.Writer, field, value string) error {", + "\tfw, err := w.CreateFormField(field)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form field: %v\", err)", + "\t}", + "", + "\t_, err = fw.Write([]byte(value))", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to write field %s: %v\", field, err)", + "\t}", + "", + "\treturn 
nil", + "}" + ] + }, + { + "name": "generateZipFileName", + "qualifiedName": "generateZipFileName", + "exported": false, + "signature": "func()(string)", + "doc": "generateZipFileName creates a timestamped name for the archive file\n\nThe function generates a string by formatting the current time with a\npredefined layout and appending a suffix to produce a unique filename. It\nuses the system clock to ensure each call returns a different value, suitable\nfor naming compressed result artifacts. The returned string is later combined\nwith a directory path to create the full file location.", + "position": "/Users/deliedit/dev/certsuite/internal/results/archiver.go:28", + "calls": [ + { + "name": "Format", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "CompressResultsArtifacts", + "kind": "function", + "source": [ + "func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) {", + "\tzipFileName := generateZipFileName()", + "\tzipFilePath := filepath.Join(outputDir, zipFileName)", + "", + "\tlog.Info(\"Compressing results artifacts into %s\", zipFilePath)", + "\tzipFile, err := os.Create(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed creating tar.gz file %s in dir %s (filepath=%s): %v\",", + "\t\t\tzipFileName, outputDir, zipFilePath, err)", + "\t}", + "", + "\tzipWriter := gzip.NewWriter(zipFile)", + "\tdefer zipWriter.Close()", + "", + "\ttarWriter := tar.NewWriter(zipWriter)", + "\tdefer tarWriter.Close()", + "", + "\tfor _, file := range filePaths {", + "\t\tlog.Debug(\"Zipping file %s\", file)", + "", + "\t\ttarHeader, err := getFileTarHeader(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\terr = tarWriter.WriteHeader(tarHeader)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to write tar 
header for %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf, err := os.Open(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to open file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tif _, err = io.Copy(tarWriter, f); err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to tar file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf.Close()", + "\t}", + "", + "\t// Create fully qualified path to the zip file", + "\tzipFilePath, err = filepath.Abs(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get absolute path for %s: %v\", zipFilePath, err)", + "\t}", + "", + "\t// Return the entire path to the zip file", + "\treturn zipFilePath, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func generateZipFileName() string {", + "\treturn time.Now().Format(tarGzFileNamePrefixLayout) + \"-\" + tarGzFileNameSuffix", + "}" + ] + }, + { + "name": "getFileTarHeader", + "qualifiedName": "getFileTarHeader", + "exported": false, + "signature": "func(string)(*tar.Header, error)", + "doc": "getFileTarHeader Creates a tar header for a given file\n\nThe function retrieves the file’s metadata using the operating system, then\nconverts that information into a tar header structure suitable for archiving.\nIt returns the header or an error if either the stat call or the conversion\nfails.", + "position": "/Users/deliedit/dev/certsuite/internal/results/archiver.go:38", + "calls": [ + { + "pkgPath": "os", + "name": "Stat", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "archive/tar", + "name": "FileInfoHeader", + "kind": "function" + }, + { + "name": "Name", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "CompressResultsArtifacts", + "kind": "function", + "source": [ 
+ "func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) {", + "\tzipFileName := generateZipFileName()", + "\tzipFilePath := filepath.Join(outputDir, zipFileName)", + "", + "\tlog.Info(\"Compressing results artifacts into %s\", zipFilePath)", + "\tzipFile, err := os.Create(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed creating tar.gz file %s in dir %s (filepath=%s): %v\",", + "\t\t\tzipFileName, outputDir, zipFilePath, err)", + "\t}", + "", + "\tzipWriter := gzip.NewWriter(zipFile)", + "\tdefer zipWriter.Close()", + "", + "\ttarWriter := tar.NewWriter(zipWriter)", + "\tdefer tarWriter.Close()", + "", + "\tfor _, file := range filePaths {", + "\t\tlog.Debug(\"Zipping file %s\", file)", + "", + "\t\ttarHeader, err := getFileTarHeader(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\terr = tarWriter.WriteHeader(tarHeader)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to write tar header for %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf, err := os.Open(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to open file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tif _, err = io.Copy(tarWriter, f); err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to tar file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf.Close()", + "\t}", + "", + "\t// Create fully qualified path to the zip file", + "\tzipFilePath, err = filepath.Abs(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get absolute path for %s: %v\", zipFilePath, err)", + "\t}", + "", + "\t// Return the entire path to the zip file", + "\treturn zipFilePath, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getFileTarHeader(file string) (*tar.Header, error) {", + "\tinfo, err := os.Stat(file)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get file info from %s: %v\", file, err)", 
+ "\t}", + "", + "\theader, err := tar.FileInfoHeader(info, info.Name())", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get file info header for %s: %v\", file, err)", + "\t}", + "", + "\treturn header, nil", + "}" + ] + }, + { + "name": "sendRequest", + "qualifiedName": "sendRequest", + "exported": false, + "signature": "func(*http.Request, *http.Client)(*http.Response, error)", + "doc": "sendRequest Sends an HTTP request using a client and checks for success\n\nThis function logs the target URL, executes the request with the provided\nhttp.Client, and returns the response if the status code is 200 OK. If an\nerror occurs during execution or the status code differs from OK, it returns\na formatted error describing the failure.", + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:251", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Do", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "GetCertIDFromConnectAPI", + "kind": "function", + "source": [ + "func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) {", + "\tlog.Info(\"Getting certification ID from Red Hat Connect API\")", + "", + "\t// sanitize the incoming variables, remove 
the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tprojectID = strings.ReplaceAll(projectID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectAPIBaseURL = strings.ReplaceAll(connectAPIBaseURL, \"\\\"\", \"\")", + "", + "\t// remove quotes from projectID", + "\tprojectIDJSON := fmt.Sprintf(`{ \"projectId\": %q }`, projectID)", + "", + "\t// Convert JSON to bytes", + "\tprojectIDJSONBytes := []byte(projectIDJSON)", + "", + "\t// Create the URL", + "\tcertIDURL := fmt.Sprintf(\"%s/projects/certifications\", connectAPIBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", certIDURL, bytes.NewBuffer(projectIDJSONBytes))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\tlog.Debug(\"Request Body: %s\", req.Body)", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", \"application/json\")", + "\treq.Header.Set(\"Accept\", \"application/json\")", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", certIDURL)", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tres, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer res.Body.Close()", + "", + "\t// Parse the response", + "\tvar certIDResponse CertIDResponse", + "\terr = json.NewDecoder(res.Body).Decode(\u0026certIDResponse)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Certification ID retrieved from the API: %d\", certIDResponse.ID)", + "", + 
"\t// Return the certification ID", + "\treturn fmt.Sprintf(\"%d\", certIDResponse.ID), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "SendResultsToConnectAPI", + "kind": "function", + "source": [ + "func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error {", + "\tlog.Info(\"Sending results to Red Hat Connect\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tcertID = strings.ReplaceAll(certID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectBaseURL = strings.ReplaceAll(connectBaseURL, \"\\\"\", \"\")", + "", + "\tvar buffer bytes.Buffer", + "", + "\t// Create a new multipart writer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\tlog.Debug(\"Creating form file for %s\", zipFile)", + "", + "\tclaimFile, err := os.Open(zipFile)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "", + "\tfw, err := w.CreateFormFile(\"attachment\", zipFile)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form file: %v\", err)", + "\t}", + "", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"type\", \"RhocpBestPracticeTestResult\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"certId\", certID)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"description\", \"CNF Test Results\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create the URL", + "\tconnectAPIURL := fmt.Sprintf(\"%s/attachments/upload\", 
connectBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", connectAPIURL, \u0026buffer)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// Create a client", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API upload", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tresponse, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer response.Body.Close()", + "", + "\t// Parse the result of the request", + "\tvar uploadResult UploadResult", + "\terr = json.NewDecoder(response.Body).Decode(\u0026uploadResult)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Download URL: %s\", uploadResult.DownloadURL)", + "\tlog.Info(\"Upload Date: %s\", uploadResult.UploadedDate)", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func sendRequest(req *http.Request, client *http.Client) (*http.Response, error) {", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", req.URL)", + "", + "\tres, err := client.Do(req)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to send post request: %v\", err)", + "\t}", + "", + "\tif res.StatusCode != http.StatusOK {", + "\t\tlog.Debug(\"Response: %v\", res)", + "\t\treturn nil, fmt.Errorf(\"failed to send post request to the endpoint: %v\", res.Status)", + "\t}", + "", + "\treturn res, nil", + "}" + ] + }, + { + "name": "setProxy", + "qualifiedName": "setProxy", + "exported": false, + "signature": "func(*http.Client, string, string)()", + "doc": "setProxy 
configures an HTTP client to use a proxy when provided\n\nWhen both the proxy address and port are supplied, the function builds a\nproxy URL, parses it, logs the configuration, and assigns a transport with\nthat proxy to the client. If parsing fails, an error is logged but no panic\noccurs. The client remains unchanged if either value is empty.", + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:274", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "net/url", + "name": "Parse", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "net/http", + "name": "ProxyURL", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "GetCertIDFromConnectAPI", + "kind": "function", + "source": [ + "func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) {", + "\tlog.Info(\"Getting certification ID from Red Hat Connect API\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tprojectID = 
strings.ReplaceAll(projectID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectAPIBaseURL = strings.ReplaceAll(connectAPIBaseURL, \"\\\"\", \"\")", + "", + "\t// remove quotes from projectID", + "\tprojectIDJSON := fmt.Sprintf(`{ \"projectId\": %q }`, projectID)", + "", + "\t// Convert JSON to bytes", + "\tprojectIDJSONBytes := []byte(projectIDJSON)", + "", + "\t// Create the URL", + "\tcertIDURL := fmt.Sprintf(\"%s/projects/certifications\", connectAPIBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", certIDURL, bytes.NewBuffer(projectIDJSONBytes))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\tlog.Debug(\"Request Body: %s\", req.Body)", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", \"application/json\")", + "\treq.Header.Set(\"Accept\", \"application/json\")", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", certIDURL)", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tres, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer res.Body.Close()", + "", + "\t// Parse the response", + "\tvar certIDResponse CertIDResponse", + "\terr = json.NewDecoder(res.Body).Decode(\u0026certIDResponse)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Certification ID retrieved from the API: %d\", certIDResponse.ID)", + "", + "\t// Return the certification ID", + "\treturn fmt.Sprintf(\"%d\", certIDResponse.ID), nil", + "}" + ] + 
}, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "SendResultsToConnectAPI", + "kind": "function", + "source": [ + "func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error {", + "\tlog.Info(\"Sending results to Red Hat Connect\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tcertID = strings.ReplaceAll(certID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectBaseURL = strings.ReplaceAll(connectBaseURL, \"\\\"\", \"\")", + "", + "\tvar buffer bytes.Buffer", + "", + "\t// Create a new multipart writer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\tlog.Debug(\"Creating form file for %s\", zipFile)", + "", + "\tclaimFile, err := os.Open(zipFile)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "", + "\tfw, err := w.CreateFormFile(\"attachment\", zipFile)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create form file: %v\", err)", + "\t}", + "", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"type\", \"RhocpBestPracticeTestResult\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"certId\", certID)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"description\", \"CNF Test Results\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create the URL", + "\tconnectAPIURL := fmt.Sprintf(\"%s/attachments/upload\", connectBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", 
connectAPIURL, \u0026buffer)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// Create a client", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API upload", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tresponse, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer response.Body.Close()", + "", + "\t// Parse the result of the request", + "\tvar uploadResult UploadResult", + "\terr = json.NewDecoder(response.Body).Decode(\u0026uploadResult)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Download URL: %s\", uploadResult.DownloadURL)", + "\tlog.Info(\"Upload Date: %s\", uploadResult.UploadedDate)", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func setProxy(client *http.Client, proxyURL, proxyPort string) {", + "\tif proxyURL != \"\" \u0026\u0026 proxyPort != \"\" {", + "\t\tlog.Debug(\"Proxy is set. 
Using proxy %s:%s\", proxyURL, proxyPort)", + "\t\tproxyURL := fmt.Sprintf(\"%s:%s\", proxyURL, proxyPort)", + "\t\tparsedURL, err := url.Parse(proxyURL)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse proxy URL: %v\", err)", + "\t\t}", + "\t\tlog.Debug(\"Proxy URL: %s\", parsedURL)", + "\t\tclient.Transport = \u0026http.Transport{Proxy: http.ProxyURL(parsedURL)}", + "\t}", + "}" + ] + } + ], + "globals": [ + { + "name": "htmlResultsFileContent", + "exported": false, + "type": "[]byte", + "doc": "go:embed html/results.html", + "position": "/Users/deliedit/dev/certsuite/internal/results/html.go:18" + } + ], + "consts": [ + { + "name": "htmlResultsFileName", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/results/html.go:11" + }, + { + "name": "jsClaimVarFileName", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/results/html.go:12" + }, + { + "name": "redHatConnectAPITimeout", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/results/rhconnect.go:20" + }, + { + "name": "tarGzFileNamePrefixLayout", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/results/archiver.go:17" + }, + { + "name": "tarGzFileNameSuffix", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/results/archiver.go:18" + }, + { + "name": "writeFilePerms", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/internal/results/html.go:14" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "name": "arrayhelper", + "files": 1, + "imports": [ + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "ArgListToMap", + "qualifiedName": "ArgListToMap", + "exported": true, + "signature": "func([]string)(map[string]string)", + "doc": "ArgListToMap Converts key=value strings into a map\n\nIt receives an array of strings, each representing a kernel argument or\nconfiguration pair. 
For every entry it removes surrounding quotes, splits on\nthe first equals sign, and stores the key with its corresponding value in a new\nmap. The resulting map is returned for further processing.", + "position": "/Users/deliedit/dev/certsuite/pkg/arrayhelper/arrayhelper.go:29", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "GetMcKernelArguments", + "kind": "function", + "source": [ + "func GetMcKernelArguments(env *provider.TestEnvironment, nodeName string) (aMap map[string]string) {", + "\tmcKernelArgumentsMap := arrayhelper.ArgListToMap(env.Nodes[nodeName].Mc.Spec.KernelArguments)", + "\treturn mcKernelArgumentsMap", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "getCurrentKernelCmdlineArgs", + "kind": "function", + "source": [ + "func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcurrentKernelCmdlineArgs, errStr, err := o.ExecCommandContainer(ctx, kernelArgscommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod container %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName].Name, err, errStr)", + "\t}", + "\tcurrentSplitKernelCmdlineArgs := strings.Split(strings.TrimSuffix(currentKernelCmdlineArgs, \"\\n\"), \" \")", + "\treturn arrayhelper.ArgListToMap(currentSplitKernelCmdlineArgs), nil", + "}" + ] + }, + { +
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "getGrubKernelArgs", + "kind": "function", + "source": [ + "func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tbootConfig, errStr, err := o.ExecCommandContainer(ctx, grubKernelArgsCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\tsplitBootConfig := strings.Split(bootConfig, \"\\n\")", + "\tfilteredBootConfig := arrayhelper.FilterArray(splitBootConfig, func(line string) bool {", + "\t\treturn strings.HasPrefix(line, \"options\")", + "\t})", + "\tif len(filteredBootConfig) != 1 {", + "\t\treturn aMap, fmt.Errorf(\"filteredBootConfig!=1\")", + "\t}", + "\tgrubKernelConfig := filteredBootConfig[0]", + "\tgrubSplitKernelConfig := strings.Split(grubKernelConfig, \" \")", + "\tgrubSplitKernelConfig = grubSplitKernelConfig[1:]", + "\treturn arrayhelper.ArgListToMap(grubSplitKernelConfig), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ArgListToMap(lst []string) map[string]string {", + "\tretval := make(map[string]string)", + "\tfor _, arg := range lst {", + "\t\targ = strings.ReplaceAll(arg, `\"`, ``)", + "\t\tsplitArgs := strings.Split(arg, \"=\")", + "\t\tif len(splitArgs) == 1 {", + "\t\t\tretval[splitArgs[0]] = \"\"", + "\t\t} else {", + "\t\t\tretval[splitArgs[0]] = splitArgs[1]", + "\t\t}", + "\t}", + "\treturn retval", + "}" + ] + }, + { + "name": "FilterArray", + "qualifiedName": "FilterArray", + "exported": true, + "signature": "func([]string, func(string) bool)([]string)", + "doc": 
"FilterArray Filters elements of a slice based on a predicate\n\nIt iterates over each string in the input slice, applies the provided\nfunction to decide if an element should be kept, and collects those that\nsatisfy the condition into a new slice which is then returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/arrayhelper/arrayhelper.go:48", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "f", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "getGrubKernelArgs", + "kind": "function", + "source": [ + "func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tbootConfig, errStr, err := o.ExecCommandContainer(ctx, grubKernelArgsCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\tsplitBootConfig := strings.Split(bootConfig, \"\\n\")", + "\tfilteredBootConfig := arrayhelper.FilterArray(splitBootConfig, func(line string) bool {", + "\t\treturn strings.HasPrefix(line, \"options\")", + "\t})", + "\tif len(filteredBootConfig) != 1 {", + "\t\treturn aMap, fmt.Errorf(\"filteredBootConfig!=1\")", + "\t}", + "\tgrubKernelConfig := filteredBootConfig[0]", + "\tgrubSplitKernelConfig := strings.Split(grubKernelConfig, \" \")", + "\tgrubSplitKernelConfig = grubSplitKernelConfig[1:]", + "\treturn arrayhelper.ArgListToMap(grubSplitKernelConfig), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FilterArray(vs []string, f func(string) 
bool) []string {", + "\tvsf := make([]string, 0)", + "\tfor _, v := range vs {", + "\t\tif f(v) {", + "\t\t\tvsf = append(vsf, v)", + "\t\t}", + "\t}", + "\treturn vsf", + "}" + ] + }, + { + "name": "Unique", + "qualifiedName": "Unique", + "exported": true, + "signature": "func([]string)([]string)", + "doc": "Unique Eliminates duplicate strings from a slice\n\nThe function receives a slice of strings and returns a new slice containing\neach distinct element exactly once. It builds a map to track seen values,\nthen collects the unique keys into a result slice. The order of elements is\nnot preserved.", + "position": "/Users/deliedit/dev/certsuite/pkg/arrayhelper/arrayhelper.go:64", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "GetSuitesFromIdentifiers", + "kind": "function", + "source": [ + "func GetSuitesFromIdentifiers(keys []claim.Identifier) []string {", + "\tvar suites []string", + "\tfor _, i := range keys {", + "\t\tsuites = append(suites, i.Suite)", + "\t}", + "\treturn arrayhelper.Unique(suites)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "GetSuitesFromIdentifiers", + "kind": "function", + "source": [ + "func GetSuitesFromIdentifiers(keys []claim.Identifier) []string {", + "\tvar suites []string", + "\tfor _, i := range keys {", + "\t\tsuites = append(suites, i.Suite)", + "\t}", + "\treturn arrayhelper.Unique(suites)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Unique(slice []string) []string {", + "\t// create a map with all the values as key", + "\tuniqMap := make(map[string]struct{})", + "\tfor _, v := range slice {", + "\t\tuniqMap[v] = struct{}{}", + "\t}", + 
"", + "\t// turn the map keys into a slice", + "\tuniqSlice := make([]string, 0, len(uniqMap))", + "\tfor v := range uniqMap {", + "\t\tuniqSlice = append(uniqSlice, v)", + "\t}", + "\treturn uniqSlice", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "autodiscover", + "files": 19, + "imports": [ + "context", + "errors", + "fmt", + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1", + "github.com/mittwald/go-helm-client", + "github.com/openshift/api/config/v1", + "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1", + "github.com/operator-framework/api/pkg/operators/v1alpha1", + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1alpha1", + "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/apis/operators/v1", + "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/client/clientset/versioned/typed/operators/v1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "helm.sh/helm/v3/pkg/release", + "k8s.io/api/apps/v1", + "k8s.io/api/autoscaling/v1", + "k8s.io/api/core/v1", + "k8s.io/api/networking/v1", + "k8s.io/api/policy/v1", + "k8s.io/api/rbac/v1", + "k8s.io/api/storage/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + 
"k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/types", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/typed/apps/v1", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/kubernetes/typed/networking/v1", + "k8s.io/client-go/kubernetes/typed/policy/v1", + "k8s.io/client-go/kubernetes/typed/rbac/v1", + "k8s.io/client-go/kubernetes/typed/storage/v1", + "k8s.io/client-go/rest", + "k8s.io/client-go/scale", + "path", + "regexp", + "strings", + "time" + ], + "structs": [ + { + "name": "DiscoveredTestData", + "exported": true, + "doc": "DiscoveredTestData Contains all resources discovered during test setup\n\nThe structure holds metadata, configuration parameters, and collections of\nKubernetes objects such as pods, services, CRDs, and operator information\ncollected by the autodiscovery routine. It aggregates stateful data like pod\nstatuses, resource quotas, network policies, and role bindings to provide a\ncomprehensive snapshot of the cluster for testing purposes. 
The fields are\nused downstream to evaluate test conditions and report results.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:82", + "fields": { + "AbnormalEvents": "[]corev1.Event", + "AllCatalogSources": "[]*olmv1Alpha.CatalogSource", + "AllCrds": "[]*apiextv1.CustomResourceDefinition", + "AllCsvs": "[]*olmv1Alpha.ClusterServiceVersion", + "AllInstallPlans": "[]*olmv1Alpha.InstallPlan", + "AllNamespaces": "[]string", + "AllPackageManifests": "[]*olmPkgv1.PackageManifest", + "AllPods": "[]corev1.Pod", + "AllServiceAccounts": "[]*corev1.ServiceAccount", + "AllServices": "[]*corev1.Service", + "AllSriovNetworkNodePolicies": "[]unstructured.Unstructured", + "AllSriovNetworks": "[]unstructured.Unstructured", + "AllSubscriptions": "[]olmv1Alpha.Subscription", + "CSVToPodListMap": "map[types.NamespacedName][]*corev1.Pod", + "ClusterOperators": "[]configv1.ClusterOperator", + "ClusterRoleBindings": "[]rbacv1.ClusterRoleBinding", + "CollectorAppEndpoint": "string", + "CollectorAppPassword": "string", + "ConnectAPIBaseURL": "string", + "ConnectAPIKey": "string", + "ConnectAPIProxyPort": "string", + "ConnectAPIProxyURL": "string", + "ConnectProjectID": "string", + "Crds": "[]*apiextv1.CustomResourceDefinition", + "Csvs": "[]*olmv1Alpha.ClusterServiceVersion", + "Deployments": "[]appsv1.Deployment", + "Env": "configuration.TestParameters", + "ExecutedBy": "string", + "HelmChartReleases": "map[string][]*release.Release", + "Hpas": "[]*scalingv1.HorizontalPodAutoscaler", + "IstioServiceMeshFound": "bool", + "K8sVersion": "string", + "Namespaces": "[]string", + "NetworkAttachmentDefinitions": "[]nadClient.NetworkAttachmentDefinition", + "NetworkPolicies": "[]networkingv1.NetworkPolicy", + "Nodes": "*corev1.NodeList", + "OCPStatus": "string", + "OpenshiftVersion": "string", + "OperandPods": "[]*corev1.Pod", + "PartnerName": "string", + "PersistentVolumeClaims": "[]corev1.PersistentVolumeClaim", + "PersistentVolumes": "[]corev1.PersistentVolume", 
+ "PodDisruptionBudgets": "[]policyv1.PodDisruptionBudget", + "PodStates": "PodStates", + "Pods": "[]corev1.Pod", + "ProbePods": "[]corev1.Pod", + "ResourceQuotaItems": "[]corev1.ResourceQuota", + "RoleBindings": "[]rbacv1.RoleBinding", + "Roles": "[]rbacv1.Role", + "ScaleCrUnderTest": "[]ScaleObject", + "ServiceAccounts": "[]*corev1.ServiceAccount", + "Services": "[]*corev1.Service", + "ServicesIgnoreList": "[]string", + "SriovNetworkNodePolicies": "[]unstructured.Unstructured", + "SriovNetworks": "[]unstructured.Unstructured", + "StatefulSet": "[]appsv1.StatefulSet", + "StorageClasses": "[]storagev1.StorageClass", + "Subscriptions": "[]olmv1Alpha.Subscription", + "ValidProtocolNames": "[]string" + }, + "methodNames": null, + "source": [ + "type DiscoveredTestData struct {", + "\tEnv configuration.TestParameters", + "\tPodStates PodStates", + "\tPods []corev1.Pod", + "\tAllPods []corev1.Pod", + "\tProbePods []corev1.Pod", + "\tCSVToPodListMap map[types.NamespacedName][]*corev1.Pod", + "\tOperandPods []*corev1.Pod", + "\tResourceQuotaItems []corev1.ResourceQuota", + "\tPodDisruptionBudgets []policyv1.PodDisruptionBudget", + "\tNetworkPolicies []networkingv1.NetworkPolicy", + "\tCrds []*apiextv1.CustomResourceDefinition", + "\tNamespaces []string", + "\tAllNamespaces []string", + "\tAbnormalEvents []corev1.Event", + "\tCsvs []*olmv1Alpha.ClusterServiceVersion", + "\tAllCrds []*apiextv1.CustomResourceDefinition", + "\tAllCsvs []*olmv1Alpha.ClusterServiceVersion", + "\tAllInstallPlans []*olmv1Alpha.InstallPlan", + "\tAllCatalogSources []*olmv1Alpha.CatalogSource", + "\tAllPackageManifests []*olmPkgv1.PackageManifest", + "\tClusterOperators []configv1.ClusterOperator", + "\tSriovNetworks []unstructured.Unstructured", + "\tSriovNetworkNodePolicies []unstructured.Unstructured", + "\tAllSriovNetworks []unstructured.Unstructured", + "\tAllSriovNetworkNodePolicies []unstructured.Unstructured", + "\tNetworkAttachmentDefinitions []nadClient.NetworkAttachmentDefinition", + 
"\tDeployments []appsv1.Deployment", + "\tStatefulSet []appsv1.StatefulSet", + "\tPersistentVolumes []corev1.PersistentVolume", + "\tPersistentVolumeClaims []corev1.PersistentVolumeClaim", + "\tClusterRoleBindings []rbacv1.ClusterRoleBinding", + "\tRoleBindings []rbacv1.RoleBinding // Contains all rolebindings from all namespaces", + "\tRoles []rbacv1.Role // Contains all roles from all namespaces", + "\tServices []*corev1.Service", + "\tAllServices []*corev1.Service", + "\tServiceAccounts []*corev1.ServiceAccount", + "\tAllServiceAccounts []*corev1.ServiceAccount", + "\tHpas []*scalingv1.HorizontalPodAutoscaler", + "\tSubscriptions []olmv1Alpha.Subscription", + "\tAllSubscriptions []olmv1Alpha.Subscription", + "\tHelmChartReleases map[string][]*release.Release", + "\tK8sVersion string", + "\tOpenshiftVersion string", + "\tOCPStatus string", + "\tNodes *corev1.NodeList", + "\tIstioServiceMeshFound bool", + "\tValidProtocolNames []string", + "\tStorageClasses []storagev1.StorageClass", + "\tServicesIgnoreList []string", + "\tScaleCrUnderTest []ScaleObject", + "\tExecutedBy string", + "\tPartnerName string", + "\tCollectorAppPassword string", + "\tCollectorAppEndpoint string", + "\tConnectAPIKey string", + "\tConnectProjectID string", + "\tConnectAPIBaseURL string", + "\tConnectAPIProxyURL string", + "\tConnectAPIProxyPort string", + "}" + ] + }, + { + "name": "PodStates", + "exported": true, + "doc": "PodStates Tracks pod counts before and after execution\n\nThis structure holds two maps that record the number of pods per namespace or\nlabel set before an operation begins and after it completes. The keys\nrepresent identifiers such as namespace names, while the values are integer\ncounters. 
By comparing these maps, callers can determine how many pods were\nadded, removed, or remained unchanged during the execution phase.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:69", + "fields": { + "AfterExecution": "map[string]int", + "BeforeExecution": "map[string]int" + }, + "methodNames": null, + "source": [ + "type PodStates struct {", + "\tBeforeExecution map[string]int", + "\tAfterExecution map[string]int", + "}" + ] + }, + { + "name": "ScaleObject", + "exported": true, + "doc": "ScaleObject represents a scalable custom resource\n\nThis structure holds the scale subresource of a custom resource, along with\nits group‑resource identity. It is used to read or modify the replica count\nfor that resource via the Kubernetes scaling API.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_scales.go:20", + "fields": { + "GroupResourceSchema": "schema.GroupResource", + "Scale": "*scalingv1.Scale" + }, + "methodNames": null, + "source": [ + "type ScaleObject struct {", + "\tScale *scalingv1.Scale", + "\tGroupResourceSchema schema.GroupResource", + "}" + ] + }, + { + "name": "labelObject", + "exported": false, + "doc": "labelObject Represents a single key/value pair used to identify Kubernetes resources\n\nThis structure holds the label's key and its corresponding value, allowing\ncode to match or filter objects such as Pods, Deployments, or Operators based\non those labels. 
It is used throughout the discovery logic to build selectors\nfor listing resources that satisfy one or more specified label conditions.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:150", + "fields": { + "LabelKey": "string", + "LabelValue": "string" + }, + "methodNames": null, + "source": [ + "type labelObject struct {", + "\tLabelKey string", + "\tLabelValue string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "CountPodsByStatus", + "qualifiedName": "CountPodsByStatus", + "exported": true, + "signature": "func([]corev1.Pod)(map[string]int)", + "doc": "CountPodsByStatus Counts running versus non‑running pods\n\nThe function iterates over a slice of pod objects, incrementing counters for\nthose in the Running phase versus all others. It returns a map with keys\n\"ready\" and \"non-ready\" holding the respective counts. The result is used to\ntrack pod state before and after test execution.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_pods.go:99", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = 
getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + 
"\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP 
version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = 
getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = 
config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. 
See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + 
"\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CountPodsByStatus(allPods []corev1.Pod) map[string]int {", + "\tpodStates := map[string]int{", + "\t\t\"ready\": 0,", + "\t\t\"non-ready\": 0,", + "\t}", + "", + "\tfor i := range allPods {", + "\t\tif allPods[i].Status.Phase == corev1.PodRunning {", + "\t\t\tpodStates[\"ready\"]++", + "\t\t} else {", + "\t\t\tpodStates[\"non-ready\"]++", + "\t\t}", + "\t}", + "", + "\treturn podStates", + "}" + ] + }, + { + "name": "CreateLabels", + "qualifiedName": "CreateLabels", + "exported": true, + "signature": "func([]string)([]labelObject)", + "doc": "CreateLabels Parses label expressions into key-value objects\n\nThe function iterates over a slice of strings, each representing a label in\nthe form \"key=value\". It uses a regular expression to extract the key and\nvalue; if parsing fails it logs an error and skips that entry. 
Valid pairs\nare wrapped into labelObject structs and collected into a slice that is\nreturned.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:167", + "calls": [ + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", 
+ "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, 
config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), 
podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + 
"\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = 
config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. 
See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, 
filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CreateLabels(labelStrings []string) (labelObjects []labelObject) {", + "\tfor _, label := range labelStrings {", + "\t\tr := regexp.MustCompile(labelRegex)", + "", + "\t\tvalues := r.FindStringSubmatch(label)", + "\t\tif len(values) != labelRegexMatches {", + "\t\t\tlog.Error(\"Failed to parse label %q. It will not be used!, \", label)", + "\t\t\tcontinue", + "\t\t}", + "\t\tvar aLabel labelObject", + "\t\taLabel.LabelKey = values[1]", + "\t\taLabel.LabelValue = values[2]", + "\t\tlabelObjects = append(labelObjects, aLabel)", + "\t}", + "\treturn labelObjects", + "}" + ] + }, + { + "name": "DoAutoDiscover", + "qualifiedName": "DoAutoDiscover", + "exported": true, + "signature": "func(*configuration.TestConfiguration)(DiscoveredTestData)", + "doc": "DoAutoDiscover Collects comprehensive Kubernetes and OpenShift discovery data\n\nThe function gathers a wide range of cluster information such as namespaces,\npods, operators, subscriptions, CRDs, storage classes, network policies, role\nbindings, and more. 
It uses client holders to query the API, applies label\nfiltering for test objects, handles errors with fatal logging, and populates\na DiscoveredTestData structure that is later used to build the test\nenvironment.\n\nnolint:funlen,gocyclo", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:194", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "getAllStorageClasses", + "kind": "function", + "source": [ + "func getAllStorageClasses(client storagev1typed.StorageV1Interface) ([]storagev1.StorageClass, error) {", + "\tstorageclasslist, err := client.StorageClasses().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Error when listing storage classes, err: %v\", err)", + "\t\treturn nil, err", + "\t}", + "\treturn storageclasslist.Items, nil", + "}" + ] + }, + { + "name": "StorageV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "CreateLabels", + "kind": "function", + "source": [ + "func CreateLabels(labelStrings []string) (labelObjects []labelObject) {", + "\tfor _, label := range labelStrings {", + "\t\tr := regexp.MustCompile(labelRegex)", + "", + "\t\tvalues := 
r.FindStringSubmatch(label)", + "\t\tif len(values) != labelRegexMatches {", + "\t\t\tlog.Error(\"Failed to parse label %q. It will not be used!, \", label)", + "\t\t\tcontinue", + "\t\t}", + "\t\tvar aLabel labelObject", + "\t\taLabel.LabelKey = values[1]", + "\t\taLabel.LabelValue = values[2]", + "\t\tlabelObjects = append(labelObjects, aLabel)", + "\t}", + "\treturn labelObjects", + "}" + ] + }, + { + "name": "CreateLabels", + "kind": "function", + "source": [ + "func CreateLabels(labelStrings []string) (labelObjects []labelObject) {", + "\tfor _, label := range labelStrings {", + "\t\tr := regexp.MustCompile(labelRegex)", + "", + "\t\tvalues := r.FindStringSubmatch(label)", + "\t\tif len(values) != labelRegexMatches {", + "\t\t\tlog.Error(\"Failed to parse label %q. It will not be used!, \", label)", + "\t\t\tcontinue", + "\t\t}", + "\t\tvar aLabel labelObject", + "\t\taLabel.LabelKey = values[1]", + "\t\taLabel.LabelValue = values[2]", + "\t\tlabelObjects = append(labelObjects, aLabel)", + "\t}", + "\treturn labelObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "getAllNamespaces", + "kind": "function", + "source": [ + "func getAllNamespaces(oc corev1client.CoreV1Interface) (allNs []string, err error) {", + "\tnsList, err := oc.Namespaces().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn allNs, fmt.Errorf(\"error getting all namespaces, err: %v\", err)", + "\t}", + "\tfor index := range nsList.Items {", + 
"\t\tallNs = append(allNs, nsList.Items[index].Name)", + "\t}", + "\treturn allNs, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "findSubscriptions", + "kind": "function", + "source": [ + "func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription {", + "\tsubscriptions := []olmv1Alpha.Subscription{}", + "\tfor _, ns := range namespaces {", + "\t\tdisplayNs := ns", + "\t\tif ns == \"\" {", + "\t\t\tdisplayNs = \"All Namespaces\"", + "\t\t}", + "\t\tlog.Debug(\"Searching subscriptions in namespace %q\", displayNs)", + "\t\tsubscription, err := olmClient.Subscriptions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing subscriptions in namespace %q\", ns)", + "\t\t\tcontinue", + "\t\t}", + "\t\tsubscriptions = append(subscriptions, subscription.Items...)", + "\t}", + "", + "\tfor i := range subscriptions {", + "\t\tlog.Info(\"Found subscription %q (ns %q)\", subscriptions[i].Name, subscriptions[i].Namespace)", + "\t}", + "\treturn subscriptions", + "}" + ] + }, + { + "name": "OperatorsV1alpha1", + "kind": "function" + }, + { + "name": "getAllOperators", + "kind": "function", + "source": [ + "func getAllOperators(olmClient v1alpha1.OperatorsV1alpha1Interface) ([]*olmv1Alpha.ClusterServiceVersion, error) {", + "\tcsvs := []*olmv1Alpha.ClusterServiceVersion{}", + "", + "\tcsvList, err := olmClient.ClusterServiceVersions(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"error when listing CSVs in all namespaces, 
err: %v\", err)", + "\t}", + "\tfor i := range csvList.Items {", + "\t\tcsvs = append(csvs, \u0026csvList.Items[i])", + "\t}", + "", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q (ns %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs, nil", + "}" + ] + }, + { + "name": "OperatorsV1alpha1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "getAllInstallPlans", + "kind": "function", + "source": [ + "func getAllInstallPlans(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.InstallPlan) {", + "\tinstallPlanList, err := olmClient.InstallPlans(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get installplans in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range installPlanList.Items {", + "\t\tout = append(out, \u0026installPlanList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "OperatorsV1alpha1", + "kind": "function" + }, + { + "name": "getAllCatalogSources", + "kind": "function", + "source": [ + "func getAllCatalogSources(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.CatalogSource) {", + "\tcatalogSourcesList, err := olmClient.CatalogSources(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get CatalogSources in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range catalogSourcesList.Items {", + "\t\tout = append(out, \u0026catalogSourcesList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "OperatorsV1alpha1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + 
"kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "getAllPackageManifests", + "kind": "function", + "source": [ + "func getAllPackageManifests(olmPkgClient olmpkgclient.PackageManifestInterface) (out []*olmpkgv1.PackageManifest) {", + "\tpackageManifestsList, err := olmPkgClient.List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get Package Manifests in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range packageManifestsList.Items {", + "\t\tout = append(out, \u0026packageManifestsList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "PackageManifests", + "kind": "function" + }, + { + "name": "namespacesListToStringList", + "kind": "function", + "source": [ + "func namespacesListToStringList(namespaceList []configuration.Namespace) (stringList []string) {", + "\tfor _, ns := range namespaceList {", + "\t\tstringList = append(stringList, ns.Name)", + "\t}", + "\treturn stringList", + "}" + ] + }, + { + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = 
oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "name": "CountPodsByStatus", + "kind": "function", + "source": [ + "func CountPodsByStatus(allPods []corev1.Pod) map[string]int {", + "\tpodStates := map[string]int{", + "\t\t\"ready\": 0,", + "\t\t\"non-ready\": 0,", + "\t}", + "", + "\tfor i := range allPods {", + "\t\tif allPods[i].Status.Phase == corev1.PodRunning {", + "\t\t\tpodStates[\"ready\"]++", + "\t\t} else {", + "\t\t\tpodStates[\"non-ready\"]++", + "\t\t}", + "\t}", + "", + "\treturn podStates", + "}" + ] + }, + { + "name": "findAbnormalEvents", + "kind": "function", + "source": [ + "func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (abnormalEvents []corev1.Event) {", + "\tabnormalEvents = []corev1.Event{}", + "\tfor _, ns := range namespaces {", + "\t\tsomeAbnormalEvents, err := oc.Events(ns).List(context.TODO(), metav1.ListOptions{FieldSelector: \"type!=Normal\"})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get event list for namespace %q, err: %v\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tabnormalEvents = append(abnormalEvents, someAbnormalEvents.Items...)", + "\t}", + "\treturn abnormalEvents", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func 
FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "name": "getResourceQuotas", + "kind": "function", + "source": [ + "func getResourceQuotas(oc corev1client.CoreV1Interface) ([]corev1.ResourceQuota, error) {", + "\trql, err := oc.ResourceQuotas(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn rql.Items, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ 
+ "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getPodDisruptionBudgets", + "kind": "function", + "source": [ + "func getPodDisruptionBudgets(oc policyv1client.PolicyV1Interface, namespaces []string) ([]policyv1.PodDisruptionBudget, error) {", + "\tpodDisruptionBudgets := []policyv1.PodDisruptionBudget{}", + "\tfor _, ns := range namespaces {", + "\t\tpdbs, err := oc.PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn nil, err", + "\t\t}", + "\t\tpodDisruptionBudgets = append(podDisruptionBudgets, pdbs.Items...)", + "\t}", + "", + "\treturn podDisruptionBudgets, nil", + "}" + ] + }, + { + "name": "PolicyV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getNetworkPolicies", + "kind": "function", + "source": [ + "func getNetworkPolicies(oc networkingv1client.NetworkingV1Interface) ([]networkingv1.NetworkPolicy, error) {", + "\tnps, err := oc.NetworkPolicies(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn nps.Items, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getClusterCrdNames", + "kind": 
"function", + "source": [ + "func getClusterCrdNames() ([]*apiextv1.CustomResourceDefinition, error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tcrds, err := oc.APIExtClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get cluster CRDs, err: %v\", err)", + "\t}", + "", + "\tvar crdList []*apiextv1.CustomResourceDefinition", + "\tfor idx := range crds.Items {", + "\t\tcrdList = append(crdList, \u0026crds.Items[idx])", + "\t}", + "\treturn crdList, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "FindTestCrdNames", + "kind": "function", + "source": [ + "func FindTestCrdNames(clusterCrds []*apiextv1.CustomResourceDefinition, crdFilters []configuration.CrdFilter) (targetCrds []*apiextv1.CustomResourceDefinition) {", + "\tif len(clusterCrds) == 0 {", + "\t\tlog.Error(\"Cluster does not have any CRDs\")", + "\t\treturn []*apiextv1.CustomResourceDefinition{}", + "\t}", + "\tfor _, crd := range clusterCrds {", + "\t\tfor _, crdFilter := range crdFilters {", + "\t\t\tif strings.HasSuffix(crd.Name, crdFilter.NameSuffix) {", + "\t\t\t\ttargetCrds = append(targetCrds, crd)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn targetCrds", + "}" + ] + }, + { + "name": "GetScaleCrUnderTest", + "kind": "function", + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != 
apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + }, + { + "name": "findOperatorsByLabels", + "kind": "function", + "source": [ + "func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\t// Helper namespaces map to do quick search of the operator's controller namespace.", + "\tnamespacesMap 
:= map[string]bool{}", + "\tfor _, ns := range namespaces {", + "\t\tnamespacesMap[ns.Name] = true", + "\t}", + "", + "\tcsvs = []*olmv1Alpha.ClusterServiceVersion{}", + "\tvar csvList *olmv1Alpha.ClusterServiceVersionList", + "\tfor _, ns := range namespaces {", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tcsvList = findOperatorsMatchingAtLeastOneLabel(olmClient, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching CSVs in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tcsvList, err = olmClient.ClusterServiceVersions(ns.Name).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing csvs in namespace %q , err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\tfor i := range csvList.Items {", + "\t\t\tcsv := \u0026csvList.Items[i]", + "", + "\t\t\t// Filter out CSV if operator's controller pod/s is/are not running in any configured/test namespace.", + "\t\t\tcontrollerNamespace, found := csv.Annotations[nsAnnotation]", + "\t\t\tif !found {", + "\t\t\t\tlog.Error(\"Failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif namespacesMap[controllerNamespace] {", + "\t\t\t\tcsvs = append(csvs, csv)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q (namespace %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs", + "}" + ] + }, + { + "name": "OperatorsV1alpha1", + "kind": "function" + }, + { + "name": "findSubscriptions", + "kind": "function", + "source": [ + "func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription {", + "\tsubscriptions := []olmv1Alpha.Subscription{}", + "\tfor _, ns := range namespaces {", + "\t\tdisplayNs := ns", + "\t\tif ns == \"\" {", + 
"\t\t\tdisplayNs = \"All Namespaces\"", + "\t\t}", + "\t\tlog.Debug(\"Searching subscriptions in namespace %q\", displayNs)", + "\t\tsubscription, err := olmClient.Subscriptions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing subscriptions in namespace %q\", ns)", + "\t\t\tcontinue", + "\t\t}", + "\t\tsubscriptions = append(subscriptions, subscription.Items...)", + "\t}", + "", + "\tfor i := range subscriptions {", + "\t\tlog.Info(\"Found subscription %q (ns %q)\", subscriptions[i].Name, subscriptions[i].Namespace)", + "\t}", + "\treturn subscriptions", + "}" + ] + }, + { + "name": "OperatorsV1alpha1", + "kind": "function" + }, + { + "name": "getHelmList", + "kind": "function", + "source": [ + "func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*release.Release {", + "\thelmChartReleases := map[string][]*release.Release{}", + "\tfor _, ns := range namespaces {", + "\t\topt := \u0026helmclient.RestConfClientOptions{", + "\t\t\tOptions: \u0026helmclient.Options{", + "\t\t\t\tNamespace: ns,", + "\t\t\t\tRepositoryCache: \"/tmp/.helmcache\",", + "\t\t\t\tRepositoryConfig: \"/tmp/.helmrepo\",", + "\t\t\t\tDebug: true,", + "\t\t\t\tLinting: true,", + "\t\t\t\tDebugLog: log.Info,", + "\t\t\t},", + "\t\t\tRestConfig: restConfig,", + "\t\t}", + "", + "\t\thelmClient, err := helmclient.NewClientFromRestConf(opt)", + "\t\tif err != nil {", + "\t\t\tpanic(err)", + "\t\t}", + "\t\tnsHelmchartreleases, _ := helmClient.ListDeployedReleases()", + "\t\thelmChartReleases[ns] = nsHelmchartreleases", + "\t}", + "\treturn helmChartReleases", + "}" + ] + }, + { + "name": "findClusterOperators", + "kind": "function", + "source": [ + "func findClusterOperators(client clientconfigv1.ClusterOperatorInterface) ([]configv1.ClusterOperator, error) {", + "\tclusterOperators, err := client.List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + 
"\t\treturn nil, err", + "\t}", + "", + "\tif k8serrors.IsNotFound(err) {", + "\t\tlog.Debug(\"ClusterOperator CR not found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\treturn clusterOperators.Items, nil", + "}" + ] + }, + { + "name": "ClusterOperators", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getOperatorCsvPods", + "kind": "function", + "source": [ + "func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types.NamespacedName][]*corev1.Pod, error) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\tclient := clientsholder.GetClientsHolder()", + "\tcsvToPodsMapping := make(map[types.NamespacedName][]*corev1.Pod)", + "", + "\t// The operator's pod (controller) should run in the subscription/operatorgroup ns.", + "\tfor _, csv := range csvList {", + "\t\tns, found := csv.Annotations[nsAnnotation]", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t}", + "", + "\t\tpods, err := getPodsOwnedByCsv(csv.Name, strings.TrimSpace(ns), client)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get pods from ns %v: %v\", ns, err)", + "\t\t}", + "", + "\t\tcsvToPodsMapping[types.NamespacedName{Name: csv.Name, Namespace: csv.Namespace}] = pods", + "\t}", + "\treturn csvToPodsMapping, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + 
"\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": 
"getOperandPodsFromTestCsvs", + "kind": "function", + "source": [ + "func getOperandPodsFromTestCsvs(testCsvs []*olmv1Alpha.ClusterServiceVersion, pods []corev1.Pod) ([]*corev1.Pod, error) {", + "\t// Helper var to store all the managed crds from the operators under test", + "\t// They map key is \"Kind.group/version\" or \"Kind.APIversion\", which should be the same.", + "\t// e.g.: \"Subscription.operators.coreos.com/v1alpha1\"", + "\tcrds := map[string]*olmv1Alpha.ClusterServiceVersion{}", + "", + "\t// First, iterate on each testCsv to fill the helper crds map.", + "\tfor _, csv := range testCsvs {", + "\t\townedCrds := csv.Spec.CustomResourceDefinitions.Owned", + "\t\tif len(ownedCrds) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range ownedCrds {", + "\t\t\tcrd := \u0026ownedCrds[i]", + "", + "\t\t\t_, group, found := strings.Cut(crd.Name, \".\")", + "\t\t\tif !found {", + "\t\t\t\treturn nil, fmt.Errorf(\"failed to parse resources and group from crd name %q\", crd.Name)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"CSV %q owns crd %v\", csv.Name, crd.Kind+\"/\"+group+\"/\"+crd.Version)", + "", + "\t\t\tcrdPath := path.Join(crd.Kind, group, crd.Version)", + "\t\t\tcrds[crdPath] = csv", + "\t\t}", + "\t}", + "", + "\t// Now, iterate on every pod in the list to check whether they're owned by any of the CRs that", + "\t// the csvs are managing.", + "\toperandPods := []*corev1.Pod{}", + "\tfor i := range pods {", + "\t\tpod := \u0026pods[i]", + "\t\towners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get top owners of pod %v/%v: %v\", pod.Namespace, pod.Name, err)", + "\t\t}", + "", + "\t\tfor _, owner := range owners {", + "\t\t\tversionedCrdPath := path.Join(owner.Kind, owner.APIVersion)", + "", + "\t\t\tvar csv *olmv1Alpha.ClusterServiceVersion", + "\t\t\tif csv = crds[versionedCrdPath]; csv == nil {", + "\t\t\t\t// The owner is not a CR or it's not a CR 
owned by any operator under test", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Pod %v/%v has owner CR %s of CRD %q (CSV %v)\", pod.Namespace, pod.Name,", + "\t\t\t\towner.Name, versionedCrdPath, csv.Name)", + "", + "\t\t\toperandPods = append(operandPods, pod)", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn operandPods, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getOpenshiftVersion", + "kind": "function", + "source": [ + "func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, err error) {", + "\tvar clusterOperator *configv1.ClusterOperator", + "\tclusterOperator, err = oClient.ClusterOperators().Get(context.TODO(), \"openshift-apiserver\", metav1.GetOptions{})", + "\tif err != nil {", + "\t\tswitch {", + "\t\tcase kerrors.IsNotFound(err):", + "\t\t\tlog.Warn(\"Unable to get ClusterOperator CR from openshift-apiserver. 
Running in a non-OCP cluster.\")", + "\t\t\treturn NonOpenshiftClusterVersion, nil", + "\t\tdefault:", + "\t\t\treturn \"\", err", + "\t\t}", + "\t}", + "", + "\tfor _, ver := range clusterOperator.Status.Versions {", + "\t\tif ver.Name == tnfCsvTargetLabelName {", + "\t\t\t// openshift-apiserver does not report version,", + "\t\t\t// clusteroperator/openshift-apiserver does, and only version number", + "\t\t\tlog.Info(\"OpenShift Version found: %v\", ver.Version)", + "\t\t\treturn ver.Version, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", errors.New(\"could not get openshift version from clusterOperator\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "ServerVersion", + "kind": "function" + }, + { + "name": "Discovery", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "DetermineOCPStatus", + "kind": "function", + "source": [ + "func DetermineOCPStatus(version string, date time.Time) string {", + "\t// Safeguard against empty values being passed in", + "\tif version == \"\" || date.IsZero() {", + "\t\treturn OCPStatusUnknown", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tsplitVersion := strings.Split(version, \".\")", + "\tversion = 
splitVersion[0] + \".\" + splitVersion[1]", + "", + "\t// Check if the version exists in our local map", + "\tlifecycleDates := GetLifeCycleDates()", + "\tif entry, ok := lifecycleDates[version]; ok {", + "\t\t// Safeguard against the latest versions not having a date set for FSEDate set.", + "\t\t// See the OpenShift lifecycle website link (above) for more details on this.", + "\t\tif entry.FSEDate.IsZero() {", + "\t\t\tentry.FSEDate = entry.MSEDate", + "\t\t}", + "", + "\t\t// Pre-GA", + "\t\tif date.Before(entry.GADate) {", + "\t\t\treturn OCPStatusPreGA", + "\t\t}", + "\t\t// Generally Available", + "\t\tif date.Equal(entry.GADate) || date.After(entry.GADate) \u0026\u0026 date.Before(entry.FSEDate) {", + "\t\t\treturn OCPStatusGA", + "\t\t}", + "\t\t// Maintenance Support", + "\t\tif date.Equal(entry.FSEDate) || (date.After(entry.FSEDate) \u0026\u0026 date.Before(entry.MSEDate)) {", + "\t\t\treturn OCPStatusMS", + "\t\t}", + "\t\t// End of Life", + "\t\tif date.Equal(entry.MSEDate) || date.After(entry.MSEDate) {", + "\t\t\treturn OCPStatusEOL", + "\t\t}", + "\t}", + "", + "\treturn OCPStatusUnknown", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "name": "findDeploymentsByLabels", + "kind": "function", + "source": [ + "func findDeploymentsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.Deployment {", + "\tallDeployments := []appsv1.Deployment{}", + "\tfor _, ns := range namespaces {", + "\t\tdps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list deployments in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(dps.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any deployments in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(dps.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The deployment is added only once if at least one pod matches one label in the Deployment", + "\t\t\t\tif isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, \u0026dps.Items[i]) {", + "\t\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all deployments in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q without label\", dps.Items[i].Name, ns)", + "\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\tlog.Info(\"Deployment %s found in ns=%s\", dps.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allDeployments) == 0 {", + "\t\tlog.Warn(\"Did not find any deployment in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allDeployments", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "findStatefulSetsByLabels", + "kind": "function", + "source": [ + "func findStatefulSetsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.StatefulSet {", + "\tallStatefulSets := []appsv1.StatefulSet{}", + "\tfor _, ns := range namespaces {", + "\t\tstatefulSet, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list statefulsets in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(statefulSet.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any statefulSet in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(statefulSet.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The StatefulSet is added only once if at least one pod matches one label in the Statefulset", + "\t\t\t\tif isStatefulSetsMatchingAtLeastOneLabel(labels, ns, \u0026statefulSet.Items[i]) {", + "\t\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all statefulsets in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q without label\", statefulSet.Items[i].Name, ns)", + "\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allStatefulSets) == 0 {", + "\t\tlog.Warn(\"Did not find any statefulset in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allStatefulSets", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "isIstioServiceMeshInstalled", + "kind": "function", + "source": [ + "func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs []string) bool {", + "\t// The Istio namespace must be present", + "\tif !stringhelper.StringInSlice(allNs, istioNamespace, false) {", + "\t\tlog.Info(\"Istio Service Mesh not present (the namespace %q does not exists)\", istioNamespace)", + "\t\treturn false", + "\t}", + "", + "\t// The Deployment \"istiod\" must be present in an active service mesh", + "\t_, err := appClient.Deployments(istioNamespace).Get(context.TODO(), istioDeploymentName, metav1.GetOptions{})", + "\tif errors.IsNotFound(err) {", + "\t\tlog.Warn(\"The Istio 
Deployment %q is missing (but the Istio namespace exists)\", istioDeploymentName)", + "\t\treturn false", + "\t} else if err != nil {", + "\t\tlog.Error(\"Failed getting Deployment %q\", istioDeploymentName)", + "\t\treturn false", + "\t}", + "", + "\tlog.Info(\"Istio Service Mesh detected\")", + "", + "\treturn true", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "getClusterRoleBindings", + "kind": "function", + "source": [ + "func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.ClusterRoleBinding, error) {", + "\t// Get all of the clusterrolebindings from the cluster", + "\t// These are not namespaced so we want all of them", + "\tcrbList, crbErr := client.ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})", + "\tif crbErr != nil {", + "\t\tlog.Error(\"Executing clusterrolebinding command failed with error: %v\", crbErr)", + "\t\treturn nil, crbErr", + "\t}", + "\treturn crbList.Items, nil", + "}" + ] + }, + { + "name": "RbacV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getRoleBindings", + "kind": "function", + "source": [ + "func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, error) {", + "\t// Get all of the rolebindings from all namespaces", + "\troleList, roleErr := client.RoleBindings(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif roleErr != nil {", + "\t\tlog.Error(\"Executing rolebinding command failed with error: %v\", roleErr)", + "\t\treturn nil, roleErr", + "\t}", + "\treturn roleList.Items, nil", + "}" + ] + }, + { + "name": "RbacV1", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getRoles", + "kind": "function", + "source": [ + "func getRoles(client rbacv1typed.RbacV1Interface) ([]rbacv1.Role, error) {", + "\t// Get all of the roles from all namespaces", + "\troleList, roleErr := client.Roles(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif roleErr != nil {", + "\t\tlog.Error(\"Executing roles command failed with error: %v\", roleErr)", + "\t\treturn nil, roleErr", + "\t}", + "\treturn roleList.Items, nil", + "}" + ] + }, + { + "name": "RbacV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "findHpaControllers", + "kind": "function", + "source": [ + "func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scalingv1.HorizontalPodAutoscaler {", + "\tvar m []*scalingv1.HorizontalPodAutoscaler", + "\tfor _, ns := range namespaces {", + "\t\thpas, err := cs.AutoscalingV1().HorizontalPodAutoscalers(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Cannot list HorizontalPodAutoscalers on namespace %q, err: %v\", ns, err)", + "\t\t\treturn m", + "\t\t}", + "\t\tfor i := 0; i \u003c len(hpas.Items); i++ {", + "\t\t\tm = append(m, \u0026hpas.Items[i])", + "\t\t}", + "\t}", + "\tif len(m) == 0 {", + "\t\tlog.Info(\"Cannot find any deployed HorizontalPodAutoscaler\")", + "\t}", + "\treturn m", + "}" 
+ ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Nodes", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getPersistentVolumes", + "kind": "function", + "source": [ + "func getPersistentVolumes(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolume, error) {", + "\tpvs, err := oc.PersistentVolumes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn pvs.Items, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getPersistentVolumeClaims", + "kind": "function", + "source": [ + "func getPersistentVolumeClaims(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolumeClaim, error) {", + "\tpvcs, err := oc.PersistentVolumeClaims(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn pvcs.Items, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + 
"\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getServices", + "kind": "function", + "source": [ + "func getServices(oc corev1client.CoreV1Interface, namespaces, ignoreList []string) (allServices []*corev1.Service, err error) {", + "\tfor _, ns := range namespaces {", + "\t\ts, err := oc.Services(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn allServices, err", + "\t\t}", + "\t\tfor i := range s.Items {", + "\t\t\tif stringhelper.StringInSlice(ignoreList, s.Items[i].Name, false) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tallServices = append(allServices, \u0026s.Items[i])", + "\t\t}", + "\t}", + "\treturn allServices, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getServices", + "kind": "function", + "source": [ + "func getServices(oc corev1client.CoreV1Interface, namespaces, ignoreList []string) (allServices []*corev1.Service, err error) {", + "\tfor _, ns := range namespaces {", + "\t\ts, err := oc.Services(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn allServices, err", + "\t\t}", + "\t\tfor i := range s.Items {", + "\t\t\tif stringhelper.StringInSlice(ignoreList, s.Items[i].Name, false) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tallServices = append(allServices, \u0026s.Items[i])", + "\t\t}", + "\t}", + "\treturn allServices, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getServiceAccounts", + "kind": "function", + "source": [ + "func getServiceAccounts(oc corev1client.CoreV1Interface, namespaces []string) (servicesAccounts []*corev1.ServiceAccount, err error) {", + "\tfor _, ns := range namespaces {", + "\t\ts, err := oc.ServiceAccounts(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn servicesAccounts, err", + "\t\t}", + "\t\tfor i := range s.Items {", + "\t\t\tservicesAccounts = append(servicesAccounts, \u0026s.Items[i])", + "\t\t}", + "\t}", + "\treturn servicesAccounts, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getServiceAccounts", + "kind": "function", + "source": [ + "func getServiceAccounts(oc corev1client.CoreV1Interface, namespaces []string) (servicesAccounts []*corev1.ServiceAccount, err error) {", + "\tfor _, ns := range namespaces {", + "\t\ts, err := oc.ServiceAccounts(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn servicesAccounts, err", + "\t\t}", + "\t\tfor i := range s.Items {", + "\t\t\tservicesAccounts = append(servicesAccounts, \u0026s.Items[i])", + "\t\t}", + "\t}", + "\treturn servicesAccounts, nil", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getSriovNetworks", + "kind": "function", + "source": [ + "func getSriovNetworks(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworks []unstructured.Unstructured, err error) {", + "\t// Check for nil client or DynamicClient to prevent panic", + "\tif client == nil || client.DynamicClient == nil {", + "\t\treturn []unstructured.Unstructured{}, nil", + "\t}", + "", + "\tvar sriovNetworkList []unstructured.Unstructured", + "", + "\tfor _, ns := range namespaces {", + "\t\tsnl, err := client.DynamicClient.Resource(SriovNetworkGVR).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of sriovNetworks to the sriovNetworks slice", + "\t\tif snl != nil {", + "\t\t\tsriovNetworkList = append(sriovNetworkList, snl.Items...)", + "\t\t}", + "\t}", + "\treturn sriovNetworkList, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getSriovNetworkNodePolicies", + "kind": "function", + "source": [ + "func getSriovNetworkNodePolicies(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworkNodePolicies []unstructured.Unstructured, err error) {", + "\t// Check for nil client or DynamicClient to prevent panic", + "\tif client == nil || 
client.DynamicClient == nil {", + "\t\treturn []unstructured.Unstructured{}, nil", + "\t}", + "", + "\tvar sriovNetworkNodePolicyList []unstructured.Unstructured", + "", + "\tfor _, ns := range namespaces {", + "\t\tsnnp, err := client.DynamicClient.Resource(SriovNetworkNodePolicyGVR).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of sriovNetworkNodePolicies to the sriovNetworkNodePolicies slice", + "\t\tif snnp != nil {", + "\t\t\tsriovNetworkNodePolicyList = append(sriovNetworkNodePolicyList, snnp.Items...)", + "\t\t}", + "\t}", + "\treturn sriovNetworkNodePolicyList, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getSriovNetworks", + "kind": "function", + "source": [ + "func getSriovNetworks(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworks []unstructured.Unstructured, err error) {", + "\t// Check for nil client or DynamicClient to prevent panic", + "\tif client == nil || client.DynamicClient == nil {", + "\t\treturn []unstructured.Unstructured{}, nil", + "\t}", + "", + "\tvar sriovNetworkList []unstructured.Unstructured", + "", + "\tfor _, ns := range namespaces {", + "\t\tsnl, err := client.DynamicClient.Resource(SriovNetworkGVR).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of sriovNetworks to the sriovNetworks slice", + "\t\tif snl != nil {", + "\t\t\tsriovNetworkList = append(sriovNetworkList, snl.Items...)", + 
"\t\t}", + "\t}", + "\treturn sriovNetworkList, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getSriovNetworkNodePolicies", + "kind": "function", + "source": [ + "func getSriovNetworkNodePolicies(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworkNodePolicies []unstructured.Unstructured, err error) {", + "\t// Check for nil client or DynamicClient to prevent panic", + "\tif client == nil || client.DynamicClient == nil {", + "\t\treturn []unstructured.Unstructured{}, nil", + "\t}", + "", + "\tvar sriovNetworkNodePolicyList []unstructured.Unstructured", + "", + "\tfor _, ns := range namespaces {", + "\t\tsnnp, err := client.DynamicClient.Resource(SriovNetworkNodePolicyGVR).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of sriovNetworkNodePolicies to the sriovNetworkNodePolicies slice", + "\t\tif snnp != nil {", + "\t\t\tsriovNetworkNodePolicyList = append(sriovNetworkNodePolicyList, snnp.Items...)", + "\t\t}", + "\t}", + "\treturn sriovNetworkNodePolicyList, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "getNetworkAttachmentDefinitions", + "kind": "function", + "source": [ + "func getNetworkAttachmentDefinitions(client 
*clientsholder.ClientsHolder, namespaces []string) ([]nadClient.NetworkAttachmentDefinition, error) {", + "\tvar nadList []nadClient.NetworkAttachmentDefinition", + "", + "\tfor _, ns := range namespaces {", + "\t\tnad, err := client.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of networkAttachmentDefinitions to the nadList slice", + "\t\tnadList = append(nadList, nad.Items...)", + "\t}", + "", + "\treturn nadList, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + 
"\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + 
"\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a 
filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = 
data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: 
%+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = 
getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: 
%v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = 
getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network 
attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "name": "FindCrObjectByNameByNamespace", + "qualifiedName": "FindCrObjectByNameByNamespace", + "exported": true, + "signature": "func(scale.ScalesGetter, string, string, schema.GroupResource)(*scalingv1.Scale, error)", + "doc": "FindCrObjectByNameByNamespace Retrieves a scaling object for a given resource\n\nThe function queries the Kubernetes API to obtain a Scale resource identified\nby namespace, name, and group‑resource schema. 
It returns the retrieved\nscale object or an error if the request fails, logging a message on failure.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:67", + "calls": [ + { + "name": "Get", + "kind": "function" + }, + { + "name": "Scales", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetUpdatedCrObject", + "kind": "function", + "source": [ + "func GetUpdatedCrObject(sg scale.ScalesGetter, namespace, name string, groupResourceSchema schema.GroupResource) (*CrScale, error) {", + "\tresult, err := autodiscover.FindCrObjectByNameByNamespace(sg, namespace, name, groupResourceSchema)", + "\treturn \u0026CrScale{", + "\t\tresult,", + "\t}, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FindCrObjectByNameByNamespace(scalesGetter scale.ScalesGetter, ns, name string, groupResourceSchema schema.GroupResource) (*scalingv1.Scale, error) {", + "\tcrScale, err := scalesGetter.Scales(ns).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", ns, name)", + "\t\treturn nil, err", + "\t}", + "\treturn crScale, nil", + "}" + ] + }, + { + "name": "FindDeploymentByNameByNamespace", + "qualifiedName": "FindDeploymentByNameByNamespace", + "exported": true, + "signature": "func(appv1client.AppsV1Interface, string, string)(*appsv1.Deployment, error)", + "doc": "FindDeploymentByNameByNamespace Retrieves a deployment by name within a specified namespace\n\nThe function queries the 
Kubernetes API for a Deployment object using the\nprovided client, namespace, and name. If the query fails, it logs an error\nand returns the encountered error; otherwise it returns the retrieved\nDeployment pointer.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:38", + "calls": [ + { + "name": "Get", + "kind": "function" + }, + { + "name": "Deployments", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetUpdatedDeployment", + "kind": "function", + "source": [ + "func GetUpdatedDeployment(ac appv1client.AppsV1Interface, namespace, name string) (*Deployment, error) {", + "\tresult, err := autodiscover.FindDeploymentByNameByNamespace(ac, namespace, name)", + "\treturn \u0026Deployment{", + "\t\tresult,", + "\t}, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.Deployment, error) {", + "\tdp, err := appClient.Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", namespace, name)", + "\t\treturn nil, err", + "\t}", + "\treturn dp, nil", + "}" + ] + }, + { + "name": "FindPodsByLabels", + "qualifiedName": "FindPodsByLabels", + "exported": true, + "signature": "func(corev1client.CoreV1Interface, []labelObject, []string)([]corev1.Pod)", + "doc": "FindPodsByLabels Retrieves pods matching specified labels across namespaces\n\nThe function queries each provided 
namespace for pods, optionally filtering\nby one or more label key/value pairs. It returns two slices: runningPods\ncontains only those that are not marked for deletion and either in the\nRunning phase or allowed non‑running per configuration; allPods includes\nevery pod found regardless of status. Errors during listing are logged and\nskipped.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_pods.go:60", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "findPodsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []labelObject, namespace string) *corev1.PodList {", + "\tallPods := \u0026corev1.PodList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching Pods in namespace %s with label %q\", namespace, l)", + "\t\tpods, err := oc.Pods(namespace).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing pods in ns=%s label=%s, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tallPods.Items = append(allPods.Items, pods.Items...)", + "\t}", + "\treturn allPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Pods", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + 
}, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + 
"\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = 
findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find 
ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = 
getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + 
"\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "name": "FindStatefulsetByNameByNamespace", + "qualifiedName": "FindStatefulsetByNameByNamespace", + "exported": true, + "signature": 
"func(appv1client.AppsV1Interface, string, string)(*appsv1.StatefulSet, error)", + "doc": "FindStatefulsetByNameByNamespace Retrieves a StatefulSet by name within a specified namespace\n\nThe function calls the Kubernetes API to fetch a StatefulSet resource using\nthe provided client, namespace, and name. If the retrieval fails, it logs an\nerror message and returns nil along with the encountered error; otherwise, it\nreturns the fetched StatefulSet and a nil error.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:53", + "calls": [ + { + "name": "Get", + "kind": "function" + }, + { + "name": "StatefulSets", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetUpdatedStatefulset", + "kind": "function", + "source": [ + "func GetUpdatedStatefulset(ac appv1client.AppsV1Interface, namespace, name string) (*StatefulSet, error) {", + "\tresult, err := autodiscover.FindStatefulsetByNameByNamespace(ac, namespace, name)", + "\treturn \u0026StatefulSet{", + "\t\tresult,", + "\t}, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, error) {", + "\tss, err := appClient.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", namespace, name)", + "\t\treturn nil, err", + "\t}", + "\treturn ss, nil", + "}" + ] + }, + { + "name": "FindTestCrdNames", + 
"qualifiedName": "FindTestCrdNames", + "exported": true, + "signature": "func([]*apiextv1.CustomResourceDefinition, []configuration.CrdFilter)([]*apiextv1.CustomResourceDefinition)", + "doc": "FindTestCrdNames Selects CRDs that match configured suffixes\n\nThe function scans a list of cluster CRDs, comparing each name against a set\nof suffix filters defined in the configuration. When a CRD’s name ends with\nany specified suffix, it is added to the result slice. If no CRDs are\npresent, an error is logged and an empty slice is returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_crds.go:60", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "HasSuffix", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = 
getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + 
"\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP 
version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = 
getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = 
config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FindTestCrdNames(clusterCrds []*apiextv1.CustomResourceDefinition, crdFilters []configuration.CrdFilter) (targetCrds []*apiextv1.CustomResourceDefinition) {", + "\tif len(clusterCrds) == 0 {", + "\t\tlog.Error(\"Cluster does not have any CRDs\")", + "\t\treturn []*apiextv1.CustomResourceDefinition{}", + "\t}", + "\tfor _, crd := range clusterCrds {", + "\t\tfor _, crdFilter := range crdFilters {", + "\t\t\tif strings.HasSuffix(crd.Name, crdFilter.NameSuffix) {", + "\t\t\t\ttargetCrds = append(targetCrds, crd)", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn targetCrds", + "}" + ] + }, + { + "name": "GetScaleCrUnderTest", + "qualifiedName": "GetScaleCrUnderTest", + "exported": true, + "signature": "func([]string, []*apiextv1.CustomResourceDefinition)([]ScaleObject)", + "doc": "GetScaleCrUnderTest Retrieves scalable custom resources across specified namespaces\n\nIt iterates over a list of CustomResourceDefinitions, filtering for\nnamespace-scoped and having a scale subresource. For each qualifying CRD it\nlists the custom resources in the provided namespaces using a dynamic client,\nthen gathers their scale objects. 
The result is a slice of ScaleObject\ncontaining scaling information for each found resource.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_scales.go:32", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Namespace", + "kind": "function" + }, + { + "name": "Resource", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, 
\"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "getCrScaleObjects", + "kind": "function", + "source": [ + "func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tvar scaleObjects []ScaleObject", + "\tclients := clientsholder.GetClientsHolder()", + "\tfor _, cr := range crs {", + "\t\tgroupResourceSchema := schema.GroupResource{", + "\t\t\tGroup: crd.Spec.Group,", + "\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t}", + "", + "\t\tname := cr.GetName()", + "\t\tnamespace := cr.GetNamespace()", + "\t\tcrScale, err := clients.ScalingClient.Scales(namespace).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Error while getting the scale of CR=%s (CRD=%s) in namespace %s: %v\", name, crd.Name, namespace, err)", + "\t\t}", + "", + "\t\tscaleObjects = append(scaleObjects, ScaleObject{Scale: crScale, GroupResourceSchema: groupResourceSchema})", + "\t}", + "\treturn scaleObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := 
CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), 
data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = 
openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + 
"\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + 
"\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. 
Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + }, + { + "name": "findAbnormalEvents", + "qualifiedName": "findAbnormalEvents", + "exported": false, + "signature": "func(corev1client.CoreV1Interface, []string)([]corev1.Event)", + "doc": "findAbnormalEvents collects non-normal events from specified namespaces\n\nThe function iterates over each namespace provided, querying the Kubernetes\nAPI for events whose type is not Normal. It aggregates these events into a\nsingle slice, logging an error and skipping any namespace where the list\noperation fails. 
The resulting slice of corev1.Event objects is returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_events.go:34", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Events", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = 
getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + 
"\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, 
data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = 
getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", 
+ "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (abnormalEvents []corev1.Event) {", + "\tabnormalEvents = []corev1.Event{}", + "\tfor _, ns := range namespaces {", + "\t\tsomeAbnormalEvents, err := oc.Events(ns).List(context.TODO(), metav1.ListOptions{FieldSelector: \"type!=Normal\"})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get event list for namespace %q, err: %v\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tabnormalEvents = append(abnormalEvents, someAbnormalEvents.Items...)", + "\t}", + "\treturn abnormalEvents", + "}" + ] + }, + { + "name": "findClusterOperators", + "qualifiedName": "findClusterOperators", + "exported": false, + "signature": "func(clientconfigv1.ClusterOperatorInterface)([]configv1.ClusterOperator, error)", + "doc": "findClusterOperators Retrieves all ClusterOperator resources from the cluster\n\nThe function calls the client to list ClusterOperator objects, handling\nerrors that may occur during the request. If the API returns a not‑found\nerror, it logs a debug message and returns nil without error. 
On success, it\nreturns a slice of the retrieved items.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_clusteroperators.go:19", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", 
+ "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = 
findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, 
data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = 
config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findClusterOperators(client clientconfigv1.ClusterOperatorInterface) ([]configv1.ClusterOperator, error) {", + "\tclusterOperators, err := client.List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\treturn nil, err", + "\t}", + "", + "\tif k8serrors.IsNotFound(err) {", + "\t\tlog.Debug(\"ClusterOperator CR not found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\treturn clusterOperators.Items, nil", + "}" + ] + }, + { + "name": "findDeploymentsByLabels", + "qualifiedName": "findDeploymentsByLabels", + "exported": false, + "signature": "func(appv1client.AppsV1Interface, []labelObject, []string)([]appsv1.Deployment)", + "doc": "findDeploymentsByLabels collects deployments matching specified labels across namespaces\n\nThe function iterates over each namespace, listing all deployment objects.\nFor every deployment it checks whether any of the provided label key/value\npairs match the pod template labels; if so or if no labels were supplied, the\ndeployment is added to a result slice. 
Errors during listing are logged and\nskipped, and a warning is emitted when a namespace contains no deployments.\n\nnolint:dupl", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:102", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Deployments", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "isDeploymentsPodsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func isDeploymentsPodsMatchingAtLeastOneLabel(labels []labelObject, namespace string, deployment *appsv1.Deployment) bool {", + "\tfor _, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q using label %s=%s\", deployment.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif deployment.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"Deployment %s found in ns=%s\", deployment.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args 
...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + 
"\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, 
config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), 
podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + 
"\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = 
config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findDeploymentsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.Deployment {", + "\tallDeployments := []appsv1.Deployment{}", + "\tfor _, ns := range namespaces {", + "\t\tdps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list deployments in ns=%s, err: %v . Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(dps.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any deployments in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(dps.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The deployment is added only once if at least one pod matches one label in the Deployment", + "\t\t\t\tif isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, \u0026dps.Items[i]) {", + "\t\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all deployments in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q without label\", dps.Items[i].Name, ns)", + "\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\tlog.Info(\"Deployment %s found in ns=%s\", dps.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allDeployments) == 0 {", + "\t\tlog.Warn(\"Did not find any deployment in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allDeployments", + "}" + ] + }, + { + "name": "findHpaControllers", + "qualifiedName": "findHpaControllers", + "exported": false, + "signature": "func(kubernetes.Interface, []string)([]*scalingv1.HorizontalPodAutoscaler)", + "doc": "findHpaControllers Collects all 
HorizontalPodAutoscaler objects across given namespaces\n\nThe function iterates over each namespace provided, listing the\nHorizontalPodAutoscalers in that namespace using the Kubernetes client. Each\ndiscovered HPA is appended to a slice which is returned after all namespaces\nare processed. If no HPAs are found or an error occurs during listing,\nappropriate log messages are emitted and an empty slice may be returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:208", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "HorizontalPodAutoscalers", + "kind": "function" + }, + { + "name": "AutoscalingV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + 
"\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = 
getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + 
"\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = 
getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", 
err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scalingv1.HorizontalPodAutoscaler {", + "\tvar m []*scalingv1.HorizontalPodAutoscaler", + "\tfor _, ns := range namespaces {", + "\t\thpas, err := cs.AutoscalingV1().HorizontalPodAutoscalers(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Cannot list HorizontalPodAutoscalers on namespace %q, err: %v\", ns, err)", + "\t\t\treturn m", + "\t\t}", + "\t\tfor i := 0; i \u003c len(hpas.Items); i++ {", + "\t\t\tm = append(m, \u0026hpas.Items[i])", + "\t\t}", + "\t}", + "\tif len(m) == 0 {", + "\t\tlog.Info(\"Cannot find any deployed HorizontalPodAutoscaler\")", + "\t}", + "\treturn m", + "}" + ] + }, + { + "name": "findOperatorsByLabels", + "qualifiedName": "findOperatorsByLabels", + "exported": false, + "signature": "func(v1alpha1.OperatorsV1alpha1Interface, []labelObject, []configuration.Namespace)([]*olmv1Alpha.ClusterServiceVersion)", + "doc": "findOperatorsByLabels Retrieves operator CSVs matching given labels across specified namespaces\n\nThe function iterates over each 
target namespace, collecting\nClusterServiceVersions that either match provided label selectors or are\nlisted without filters when no labels exist. It then verifies the\noperator’s controller pod resides in a configured test namespace by\nchecking an annotation and includes only those CSVs in the result set. Each\ndiscovered CSV is logged for visibility before being returned as a slice.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:110", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "findOperatorsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func findOperatorsMatchingAtLeastOneLabel(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespace configuration.Namespace) *olmv1Alpha.ClusterServiceVersionList {", + "\tcsvList := \u0026olmv1Alpha.ClusterServiceVersionList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching CSVs in namespace %q with label %q\", namespace, l)", + "\t\tcsv, err := olmClient.ClusterServiceVersions(namespace.Name).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing csvs in namespace %q with label %q, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tcsvList.Items = append(csvList.Items, csv.Items...)", + "\t}", + "\treturn csvList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "ClusterServiceVersions", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = 
getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = 
GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = 
findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = 
getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + 
"\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\t// Helper namespaces map to do quick search of the operator's controller namespace.", + "\tnamespacesMap := map[string]bool{}", + "\tfor _, ns := range namespaces {", + "\t\tnamespacesMap[ns.Name] = true", + "\t}", + "", + "\tcsvs = []*olmv1Alpha.ClusterServiceVersion{}", + "\tvar csvList *olmv1Alpha.ClusterServiceVersionList", + "\tfor _, ns := range namespaces {", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tcsvList = findOperatorsMatchingAtLeastOneLabel(olmClient, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching CSVs in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tcsvList, err = olmClient.ClusterServiceVersions(ns.Name).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing csvs in namespace %q , err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\tfor i := range csvList.Items {", + "\t\t\tcsv := \u0026csvList.Items[i]", + "", + "\t\t\t// Filter out CSV if operator's controller pod/s is/are not running in any configured/test namespace.", + "\t\t\tcontrollerNamespace, found := csv.Annotations[nsAnnotation]", + "\t\t\tif !found {", + "\t\t\t\tlog.Error(\"Failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif 
namespacesMap[controllerNamespace] {", + "\t\t\t\tcsvs = append(csvs, csv)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q (namespace %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs", + "}" + ] + }, + { + "name": "findOperatorsMatchingAtLeastOneLabel", + "qualifiedName": "findOperatorsMatchingAtLeastOneLabel", + "exported": false, + "signature": "func(v1alpha1.OperatorsV1alpha1Interface, []labelObject, configuration.Namespace)(*olmv1Alpha.ClusterServiceVersionList)", + "doc": "findOperatorsMatchingAtLeastOneLabel Retrieves operators whose CSVs match any of the provided labels\n\nThe function queries the OLM client for ClusterServiceVersions in a specific\nnamespace, filtering by each label in turn and aggregating all matching CSV\nitems into a single list. It logs debug information for each search attempt\nand records errors if a query fails, continuing with remaining labels. The\nreturned list contains every CSV that satisfies at least one of the supplied\nlabel selectors.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:86", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "ClusterServiceVersions", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findOperatorsByLabels", + "kind": "function", + "source": [ + "func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\t// Helper namespaces map to do quick search of the operator's controller namespace.", + "\tnamespacesMap := map[string]bool{}", + "\tfor _, ns := range namespaces {", + "\t\tnamespacesMap[ns.Name] = true", + "\t}", + "", + "\tcsvs = []*olmv1Alpha.ClusterServiceVersion{}", + "\tvar csvList *olmv1Alpha.ClusterServiceVersionList", + "\tfor _, ns := range namespaces {", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tcsvList = findOperatorsMatchingAtLeastOneLabel(olmClient, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching CSVs in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tcsvList, err = olmClient.ClusterServiceVersions(ns.Name).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing csvs in namespace %q , err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\tfor i := range csvList.Items {", + "\t\t\tcsv := \u0026csvList.Items[i]", + "", + "\t\t\t// Filter out CSV if operator's controller pod/s is/are not running in any configured/test namespace.", + "\t\t\tcontrollerNamespace, found := csv.Annotations[nsAnnotation]", + "\t\t\tif !found {", + "\t\t\t\tlog.Error(\"Failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif namespacesMap[controllerNamespace] {", + "\t\t\t\tcsvs = append(csvs, csv)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q 
(namespace %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findOperatorsMatchingAtLeastOneLabel(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespace configuration.Namespace) *olmv1Alpha.ClusterServiceVersionList {", + "\tcsvList := \u0026olmv1Alpha.ClusterServiceVersionList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching CSVs in namespace %q with label %q\", namespace, l)", + "\t\tcsv, err := olmClient.ClusterServiceVersions(namespace.Name).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing csvs in namespace %q with label %q, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tcsvList.Items = append(csvList.Items, csv.Items...)", + "\t}", + "\treturn csvList", + "}" + ] + }, + { + "name": "findPodsMatchingAtLeastOneLabel", + "qualifiedName": "findPodsMatchingAtLeastOneLabel", + "exported": false, + "signature": "func(corev1client.CoreV1Interface, []labelObject, string)(*corev1.PodList)", + "doc": "findPodsMatchingAtLeastOneLabel Retrieves pods that match any provided label in a namespace\n\nThe function iterates over each supplied label, querying the Kubernetes API\nfor pods that have the corresponding key-value pair. It accumulates all\nmatching pod objects into a single list, logging errors but continuing on\nfailures. 
The resulting list is returned, containing every pod that satisfies\nat least one of the specified labels.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_pods.go:36", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Pods", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing 
pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []labelObject, namespace string) *corev1.PodList {", + "\tallPods := \u0026corev1.PodList{}", + "\tfor _, l := range labels {", + "\t\tlog.Debug(\"Searching Pods in namespace %s with label %q\", namespace, l)", + "\t\tpods, err := oc.Pods(namespace).List(context.TODO(), metav1.ListOptions{", + "\t\t\tLabelSelector: l.LabelKey + \"=\" + l.LabelValue,", + "\t\t})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing pods in ns=%s label=%s, err: %v\", namespace, l.LabelKey+\"=\"+l.LabelValue, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tallPods.Items = append(allPods.Items, pods.Items...)", + "\t}", + "\treturn allPods", + "}" + ] + }, + { + "name": "findStatefulSetsByLabels", + "qualifiedName": "findStatefulSetsByLabels", + "exported": false, + "signature": "func(appv1client.AppsV1Interface, []labelObject, []string)([]appsv1.StatefulSet)", + "doc": "findStatefulSetsByLabels Retrieves statefulsets matching specified labels across namespaces\n\nThe function iterates over each provided namespace, listing all StatefulSet\nobjects via the client interface. 
It then filters those sets by checking if\nany of the supplied label key/value pairs match the pod template labels\ninside a StatefulSet; if no labels are given it includes every set found.\nMatching or included StatefulSets are collected into a slice that is\nreturned, with warnings logged when none are found.\n\nnolint:dupl", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:165", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "StatefulSets", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "isStatefulSetsMatchingAtLeastOneLabel", + "kind": "function", + "source": [ + "func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace string, statefulSet *appsv1.StatefulSet) bool {", + "\tfor _, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q using label %s=%s\", statefulSet.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif statefulSet.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" 
+ }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), 
[]string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + 
"\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + 
"\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = 
getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + 
"\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findStatefulSetsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.StatefulSet {", + "\tallStatefulSets := []appsv1.StatefulSet{}", + "\tfor _, ns := range namespaces {", + "\t\tstatefulSet, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list statefulsets in ns=%s, err: %v . Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(statefulSet.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any statefulSet in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(statefulSet.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The StatefulSet is added only once if at least one pod matches one label in the Statefulset", + "\t\t\t\tif isStatefulSetsMatchingAtLeastOneLabel(labels, ns, \u0026statefulSet.Items[i]) {", + "\t\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all statefulsets in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q without label\", statefulSet.Items[i].Name, ns)", + "\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allStatefulSets) == 0 {", + "\t\tlog.Warn(\"Did not find any statefulset in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allStatefulSets", + "}" + ] + }, + 
{ + "name": "findSubscriptions", + "qualifiedName": "findSubscriptions", + "exported": false, + "signature": "func(v1alpha1.OperatorsV1alpha1Interface, []string)([]olmv1Alpha.Subscription)", + "doc": "findSubscriptions Collects operator subscriptions across specified namespaces\n\nThis routine iterates over a list of namespace identifiers, querying the\nOpenShift Operator Lifecycle Manager for Subscription objects in each. It\nlogs debug information for each namespace, handles errors by logging them and\nskipping problematic ones, and aggregates all found subscriptions into a\nsingle slice. After gathering, it emits informational logs detailing each\nsubscription’s name and namespace before returning the compiled collection.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:203", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Subscriptions", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": 
"DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: 
probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get 
running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + 
"\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription {", + "\tsubscriptions := []olmv1Alpha.Subscription{}", + "\tfor _, ns := range namespaces {", + "\t\tdisplayNs := ns", + "\t\tif ns == \"\" {", + "\t\t\tdisplayNs = \"All Namespaces\"", + "\t\t}", + "\t\tlog.Debug(\"Searching subscriptions in namespace %q\", displayNs)", + "\t\tsubscription, err := olmClient.Subscriptions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error when listing subscriptions in namespace %q\", ns)", + "\t\t\tcontinue", + "\t\t}", + "\t\tsubscriptions = append(subscriptions, subscription.Items...)", + "\t}", + "", + "\tfor i := 
range subscriptions {", + "\t\tlog.Info(\"Found subscription %q (ns %q)\", subscriptions[i].Name, subscriptions[i].Namespace)", + "\t}", + "\treturn subscriptions", + "}" + ] + }, + { + "name": "getAllCatalogSources", + "qualifiedName": "getAllCatalogSources", + "exported": false, + "signature": "func(v1alpha1.OperatorsV1alpha1Interface)([]*olmv1Alpha.CatalogSource)", + "doc": "getAllCatalogSources Retrieves all CatalogSource objects from the cluster\n\nThe function queries the operator lifecycle manager for catalog sources in\nevery namespace, handling any errors by logging them and returning an empty\nslice. It iterates over the returned list, appending pointers to each item\ninto a result slice which is then returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:280", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "CatalogSources", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := 
CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod 
disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", 
+ "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: 
%v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err 
!= nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAllCatalogSources(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.CatalogSource) {", + "\tcatalogSourcesList, err := olmClient.CatalogSources(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get CatalogSources in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range catalogSourcesList.Items {", + "\t\tout = append(out, \u0026catalogSourcesList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "getAllInstallPlans", + "qualifiedName": "getAllInstallPlans", + "exported": false, + "signature": "func(v1alpha1.OperatorsV1alpha1Interface)([]*olmv1Alpha.InstallPlan)", + "doc": "getAllInstallPlans Retrieves all operator install plans from the cluster\n\nThe function queries the OpenShift Operator Lifecycle Manager for every\nInstallPlan resource across all namespaces. 
If the API call fails, it logs an\nerror and returns an empty slice; otherwise it collects pointers to each\nInstallPlan item into a new slice and returns that list.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:262", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "InstallPlans", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", 
+ "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = 
findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, 
data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = 
config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAllInstallPlans(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.InstallPlan) {", + "\tinstallPlanList, err := olmClient.InstallPlans(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get installplans in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range installPlanList.Items {", + "\t\tout = append(out, \u0026installPlanList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "getAllNamespaces", + "qualifiedName": "getAllNamespaces", + "exported": false, + "signature": "func(corev1client.CoreV1Interface)([]string, error)", + "doc": "getAllNamespaces Retrieves the names of all namespaces in a cluster\n\nThe function queries the Kubernetes API for every namespace, collects each\nname into a slice, and returns that list. If the list request fails, it wraps\nthe error with context before returning. 
The returned slice contains plain\nstring names and may be empty if no namespaces exist.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:161", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Namespaces", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog 
sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + 
"\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + 
"", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + 
"\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAllNamespaces(oc corev1client.CoreV1Interface) (allNs []string, err error) {", + "\tnsList, 
err := oc.Namespaces().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn allNs, fmt.Errorf(\"error getting all namespaces, err: %v\", err)", + "\t}", + "\tfor index := range nsList.Items {", + "\t\tallNs = append(allNs, nsList.Items[index].Name)", + "\t}", + "\treturn allNs, nil", + "}" + ] + }, + { + "name": "getAllOperators", + "qualifiedName": "getAllOperators", + "exported": false, + "signature": "func(v1alpha1.OperatorsV1alpha1Interface)([]*olmv1Alpha.ClusterServiceVersion, error)", + "doc": "getAllOperators Retrieves all operator CSVs from every namespace\n\nThe function queries the OLM client for ClusterServiceVersion objects across\nall namespaces, collecting them into a slice. It logs each found CSV name and\nnamespace for visibility. Errors during listing are wrapped with context and\nreturned to the caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:178", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "ClusterServiceVersions", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource 
quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of 
nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAllOperators(olmClient v1alpha1.OperatorsV1alpha1Interface) ([]*olmv1Alpha.ClusterServiceVersion, error) {", + "\tcsvs := []*olmv1Alpha.ClusterServiceVersion{}", + "", + "\tcsvList, err := olmClient.ClusterServiceVersions(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"error when listing CSVs in all namespaces, err: %v\", err)", + "\t}", + "\tfor i := range csvList.Items {", + "\t\tcsvs = append(csvs, \u0026csvList.Items[i])", + "\t}", + "", + "\tfor i := range csvs {", + "\t\tlog.Info(\"Found CSV %q (ns %q)\", csvs[i].Name, csvs[i].Namespace)", + "\t}", + "\treturn csvs, nil", + "}" + ] + }, + { + "name": "getAllPackageManifests", + "qualifiedName": "getAllPackageManifests", + "exported": false, + "signature": "func(olmpkgclient.PackageManifestInterface)([]*olmpkgv1.PackageManifest)", + "doc": "getAllPackageManifests Retrieves all PackageManifest resources from the cluster\n\nThe function calls the client’s List method to obtain a list 
of\nPackageManifests, handling any error by logging it and returning an empty\nslice. It then iterates over the returned items, appending pointers to each\nmanifest into a new slice. The resulting slice of pointers is returned to the\ncaller.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:299", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + 
"\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = 
findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, 
data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = 
config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAllPackageManifests(olmPkgClient olmpkgclient.PackageManifestInterface) (out []*olmpkgv1.PackageManifest) {", + "\tpackageManifestsList, err := olmPkgClient.List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Unable get Package Manifests in cluster, err: %v\", err)", + "\t\treturn out", + "\t}", + "\tfor index := range packageManifestsList.Items {", + "\t\tout = append(out, \u0026packageManifestsList.Items[index])", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "getAllStorageClasses", + "qualifiedName": "getAllStorageClasses", + "exported": false, + "signature": "func(storagev1typed.StorageV1Interface)([]storagev1.StorageClass, error)", + "doc": "getAllStorageClasses Retrieves all storage classes from the cluster\n\nThe function queries the Kubernetes API for a list of StorageClass objects\nusing the provided client interface. 
It returns the slice of discovered\nstorage classes or an error if the list operation fails, logging any errors\nencountered.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_pv.go:63", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "StorageClasses", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = 
getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + 
"\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, 
data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = 
getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", 
+ "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAllStorageClasses(client storagev1typed.StorageV1Interface) ([]storagev1.StorageClass, error) {", + "\tstorageclasslist, err := client.StorageClasses().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Error when listing storage classes, err: %v\", err)", + "\t\treturn nil, err", + "\t}", + "\treturn storageclasslist.Items, nil", + "}" + ] + }, + { + "name": "getClusterCrdNames", + "qualifiedName": "getClusterCrdNames", + "exported": false, + "signature": "func()([]*apiextv1.CustomResourceDefinition, error)", + "doc": "getClusterCrdNames Retrieves all CustomResourceDefinition objects from the cluster\n\nThe function obtains a client holder, lists CRDs via the API extensions\nclient, and returns a slice of pointers to each CustomResourceDefinition. If\nlisting fails it wraps the error with context. The result is used by\nautodiscovery to filter relevant CRDs.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_crds.go:40", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "CustomResourceDefinitions", + "kind": "function" + }, + { + "name": "ApiextensionsV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + 
{ + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), 
data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := 
FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings 
= roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif 
err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getClusterCrdNames() ([]*apiextv1.CustomResourceDefinition, error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tcrds, err := oc.APIExtClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get cluster CRDs, err: %v\", err)", + "\t}", + "", + "\tvar crdList []*apiextv1.CustomResourceDefinition", + "\tfor idx := range crds.Items {", + "\t\tcrdList = append(crdList, \u0026crds.Items[idx])", + "\t}", + "\treturn crdList, nil", + "}" + ] + }, + { + "name": "getClusterRoleBindings", + 
"qualifiedName": "getClusterRoleBindings", + "exported": false, + "signature": "func(rbacv1typed.RbacV1Interface)([]rbacv1.ClusterRoleBinding, error)", + "doc": "getClusterRoleBindings retrieves all cluster‑level role bindings\n\nThis function calls the Kubernetes RBAC API to list every ClusterRoleBinding\nin the cluster, ignoring namespaces because they are cluster scoped. It\nreturns a slice of the bindings or an error if the request fails, logging any\nfailure for debugging purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_rbac.go:50", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "ClusterRoleBindings", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err 
!= nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != 
nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally 
available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, 
data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = 
config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.ClusterRoleBinding, error) {", + "\t// Get all of the clusterrolebindings from the cluster", + "\t// These are not namespaced so we want all of them", + "\tcrbList, crbErr := client.ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})", + "\tif crbErr != nil {", + "\t\tlog.Error(\"Executing clusterrolebinding command failed with error: %v\", crbErr)", + "\t\treturn nil, crbErr", + "\t}", + "\treturn crbList.Items, nil", + "}" + ] + }, + { + "name": "getCrScaleObjects", + "qualifiedName": "getCrScaleObjects", + "exported": false, + "signature": "func([]unstructured.Unstructured, *apiextv1.CustomResourceDefinition)([]ScaleObject)", + "doc": "getCrScaleObjects Retrieves scaling information for custom resources\n\nThis function iterates over a list of unstructured custom resources, querying\nthe Kubernetes scaling API to obtain each resource's scale subresource. It\nconstructs a group-resource schema from the CRD metadata and appends each\nretrieved ScaleObject to a slice. 
Errors during retrieval are logged fatally,\nensuring only successfully fetched scales are returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_scales.go:84", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "GetName", + "kind": "function" + }, + { + "name": "GetNamespace", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "Scales", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "GetScaleCrUnderTest", + "kind": "function", + "source": [ + "func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tdynamicClient := clientsholder.GetClientsHolder().DynamicClient", + "", + "\tvar scaleObjects []ScaleObject", + "\tfor _, crd := range crds {", + "\t\tif crd.Spec.Scope != apiextv1.NamespaceScoped {", + "\t\t\tlog.Warn(\"Target CRD %q is cluster-wide scoped. 
Skipping search of scale objects.\", crd.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range crd.Spec.Versions {", + "\t\t\tcrdVersion := crd.Spec.Versions[i]", + "\t\t\tgvr := schema.GroupVersionResource{", + "\t\t\t\tGroup: crd.Spec.Group,", + "\t\t\t\tVersion: crdVersion.Name,", + "\t\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t\t}", + "", + "\t\t\t// Filter out non-scalable CRDs.", + "\t\t\tif crdVersion.Subresources == nil || crdVersion.Subresources.Scale == nil {", + "\t\t\t\tlog.Info(\"Target CRD %q is not scalable. Skipping search of scalable CRs.\", crd.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Looking for Scalable CRs of CRD %q (api version %q, group %q, plural %q) in target namespaces.\",", + "\t\t\t\tcrd.Name, crdVersion.Name, crd.Spec.Group, crd.Spec.Names.Plural)", + "", + "\t\t\tfor _, ns := range namespaces {", + "\t\t\t\tcrs, err := dynamicClient.Resource(gvr).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tlog.Fatal(\"Error getting CRs of CRD %q in namespace %q, err: %v\", crd.Name, ns, err)", + "\t\t\t\t}", + "", + "\t\t\t\tif len(crs.Items) \u003e 0 {", + "\t\t\t\t\tscaleObjects = append(scaleObjects, getCrScaleObjects(crs.Items, crd)...)", + "\t\t\t\t} else {", + "\t\t\t\t\tlog.Warn(\"No CRs of CRD %q found in the target namespaces.\", crd.Name)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn scaleObjects", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomResourceDefinition) []ScaleObject {", + "\tvar scaleObjects []ScaleObject", + "\tclients := clientsholder.GetClientsHolder()", + "\tfor _, cr := range crs {", + "\t\tgroupResourceSchema := schema.GroupResource{", + "\t\t\tGroup: crd.Spec.Group,", + "\t\t\tResource: crd.Spec.Names.Plural,", + "\t\t}", + "", + "\t\tname := cr.GetName()", + "\t\tnamespace := 
cr.GetNamespace()", + "\t\tcrScale, err := clients.ScalingClient.Scales(namespace).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Error while getting the scale of CR=%s (CRD=%s) in namespace %s: %v\", name, crd.Name, namespace, err)", + "\t\t}", + "", + "\t\tscaleObjects = append(scaleObjects, ScaleObject{Scale: crScale, GroupResourceSchema: groupResourceSchema})", + "\t}", + "\treturn scaleObjects", + "}" + ] + }, + { + "name": "getHelmList", + "qualifiedName": "getHelmList", + "exported": false, + "signature": "func(*rest.Config, []string)(map[string][]*release.Release)", + "doc": "getHelmList Collects deployed Helm releases from given namespaces\n\nThe function creates a Helm client for each namespace using the provided REST\nconfiguration, then retrieves all deployed releases in that namespace.\nResults are stored in a map keyed by namespace name. If client creation fails\nit panics; otherwise the mapping of namespace to release slices is returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:231", + "calls": [ + { + "pkgPath": "github.com/mittwald/go-helm-client", + "name": "NewClientFromRestConf", + "kind": "function" + }, + { + "name": "panic", + "kind": "function" + }, + { + "name": "ListDeployedReleases", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + 
"\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := 
oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = 
getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*release.Release {", + "\thelmChartReleases := map[string][]*release.Release{}", + "\tfor _, ns := range namespaces {", + "\t\topt := \u0026helmclient.RestConfClientOptions{", + "\t\t\tOptions: \u0026helmclient.Options{", + "\t\t\t\tNamespace: ns,", + "\t\t\t\tRepositoryCache: \"/tmp/.helmcache\",", + "\t\t\t\tRepositoryConfig: \"/tmp/.helmrepo\",", + "\t\t\t\tDebug: true,", + "\t\t\t\tLinting: true,", + "\t\t\t\tDebugLog: log.Info,", + "\t\t\t},", + "\t\t\tRestConfig: restConfig,", + "\t\t}", + "", + "\t\thelmClient, err := helmclient.NewClientFromRestConf(opt)", + "\t\tif err != nil {", + "\t\t\tpanic(err)", + "\t\t}", + "\t\tnsHelmchartreleases, _ := helmClient.ListDeployedReleases()", + "\t\thelmChartReleases[ns] = nsHelmchartreleases", + "\t}", + "\treturn helmChartReleases", + "}" + ] + }, + { + "name": "getNetworkAttachmentDefinitions", + "qualifiedName": "getNetworkAttachmentDefinitions", + "exported": false, + "signature": "func(*clientsholder.ClientsHolder, []string)([]nadClient.NetworkAttachmentDefinition, error)", + "doc": "getNetworkAttachmentDefinitions Retrieves all 
network attachment definitions from specified namespaces\n\nThe function iterates over a list of namespace names, querying each for its\nNetworkAttachmentDefinition resources via the CNCF networking client. It\ncollects any found items into a single slice, handling missing namespaces\ngracefully by ignoring not‑found errors. The resulting slice and an are\nreturned to the caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_nads.go:19", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "NetworkAttachmentDefinitions", + "kind": "function" + }, + { + "name": "K8sCniCncfIoV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = 
findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = 
FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + 
"\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + 
"\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = 
config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getNetworkAttachmentDefinitions(client *clientsholder.ClientsHolder, namespaces []string) ([]nadClient.NetworkAttachmentDefinition, error) {", + "\tvar nadList []nadClient.NetworkAttachmentDefinition", + "", + "\tfor _, ns := range namespaces {", + "\t\tnad, err := client.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of networkAttachmentDefinitions to the nadList slice", + "\t\tnadList = append(nadList, nad.Items...)", + "\t}", + "", + "\treturn nadList, nil", + "}" + ] + }, + { + "name": "getNetworkPolicies", + "qualifiedName": "getNetworkPolicies", + "exported": false, + "signature": "func(networkingv1client.NetworkingV1Interface)([]networkingv1.NetworkPolicy, error)", + "doc": "getNetworkPolicies Retrieves all network policies in the cluster\n\nThe function calls the NetworkingV1 client to list network policies across\nevery namespace by using an empty string for the namespace parameter. 
It\nreturns a slice of NetworkPolicy objects and any error encountered during the\nAPI call.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_networkpolicies.go:33", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "NetworkPolicies", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = 
getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != 
nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := 
getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), 
[]string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getNetworkPolicies(oc networkingv1client.NetworkingV1Interface) ([]networkingv1.NetworkPolicy, error) {", + "\tnps, err := oc.NetworkPolicies(\"\").List(context.TODO(), 
metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn nps.Items, nil", + "}" + ] + }, + { + "name": "getOpenshiftVersion", + "qualifiedName": "getOpenshiftVersion", + "exported": false, + "signature": "func(clientconfigv1.ConfigV1Interface)(string, error)", + "doc": "getOpenshiftVersion retrieves the OpenShift version from the cluster\n\nThe function queries the openshift-apiserver ClusterOperator resource to\nobtain its status versions. It searches for a version entry matching a\nspecific label, logs the found version, and returns it. If the operator is\nmissing or no matching version exists, it returns an error or a sentinel\nvalue indicating a non‑OpenShift cluster.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:407", + "calls": [ + { + "name": "Get", + "kind": "function" + }, + { + "name": "ClusterOperators", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar 
err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + 
"\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get 
operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = 
oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + 
"", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, err error) {", + "\tvar clusterOperator *configv1.ClusterOperator", + "\tclusterOperator, err = oClient.ClusterOperators().Get(context.TODO(), \"openshift-apiserver\", metav1.GetOptions{})", + "\tif err != nil {", + "\t\tswitch {", + "\t\tcase kerrors.IsNotFound(err):", + "\t\t\tlog.Warn(\"Unable to get ClusterOperator CR from openshift-apiserver. 
Running in a non-OCP cluster.\")", + "\t\t\treturn NonOpenshiftClusterVersion, nil", + "\t\tdefault:", + "\t\t\treturn \"\", err", + "\t\t}", + "\t}", + "", + "\tfor _, ver := range clusterOperator.Status.Versions {", + "\t\tif ver.Name == tnfCsvTargetLabelName {", + "\t\t\t// openshift-apiserver does not report version,", + "\t\t\t// clusteroperator/openshift-apiserver does, and only version number", + "\t\t\tlog.Info(\"OpenShift Version found: %v\", ver.Version)", + "\t\t\treturn ver.Version, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", errors.New(\"could not get openshift version from clusterOperator\")", + "}" + ] + }, + { + "name": "getOperandPodsFromTestCsvs", + "qualifiedName": "getOperandPodsFromTestCsvs", + "exported": false, + "signature": "func([]*olmv1Alpha.ClusterServiceVersion, []corev1.Pod)([]*corev1.Pod, error)", + "doc": "getOperandPodsFromTestCsvs Identifies pods whose owner custom resources are managed by the provided operators\n\nThe function scans each supplied operator CSV to build a map of the CRDs it\nowns, then iterates through all pods, retrieving their top‑level owners. If\na pod’s owning CRD matches one in the map, that pod is added to the result\nlist. 
It returns the filtered slice and an error if any step fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:317", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Cut", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "path", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "GetPodTopOwner", + "kind": "function", + "source": [ + "func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) {", + "\ttopOwners = make(map[string]TopOwner)", + "\terr = followOwnerReferences(", + "\t\tclientsholder.GetClientsHolder().GroupResources,", + "\t\tclientsholder.GetClientsHolder().DynamicClient,", + "\t\ttopOwners,", + "\t\tpodNamespace,", + "\t\tpodOwnerReferences)", + "\tif err != nil {", + "\t\treturn topOwners, fmt.Errorf(\"could not get top owners, err: %v\", err)", + "\t}", + "\treturn topOwners, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "path", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + 
"kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: 
probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get 
running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + 
"\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getOperandPodsFromTestCsvs(testCsvs []*olmv1Alpha.ClusterServiceVersion, pods []corev1.Pod) ([]*corev1.Pod, error) {", + "\t// Helper var to store all the managed crds from the operators under test", + "\t// They map key is \"Kind.group/version\" or \"Kind.APIversion\", which should be the same.", + "\t// e.g.: \"Subscription.operators.coreos.com/v1alpha1\"", + "\tcrds := map[string]*olmv1Alpha.ClusterServiceVersion{}", + "", + "\t// First, iterate on each testCsv to fill the helper crds map.", + "\tfor _, csv := range testCsvs {", + "\t\townedCrds := csv.Spec.CustomResourceDefinitions.Owned", + "\t\tif len(ownedCrds) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range ownedCrds {", + 
"\t\t\tcrd := \u0026ownedCrds[i]", + "", + "\t\t\t_, group, found := strings.Cut(crd.Name, \".\")", + "\t\t\tif !found {", + "\t\t\t\treturn nil, fmt.Errorf(\"failed to parse resources and group from crd name %q\", crd.Name)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"CSV %q owns crd %v\", csv.Name, crd.Kind+\"/\"+group+\"/\"+crd.Version)", + "", + "\t\t\tcrdPath := path.Join(crd.Kind, group, crd.Version)", + "\t\t\tcrds[crdPath] = csv", + "\t\t}", + "\t}", + "", + "\t// Now, iterate on every pod in the list to check whether they're owned by any of the CRs that", + "\t// the csvs are managing.", + "\toperandPods := []*corev1.Pod{}", + "\tfor i := range pods {", + "\t\tpod := \u0026pods[i]", + "\t\towners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get top owners of pod %v/%v: %v\", pod.Namespace, pod.Name, err)", + "\t\t}", + "", + "\t\tfor _, owner := range owners {", + "\t\t\tversionedCrdPath := path.Join(owner.Kind, owner.APIVersion)", + "", + "\t\t\tvar csv *olmv1Alpha.ClusterServiceVersion", + "\t\t\tif csv = crds[versionedCrdPath]; csv == nil {", + "\t\t\t\t// The owner is not a CR or it's not a CR owned by any operator under test", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Pod %v/%v has owner CR %s of CRD %q (CSV %v)\", pod.Namespace, pod.Name,", + "\t\t\t\towner.Name, versionedCrdPath, csv.Name)", + "", + "\t\t\toperandPods = append(operandPods, pod)", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn operandPods, nil", + "}" + ] + }, + { + "name": "getOperatorCsvPods", + "qualifiedName": "getOperatorCsvPods", + "exported": false, + "signature": "func([]*olmv1Alpha.ClusterServiceVersion)(map[types.NamespacedName][]*corev1.Pod, error)", + "doc": "getOperatorCsvPods Retrieves operator controller pods for each CSV\n\nFor every ClusterServiceVersion in the list, it looks up the namespace\nannotation to locate where the operator runs. 
It then lists all pods in that\nnamespace, filters those owned by the CSV, and builds a map keyed by the\nCSV’s namespaced name to its managed pods. Errors are returned if\nannotations or pod retrieval fail.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:439", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "getPodsOwnedByCsv", + "kind": "function", + "source": [ + "func getPodsOwnedByCsv(csvName, operatorNamespace string, client *clientsholder.ClientsHolder) (managedPods []*corev1.Pod, err error) {", + "\t// Get all pods from the target namespace", + "\tpodsList, err := client.K8sClient.CoreV1().Pods(operatorNamespace).List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tfor index := range podsList.Items {", + "\t\t// Get the top owners of the pod", + "\t\tpod := podsList.Items[index]", + "\t\ttopOwners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"could not get top owners of Pod %s (in namespace %s), err=%v\", pod.Name, pod.Namespace, err)", + "\t\t}", + "", + "\t\t// check if owner matches with the csv", + "\t\tfor _, owner := range topOwners {", + "\t\t\t// The owner must be in the targetNamespace", + "\t\t\tif owner.Kind == olmv1Alpha.ClusterServiceVersionKind \u0026\u0026 owner.Namespace 
== operatorNamespace \u0026\u0026 owner.Name == csvName {", + "\t\t\t\tmanagedPods = append(managedPods, \u0026podsList.Items[index])", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn managedPods, nil", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = 
getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != 
nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := 
getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), 
[]string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types.NamespacedName][]*corev1.Pod, error) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + 
"", + "\tclient := clientsholder.GetClientsHolder()", + "\tcsvToPodsMapping := make(map[types.NamespacedName][]*corev1.Pod)", + "", + "\t// The operator's pod (controller) should run in the subscription/operatorgroup ns.", + "\tfor _, csv := range csvList {", + "\t\tns, found := csv.Annotations[nsAnnotation]", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t}", + "", + "\t\tpods, err := getPodsOwnedByCsv(csv.Name, strings.TrimSpace(ns), client)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get pods from ns %v: %v\", ns, err)", + "\t\t}", + "", + "\t\tcsvToPodsMapping[types.NamespacedName{Name: csv.Name, Namespace: csv.Namespace}] = pods", + "\t}", + "\treturn csvToPodsMapping, nil", + "}" + ] + }, + { + "name": "getPersistentVolumeClaims", + "qualifiedName": "getPersistentVolumeClaims", + "exported": false, + "signature": "func(corev1client.CoreV1Interface)([]corev1.PersistentVolumeClaim, error)", + "doc": "getPersistentVolumeClaims Retrieves all PersistentVolumeClaim objects from the cluster\n\nThis function queries the Kubernetes API for every PersistentVolumeClaim\nacross all namespaces, returning a slice of claim objects or an error if the\nrequest fails. It performs a List operation with no namespace filter and uses\na context placeholder. 
The resulting claims are extracted from the response\nitems field.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_pv.go:49", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "PersistentVolumeClaims", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = 
getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != 
nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := 
getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), 
[]string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getPersistentVolumeClaims(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolumeClaim, error) {", + "\tpvcs, err := 
oc.PersistentVolumeClaims(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn pvcs.Items, nil", + "}" + ] + }, + { + "name": "getPersistentVolumes", + "qualifiedName": "getPersistentVolumes", + "exported": false, + "signature": "func(corev1client.CoreV1Interface)([]corev1.PersistentVolume, error)", + "doc": "getPersistentVolumes Retrieves all persistent volumes in the cluster\n\nThe function calls the core V1 client to list PersistentVolume resources,\nreturning a slice of those objects or an error if the API call fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_pv.go:34", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "PersistentVolumes", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), 
[]string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + 
"\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + 
"\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = 
getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + 
"\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getPersistentVolumes(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolume, error) {", + "\tpvs, err := oc.PersistentVolumes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn pvs.Items, nil", + "}" + ] + }, + { + "name": "getPodDisruptionBudgets", + "qualifiedName": "getPodDisruptionBudgets", + "exported": false, + "signature": "func(policyv1client.PolicyV1Interface, []string)([]policyv1.PodDisruptionBudget, error)", + "doc": "getPodDisruptionBudgets Collects pod disruption budgets across specified namespaces\n\nThe function iterates over a list of namespace names, requesting the pod\ndisruption budgets present in each one via the Kubernetes policy client. It\naggregates all retrieved items into a single slice and returns them along\nwith any error that occurs during listing. 
If an error is encountered for a\nnamespace, the function aborts immediately and propagates the error.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_pdbs.go:34", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "PodDisruptionBudgets", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", 
len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = 
findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find 
ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = 
getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getPodDisruptionBudgets(oc policyv1client.PolicyV1Interface, namespaces []string) ([]policyv1.PodDisruptionBudget, error) {", + 
"\tpodDisruptionBudgets := []policyv1.PodDisruptionBudget{}", + "\tfor _, ns := range namespaces {", + "\t\tpdbs, err := oc.PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn nil, err", + "\t\t}", + "\t\tpodDisruptionBudgets = append(podDisruptionBudgets, pdbs.Items...)", + "\t}", + "", + "\treturn podDisruptionBudgets, nil", + "}" + ] + }, + { + "name": "getPodsOwnedByCsv", + "qualifiedName": "getPodsOwnedByCsv", + "exported": false, + "signature": "func(string, string, *clientsholder.ClientsHolder)([]*corev1.Pod, error)", + "doc": "getPodsOwnedByCsv retrieves operator pods owned by a specified CSV\n\nThe function lists all pods in the given namespace, then checks each pod’s\ntop-level owner references to find those whose owner is a\nClusterServiceVersion matching the provided name and namespace. Matching pods\nare collected into a slice that is returned. If any error occurs while\nlisting pods or determining owners, it returns an error.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:469", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Pods", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "GetPodTopOwner", + "kind": "function", + "source": [ + "func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) {", + "\ttopOwners = make(map[string]TopOwner)", + "\terr = followOwnerReferences(", + "\t\tclientsholder.GetClientsHolder().GroupResources,", + "\t\tclientsholder.GetClientsHolder().DynamicClient,", + "\t\ttopOwners,", + "\t\tpodNamespace,", + "\t\tpodOwnerReferences)", + "\tif err != nil {", + "\t\treturn topOwners, fmt.Errorf(\"could not get top owners, err: %v\", err)", + "\t}", + 
"\treturn topOwners, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getOperatorCsvPods", + "kind": "function", + "source": [ + "func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types.NamespacedName][]*corev1.Pod, error) {", + "\tconst nsAnnotation = \"olm.operatorNamespace\"", + "", + "\tclient := clientsholder.GetClientsHolder()", + "\tcsvToPodsMapping := make(map[types.NamespacedName][]*corev1.Pod)", + "", + "\t// The operator's pod (controller) should run in the subscription/operatorgroup ns.", + "\tfor _, csv := range csvList {", + "\t\tns, found := csv.Annotations[nsAnnotation]", + "\t\tif !found {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get ns annotation %q from csv %v/%v\", nsAnnotation, csv.Namespace, csv.Name)", + "\t\t}", + "", + "\t\tpods, err := getPodsOwnedByCsv(csv.Name, strings.TrimSpace(ns), client)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get pods from ns %v: %v\", ns, err)", + "\t\t}", + "", + "\t\tcsvToPodsMapping[types.NamespacedName{Name: csv.Name, Namespace: csv.Namespace}] = pods", + "\t}", + "\treturn csvToPodsMapping, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getPodsOwnedByCsv(csvName, operatorNamespace string, client *clientsholder.ClientsHolder) (managedPods []*corev1.Pod, err error) {", + "\t// Get all pods from the target namespace", + "\tpodsList, err := client.K8sClient.CoreV1().Pods(operatorNamespace).List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tfor index := range podsList.Items {", + "\t\t// Get the top owners of the pod", + "\t\tpod := podsList.Items[index]", + "\t\ttopOwners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + 
"\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"could not get top owners of Pod %s (in namespace %s), err=%v\", pod.Name, pod.Namespace, err)", + "\t\t}", + "", + "\t\t// check if owner matches with the csv", + "\t\tfor _, owner := range topOwners {", + "\t\t\t// The owner must be in the targetNamespace", + "\t\t\tif owner.Kind == olmv1Alpha.ClusterServiceVersionKind \u0026\u0026 owner.Namespace == operatorNamespace \u0026\u0026 owner.Name == csvName {", + "\t\t\t\tmanagedPods = append(managedPods, \u0026podsList.Items[index])", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn managedPods, nil", + "}" + ] + }, + { + "name": "getResourceQuotas", + "qualifiedName": "getResourceQuotas", + "exported": false, + "signature": "func(corev1client.CoreV1Interface)([]corev1.ResourceQuota, error)", + "doc": "getResourceQuotas Retrieves all resource quotas from the cluster\n\nThe function queries the Kubernetes API for every ResourceQuota object across\nall namespaces by calling List on the client’s ResourceQuotas interface\nwith an empty namespace and default list options. It returns a slice\ncontaining each quota found and propagates any error that occurs during the\nrequest. 
The result is used to populate autodiscovery data about cluster\nlimits.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_resources.go:35", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "ResourceQuotas", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = 
getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != 
nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := 
getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), 
[]string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getResourceQuotas(oc corev1client.CoreV1Interface) ([]corev1.ResourceQuota, error) {", + "\trql, err := oc.ResourceQuotas(\"\").List(context.TODO(), 
metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treturn rql.Items, nil", + "}" + ] + }, + { + "name": "getRoleBindings", + "qualifiedName": "getRoleBindings", + "exported": false, + "signature": "func(rbacv1typed.RbacV1Interface)([]rbacv1.RoleBinding, error)", + "doc": "getRoleBindings retrieves all rolebindings across every namespace\n\nThis function queries the Kubernetes RBAC API for RoleBinding objects in\nevery namespace by using an empty string selector. It returns a slice of\nRoleBinding instances or an error if the list operation fails, logging the\nfailure before propagating it.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_rbac.go:34", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "RoleBindings", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: 
%+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network 
policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList 
= config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent 
volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = 
config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, error) {", + "\t// Get all of the rolebindings from all namespaces", + "\troleList, roleErr := client.RoleBindings(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif roleErr != nil {", + "\t\tlog.Error(\"Executing rolebinding command failed with error: %v\", roleErr)", + "\t\treturn nil, roleErr", + "\t}", + "\treturn roleList.Items, nil", + "}" + ] + }, + { + "name": "getRoles", + "qualifiedName": "getRoles", + "exported": false, + "signature": "func(rbacv1typed.RbacV1Interface)([]rbacv1.Role, error)", + "doc": "getRoles retrieves all cluster roles\n\nThe function queries the Kubernetes RBAC API to list every Role resource\nacross all namespaces, returning a slice of role objects or an error if the\nrequest fails. 
It logs any errors encountered during the API call before\npropagating them to the caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_rbac.go:67", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Roles", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = 
getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + 
"\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, 
data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = 
getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", 
+ "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getRoles(client rbacv1typed.RbacV1Interface) ([]rbacv1.Role, error) {", + "\t// Get all of the roles from all namespaces", + "\troleList, roleErr := client.Roles(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif roleErr != nil {", + "\t\tlog.Error(\"Executing roles command failed with error: %v\", roleErr)", + "\t\treturn nil, roleErr", + "\t}", + "\treturn roleList.Items, nil", + "}" + ] + }, + { + "name": "getServiceAccounts", + "qualifiedName": "getServiceAccounts", + "exported": false, + "signature": "func(corev1client.CoreV1Interface, []string)([]*corev1.ServiceAccount, error)", + "doc": "getServiceAccounts Collects all ServiceAccount objects from specified namespaces\n\nThe function iterates over each namespace in the input list, querying the\nKubernetes API to retrieve the ServiceAccounts present there. Each retrieved\naccount is appended to a slice of pointers that is returned to the caller. 
If\nany API call fails, the error is propagated immediately and no further\nnamespaces are processed.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_service_accounts.go:33", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "ServiceAccounts", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", 
len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = 
findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find 
ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = 
getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getServiceAccounts(oc corev1client.CoreV1Interface, namespaces []string) (servicesAccounts []*corev1.ServiceAccount, err error) 
{", + "\tfor _, ns := range namespaces {", + "\t\ts, err := oc.ServiceAccounts(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn servicesAccounts, err", + "\t\t}", + "\t\tfor i := range s.Items {", + "\t\t\tservicesAccounts = append(servicesAccounts, \u0026s.Items[i])", + "\t\t}", + "\t}", + "\treturn servicesAccounts, nil", + "}" + ] + }, + { + "name": "getServices", + "qualifiedName": "getServices", + "exported": false, + "signature": "func(corev1client.CoreV1Interface, []string, []string)([]*corev1.Service, error)", + "doc": "getServices Retrieves services from specified namespaces while excluding ignored names\n\nThe function iterates over a list of namespace strings, querying the\nKubernetes API for services in each one. It filters out any service whose\nname appears in an ignore list using a helper that checks string membership.\nMatching services are collected into a slice and returned; if any API call\nfails, the error is propagated immediately.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_services.go:34", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Services", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + 
"\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), 
nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + 
"\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot 
get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getServices(oc corev1client.CoreV1Interface, namespaces, ignoreList []string) (allServices []*corev1.Service, err error) {", + "\tfor _, ns := range namespaces {", + "\t\ts, err := oc.Services(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn allServices, err", + "\t\t}", + "\t\tfor i := range s.Items {", + "\t\t\tif stringhelper.StringInSlice(ignoreList, s.Items[i].Name, false) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tallServices = append(allServices, \u0026s.Items[i])", + "\t\t}", + "\t}", + "\treturn allServices, nil", + "}" + ] + }, + { + "name": "getSriovNetworkNodePolicies", + 
"qualifiedName": "getSriovNetworkNodePolicies", + "exported": false, + "signature": "func(*clientsholder.ClientsHolder, []string)([]unstructured.Unstructured, error)", + "doc": "getSriovNetworkNodePolicies Collects SR-IOV network node policies from specified namespaces\n\nThe function iterates over each provided namespace, querying the dynamic\nclient for SR‑IOV network node policy resources. It aggregates all found\nitems into a single slice, handling missing clients or non‑existent\nresources gracefully by returning an empty list instead of panicking. Errors\nunrelated to a resource not being found are propagated back to the caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_sriov.go:62", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Namespace", + "kind": "function" + }, + { + "name": "Resource", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + 
"\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + 
"\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// 
Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + 
"\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + 
"\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getSriovNetworkNodePolicies(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworkNodePolicies []unstructured.Unstructured, err error) {", + "\t// Check for nil client or DynamicClient to prevent panic", + "\tif client == nil || client.DynamicClient == nil {", + "\t\treturn []unstructured.Unstructured{}, nil", + "\t}", + "", + "\tvar sriovNetworkNodePolicyList []unstructured.Unstructured", + "", + "\tfor _, ns := range namespaces {", + "\t\tsnnp, err := client.DynamicClient.Resource(SriovNetworkNodePolicyGVR).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of sriovNetworkNodePolicies to the sriovNetworkNodePolicies slice", + "\t\tif snnp != nil {", + "\t\t\tsriovNetworkNodePolicyList = append(sriovNetworkNodePolicyList, snnp.Items...)", + "\t\t}", + "\t}", + "\treturn sriovNetworkNodePolicyList, nil", + "}" + ] + }, + { + "name": "getSriovNetworks", + "qualifiedName": "getSriovNetworks", + "exported": false, + "signature": "func(*clientsholder.ClientsHolder, []string)([]unstructured.Unstructured, error)", + "doc": "getSriovNetworks Retrieves all SR‑IOV network resources from the specified namespaces\n\nThe function iterates over each namespace, using a dynamic client to list\nobjects of the SR‑IOV Network type. 
It skips namespaces where the resource\nis not found and aggregates the items into a single slice. If the client or\nits DynamicClient is nil, it safely returns an empty result without error.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_sriov.go:33", + "calls": [ + { + "name": "List", + "kind": "function" + }, + { + "name": "Namespace", + "kind": "function" + }, + { + "name": "Resource", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + 
"\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, 
config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), 
podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + 
"\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = 
config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getSriovNetworks(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworks []unstructured.Unstructured, err error) {", + "\t// Check for nil client or DynamicClient to prevent panic", + "\tif client == nil || client.DynamicClient == nil {", + "\t\treturn []unstructured.Unstructured{}, nil", + "\t}", + "", + "\tvar sriovNetworkList []unstructured.Unstructured", + "", + "\tfor _, ns := range namespaces {", + "\t\tsnl, err := client.DynamicClient.Resource(SriovNetworkGVR).Namespace(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil \u0026\u0026 !kerrors.IsNotFound(err) {", + "\t\t\treturn nil, err", + "\t\t}", + "", + "\t\t// Append the list of sriovNetworks to the sriovNetworks slice", + "\t\tif snl != nil {", + "\t\t\tsriovNetworkList = append(sriovNetworkList, snl.Items...)", + "\t\t}", + "\t}", + "\treturn sriovNetworkList, nil", + "}" + ] + }, + { + "name": "isDeploymentsPodsMatchingAtLeastOneLabel", + "qualifiedName": "isDeploymentsPodsMatchingAtLeastOneLabel", + "exported": false, + "signature": "func([]labelObject, string, *appsv1.Deployment)(bool)", + "doc": "isDeploymentsPodsMatchingAtLeastOneLabel checks if a deployment’s pod template contains any of the specified labels\n\nThe function iterates over each provided label object, comparing its\nkey/value pair against the labels defined in the deployment’s pod template.\nIf it finds a match, it logs the discovery and returns true immediately. 
If\nno labels match after examining all options, it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:82", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findDeploymentsByLabels", + "kind": "function", + "source": [ + "func findDeploymentsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.Deployment {", + "\tallDeployments := []appsv1.Deployment{}", + "\tfor _, ns := range namespaces {", + "\t\tdps, err := appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list deployments in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(dps.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any deployments in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(dps.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The deployment is added only once if at least one pod matches one label in the Deployment", + "\t\t\t\tif isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, \u0026dps.Items[i]) {", + "\t\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all deployments in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q without label\", dps.Items[i].Name, ns)", + "\t\t\t\tallDeployments = append(allDeployments, dps.Items[i])", + "\t\t\t\tlog.Info(\"Deployment %s found in ns=%s\", dps.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allDeployments) == 0 {", + "\t\tlog.Warn(\"Did not find any deployment in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allDeployments", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isDeploymentsPodsMatchingAtLeastOneLabel(labels []labelObject, namespace string, deployment *appsv1.Deployment) bool {", + "\tfor _, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in deployment %q found in ns %q using label %s=%s\", deployment.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif deployment.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"Deployment %s found in ns=%s\", deployment.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "isIstioServiceMeshInstalled", + "qualifiedName": "isIstioServiceMeshInstalled", + "exported": false, + "signature": 
"func(appv1client.AppsV1Interface, []string)(bool)", + "doc": "isIstioServiceMeshInstalled checks for an installed Istio service mesh\n\nThe function verifies that the special Istio namespace exists in the cluster\nand then looks for a Deployment named istiod within that namespace. If either\nthe namespace or deployment is missing, it logs appropriate messages and\nreturns false; otherwise it confirms detection with an info log and returns\ntrue.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:56", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "Deployments", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + 
"\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = 
getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = 
isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of 
service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isIstioServiceMeshInstalled(appClient 
appv1client.AppsV1Interface, allNs []string) bool {", + "\t// The Istio namespace must be present", + "\tif !stringhelper.StringInSlice(allNs, istioNamespace, false) {", + "\t\tlog.Info(\"Istio Service Mesh not present (the namespace %q does not exists)\", istioNamespace)", + "\t\treturn false", + "\t}", + "", + "\t// The Deployment \"istiod\" must be present in an active service mesh", + "\t_, err := appClient.Deployments(istioNamespace).Get(context.TODO(), istioDeploymentName, metav1.GetOptions{})", + "\tif errors.IsNotFound(err) {", + "\t\tlog.Warn(\"The Istio Deployment %q is missing (but the Istio namespace exists)\", istioDeploymentName)", + "\t\treturn false", + "\t} else if err != nil {", + "\t\tlog.Error(\"Failed getting Deployment %q\", istioDeploymentName)", + "\t\treturn false", + "\t}", + "", + "\tlog.Info(\"Istio Service Mesh detected\")", + "", + "\treturn true", + "}" + ] + }, + { + "name": "isStatefulSetsMatchingAtLeastOneLabel", + "qualifiedName": "isStatefulSetsMatchingAtLeastOneLabel", + "exported": false, + "signature": "func([]labelObject, string, *appsv1.StatefulSet)(bool)", + "doc": "isStatefulSetsMatchingAtLeastOneLabel checks if a StatefulSet contains at least one pod label that matches the given list\n\nThe function iterates over each supplied label object, comparing its key and\nvalue against the labels defined in the StatefulSet's pod template. 
If any\nmatch is found, it logs the discovery and returns true; otherwise it returns\nfalse after examining all labels.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_podset.go:144", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "findStatefulSetsByLabels", + "kind": "function", + "source": [ + "func findStatefulSetsByLabels(", + "\tappClient appv1client.AppsV1Interface,", + "\tlabels []labelObject,", + "\tnamespaces []string,", + ") []appsv1.StatefulSet {", + "\tallStatefulSets := []appsv1.StatefulSet{}", + "\tfor _, ns := range namespaces {", + "\t\tstatefulSet, err := appClient.StatefulSets(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to list statefulsets in ns=%s, err: %v . 
Trying to proceed.\", ns, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(statefulSet.Items) == 0 {", + "\t\t\tlog.Warn(\"Did not find any statefulSet in ns=%s\", ns)", + "\t\t}", + "\t\tfor i := 0; i \u003c len(statefulSet.Items); i++ {", + "\t\t\tif len(labels) \u003e 0 {", + "\t\t\t\t// The StatefulSet is added only once if at least one pod matches one label in the Statefulset", + "\t\t\t\tif isStatefulSetsMatchingAtLeastOneLabel(labels, ns, \u0026statefulSet.Items[i]) {", + "\t\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\t// If labels are not provided, all statefulsets in the namespaces under test, are tested by the CNF suite", + "\t\t\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q without label\", statefulSet.Items[i].Name, ns)", + "\t\t\t\tallStatefulSets = append(allStatefulSets, statefulSet.Items[i])", + "\t\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Items[i].Name, ns)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tif len(allStatefulSets) == 0 {", + "\t\tlog.Warn(\"Did not find any statefulset in the configured namespaces %v\", namespaces)", + "\t}", + "\treturn allStatefulSets", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace string, statefulSet *appsv1.StatefulSet) bool {", + "\tfor _, aLabelObject := range labels {", + "\t\tlog.Debug(\"Searching pods in statefulset %q found in ns %q using label %s=%s\", statefulSet.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue)", + "\t\tif statefulSet.Spec.Template.Labels[aLabelObject.LabelKey] == aLabelObject.LabelValue {", + "\t\t\tlog.Info(\"StatefulSet %s found in ns=%s\", statefulSet.Name, namespace)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "namespacesListToStringList", + "qualifiedName": 
"namespacesListToStringList", + "exported": false, + "signature": "func([]configuration.Namespace)([]string)", + "doc": "namespacesListToStringList Converts a list of namespace objects to a slice of their names\n\nThe function iterates over each Namespace in the input slice, extracting its\nName field and appending it to a new string slice. It returns this slice of\nstrings representing all namespace names. This conversion is used to provide\na simple list for further processing elsewhere in the package.", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:393", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = 
getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + 
"\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, 
data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = 
getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", 
+ "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func namespacesListToStringList(namespaceList []configuration.Namespace) (stringList []string) {", + "\tfor _, ns := range namespaceList {", + "\t\tstringList = append(stringList, ns.Name)", + "\t}", + "\treturn stringList", + "}" + ] + } + ], + "globals": [ + { + "name": "SriovNetworkGVR", + "exported": true, + "type": "", + "doc": "SriovNetworkGVR defines the GroupVersionResource for SriovNetwork", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_sriov.go:14" + }, + { + "name": "SriovNetworkNodePolicyGVR", + "exported": true, + "type": "", + "doc": "SriovNetworkNodePolicyGVR defines the GroupVersionResource for SriovNetworkNodePolicy", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_sriov.go:21" + }, + { + "name": "data", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:155" + } + ], + "consts": [ + { + "name": "NonOpenshiftClusterVersion", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:55" + }, + { + "name": "csvNameWithNamespaceFormatStr", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/constants.go:21" + }, + { + "name": "istioDeploymentName", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:46" + }, + { + "name": "istioNamespace", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover_operators.go:45" + }, + { + "name": "labelRegex", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:157" + }, + { + "name": "labelRegexMatches", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:158" + }, + { + "name": "labelTemplate", + "exported": false, + "position": 
"/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:59" + }, + { + "name": "probeHelperPodsLabelName", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/constants.go:19" + }, + { + "name": "probeHelperPodsLabelValue", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/constants.go:20" + }, + { + "name": "tnfCsvTargetLabelName", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:56" + }, + { + "name": "tnfCsvTargetLabelValue", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:57" + }, + { + "name": "tnfLabelPrefix", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/autodiscover/autodiscover.go:58" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "certsuite", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "os", + "path/filepath", + "strings", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecksDB", + "qualifiedName": "LoadChecksDB", + "exported": true, + "signature": "func(string)()", + "doc": "LoadChecksDB Initializes test checks based on a label expression\n\nThe function loads internal check definitions, then evaluates whether\npreflight tests should run for the provided labels. If allowed, it triggers\nthe loading of preflight-specific checks. 
It performs no return value and\nrelies on side effects to prepare the checks database.", + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:58", + "calls": [ + { + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "ShouldRun", + "kind": "function", + "source": [ + "func ShouldRun(labelsExpr string) bool {", + "\tenv = provider.GetTestEnvironment()", + "\tpreflightAllowedLabels := []string{common.PreflightTestKey, identifiers.TagPreflight}", + "", + "\tif !labelsAllowTestRun(labelsExpr, preflightAllowedLabels) {", + "\t\treturn false", + "\t}", + "", + "\t// Add safeguard against running the preflight tests if the docker config does not exist.", + "\tpreflightDockerConfigFile := configuration.GetTestParameters().PfltDockerconfig", + "\tif preflightDockerConfigFile == \"\" || preflightDockerConfigFile == \"NA\" {", + "\t\tlog.Warn(\"Skipping the preflight suite because the Docker Config file is not provided.\")", + "\t\tenv.SkipPreflight = true", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + 
"\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = 
io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). 
Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecksDB(labelsExpr string) {", + "\tLoadInternalChecksDB()", + "", + "\tif preflight.ShouldRun(labelsExpr) {", + "\t\tpreflight.LoadChecks()", + "\t}", + "}" + ] + }, + { + "name": "LoadInternalChecksDB", + "qualifiedName": "LoadInternalChecksDB", + "exported": true, + "signature": "func()()", + "doc": "LoadInternalChecksDB Initializes all test suites for internal checks\n\nThis function calls the LoadChecks functions of each test package,\nregistering their individual tests with the shared checks database. It\nensures that all internal test groups are available before any preflight or\nlabel filtering occurs. 
No return value is produced and it performs no error\nhandling itself.", + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:40", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull 
policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := 
checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), 
testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + 
"\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + 
"\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + 
"\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "getMatchingTestIDs", + "kind": "function", + "source": [ + "func getMatchingTestIDs(labelExpr string) ([]string, error) {", + "\tif err := checksdb.InitLabelsExprEvaluator(labelExpr); err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to initialize a test case label evaluator, err: %v\", err)", + "\t}", + "\tcertsuite.LoadInternalChecksDB()", + "\ttestIDs, err := checksdb.FilterCheckIDs()", + "\tif err != nil {", + "\t\treturn 
nil, fmt.Errorf(\"could not list test cases, err: %v\", err)", + "\t}", + "", + "\treturn testIDs, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadChecksDB", + "kind": "function", + "source": [ + "func LoadChecksDB(labelsExpr string) {", + "\tLoadInternalChecksDB()", + "", + "\tif preflight.ShouldRun(labelsExpr) {", + "\t\tpreflight.LoadChecks()", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + }, + { + "name": "Run", + "qualifiedName": "Run", + "exported": true, + "signature": "func(string, string)(error)", + "doc": "Run Executes the certification test suite and produces results artifacts\n\nThis function initiates discovery of CNF target resources, runs all\nconfigured checks with a timeout, and records pod states before and after\nexecution. 
It builds a claim file containing check outcomes, optionally\ngenerates JUnit XML, sanitizes claims based on label filters, and may send\nthe collected results to an external collector or Red Hat Connect API.\nFinally it creates HTML artifacts for viewing, compresses all outputs into a\nzip file if requested, and cleans up temporary files according to user\npreferences.\n\nnolint:funlen,gocyclo", + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:176", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Print", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, 
SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", 
errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Sub", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces {", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = 
findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "CreateLabels", + "kind": "function", + "source": [ + "func CreateLabels(labelStrings []string) (labelObjects []labelObject) {", + "\tfor _, label := range labelStrings {", + "\t\tr := regexp.MustCompile(labelRegex)", + "", + "\t\tvalues := r.FindStringSubmatch(label)", + "\t\tif len(values) != labelRegexMatches {", + "\t\t\tlog.Error(\"Failed to parse label %q. 
It will not be used!, \", label)", + "\t\t\tcontinue", + "\t\t}", + "\t\tvar aLabel labelObject", + "\t\taLabel.LabelKey = values[1]", + "\t\taLabel.LabelValue = values[2]", + "\t\tlabelObjects = append(labelObjects, aLabel)", + "\t}", + "\treturn labelObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "CountPodsByStatus", + "kind": "function", + "source": [ + "func CountPodsByStatus(allPods []corev1.Pod) map[string]int {", + "\tpodStates := map[string]int{", + "\t\t\"ready\": 0,", + "\t\t\"non-ready\": 0,", + "\t}", + "", + "\tfor i := range allPods {", + "\t\tif allPods[i].Status.Phase == corev1.PodRunning {", + "\t\t\tpodStates[\"ready\"]++", + "\t\t} else {", + "\t\t\tpodStates[\"non-ready\"]++", + "\t\t}", + "\t}", + "", + "\treturn podStates", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + 
"\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "Build", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "ToJUnitXML", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + 
"\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector", + "name": "SendClaimFileToCollector", + "kind": "function", + "source": [ + "func SendClaimFileToCollector(endPoint, claimFilePath, executedBy, partnerName, password string) error {", + "\t// Temporary end point", + "\tpostReq, err := createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: collectorUploadTimeout, // 30 second timeout for collector uploads", + "\t}", + "\tresp, err := client.Do(postReq)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer resp.Body.Close()", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "CreateResultsWebFiles", + "kind": "function", + "source": [ + "func CreateResultsWebFiles(outputDir, claimFileName string) (filePaths []string, err error) {", + "\ttype file struct {", + "\t\tPath string", + "\t\tContent []byte", + "\t}", + "", + "\tstaticFiles := []file{", + "\t\t{", + "\t\t\tPath: filepath.Join(outputDir, htmlResultsFileName),", + "\t\t\tContent: htmlResultsFileContent,", + "\t\t},", + "\t}", + "", + "\tclaimFilePath := filepath.Join(outputDir, claimFileName)", + "\tclaimJSFilePath, err := createClaimJSFile(claimFilePath, outputDir)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to create file %s: %v\", jsClaimVarFileName, err)", + "\t}", + "", + "\tfilePaths = []string{claimJSFilePath}", + "\tfor _, f := range staticFiles {", + "\t\terr := os.WriteFile(f.Path, f.Content, 
writeFilePerms)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to create file %s: %v\", f.Path, err)", + "\t\t}", + "", + "\t\t// Add this file path to the slice.", + "\t\tfilePaths = append(filePaths, f.Path)", + "\t}", + "", + "\treturn filePaths, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() 
*TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "CompressResultsArtifacts", + "kind": "function", + "source": [ + "func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) {", + "\tzipFileName := generateZipFileName()", + "\tzipFilePath := filepath.Join(outputDir, zipFileName)", + "", + "\tlog.Info(\"Compressing results artifacts into %s\", zipFilePath)", + "\tzipFile, err := os.Create(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed creating tar.gz file %s in dir %s (filepath=%s): %v\",", + "\t\t\tzipFileName, outputDir, zipFilePath, err)", + "\t}", + "", + "\tzipWriter := gzip.NewWriter(zipFile)", + "\tdefer zipWriter.Close()", + "", + "\ttarWriter := tar.NewWriter(zipWriter)", + "\tdefer tarWriter.Close()", + "", + "\tfor _, file := range filePaths {", + "\t\tlog.Debug(\"Zipping file %s\", file)", + "", + "\t\ttarHeader, err := getFileTarHeader(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\terr = tarWriter.WriteHeader(tarHeader)", + "\t\tif err != nil {", + "\t\t\treturn \"\", 
fmt.Errorf(\"failed to write tar header for %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf, err := os.Open(file)", + "\t\tif err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to open file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tif _, err = io.Copy(tarWriter, f); err != nil {", + "\t\t\treturn \"\", fmt.Errorf(\"failed to tar file %s: %v\", file, err)", + "\t\t}", + "", + "\t\tf.Close()", + "\t}", + "", + "\t// Create fully qualified path to the zip file", + "\tzipFilePath, err = filepath.Abs(zipFilePath)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get absolute path for %s: %v\", zipFilePath, err)", + "\t}", + "", + "\t// Return the entire path to the zip file", + "\treturn zipFilePath, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "GetCertIDFromConnectAPI", + "kind": "function", + "source": [ + "func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) {", + "\tlog.Info(\"Getting certification ID from Red Hat Connect API\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tprojectID = strings.ReplaceAll(projectID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + 
"\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectAPIBaseURL = strings.ReplaceAll(connectAPIBaseURL, \"\\\"\", \"\")", + "", + "\t// remove quotes from projectID", + "\tprojectIDJSON := fmt.Sprintf(`{ \"projectId\": %q }`, projectID)", + "", + "\t// Convert JSON to bytes", + "\tprojectIDJSONBytes := []byte(projectIDJSON)", + "", + "\t// Create the URL", + "\tcertIDURL := fmt.Sprintf(\"%s/projects/certifications\", connectAPIBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", certIDURL, bytes.NewBuffer(projectIDJSONBytes))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\tlog.Debug(\"Request Body: %s\", req.Body)", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", \"application/json\")", + "\treq.Header.Set(\"Accept\", \"application/json\")", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// print the request", + "\tlog.Debug(\"Sending request to %s\", certIDURL)", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tres, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer res.Body.Close()", + "", + "\t// Parse the response", + "\tvar certIDResponse CertIDResponse", + "\terr = json.NewDecoder(res.Body).Decode(\u0026certIDResponse)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Certification ID retrieved from the API: %d\", certIDResponse.ID)", + "", + "\t// Return the certification ID", + "\treturn fmt.Sprintf(\"%d\", certIDResponse.ID), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + 
"kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/results", + "name": "SendResultsToConnectAPI", + "kind": "function", + "source": [ + "func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error {", + "\tlog.Info(\"Sending results to Red Hat Connect\")", + "", + "\t// sanitize the incoming variables, remove the double quotes if any", + "\tapiKey = strings.ReplaceAll(apiKey, \"\\\"\", \"\")", + "\tcertID = strings.ReplaceAll(certID, \"\\\"\", \"\")", + "\tproxyURL = strings.ReplaceAll(proxyURL, \"\\\"\", \"\")", + "\tproxyPort = strings.ReplaceAll(proxyPort, \"\\\"\", \"\")", + "\tconnectBaseURL = strings.ReplaceAll(connectBaseURL, \"\\\"\", \"\")", + "", + "\tvar buffer bytes.Buffer", + "", + "\t// Create a new multipart writer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\tlog.Debug(\"Creating form file for %s\", zipFile)", + "", + "\tclaimFile, err := os.Open(zipFile)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "", + "\tfw, err := w.CreateFormFile(\"attachment\", zipFile)", + "\tif err != nil 
{", + "\t\treturn fmt.Errorf(\"failed to create form file: %v\", err)", + "\t}", + "", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"type\", \"RhocpBestPracticeTestResult\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"certId\", certID)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Create a form field", + "\terr = createFormField(w, \"description\", \"CNF Test Results\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create the URL", + "\tconnectAPIURL := fmt.Sprintf(\"%s/attachments/upload\", connectBaseURL)", + "", + "\t// Create a new request", + "\treq, err := http.NewRequest(\"POST\", connectAPIURL, \u0026buffer)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to create new request: %v\", err)", + "\t}", + "", + "\t// Set the content type", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "\treq.Header.Set(\"x-api-key\", apiKey)", + "", + "\t// Create a client", + "\tclient := \u0026http.Client{", + "\t\tTimeout: redHatConnectAPITimeout, // 60 second timeout for Red Hat Connect API upload", + "\t}", + "\tsetProxy(client, proxyURL, proxyPort)", + "\tresponse, err := sendRequest(req, client)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to send post request to the endpoint: %v\", err)", + "\t}", + "\tdefer response.Body.Close()", + "", + "\t// Parse the result of the request", + "\tvar uploadResult UploadResult", + "\terr = json.NewDecoder(response.Body).Decode(\u0026uploadResult)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"failed to decode response body: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Download URL: %s\", uploadResult.DownloadURL)", + "\tlog.Info(\"Upload Date: %s\", uploadResult.UploadedDate)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "os", + "name": "Remove", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "os", + "name": "Remove", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + 
"\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + 
"\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", 
labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "Shutdown", + "qualifiedName": "Shutdown", + "exported": true, + "signature": "func()()", + "doc": "Shutdown Closes the global log file\n\nThe function attempts to close the globally opened log file used throughout\nthe test suite. If an error occurs during closure, it writes a message to\nstandard error and terminates the program with a non‑zero exit code. No\nvalue is returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:156", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "CloseGlobalLogFile", + "kind": "function", + "source": [ + "func CloseGlobalLogFile() error {", + "\treturn globalLogFile.Close()", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone 
mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Shutdown() {", + "\terr := log.CloseGlobalLogFile()", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not close the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "}" + ] + }, + { + "name": "Startup", + "qualifiedName": "Startup", + "exported": true, + "signature": "func()()", + "doc": "Startup Initializes the certification suite runtime\n\nThe function retrieves global test parameters, prepares a label expression\nevaluator for filtering tests, creates or replaces the log file in the output\ndirectory, and warns if no labels are provided. It then loads Kubernetes\nclient configurations, initializes the checks database according to the label\nfilter, and outputs version and configuration information to both the console\nand log. 
Finally, it displays a banner before the suite begins execution.", + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:112", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "InitLabelsExprEvaluator", + "kind": "function", + "source": [ + "func InitLabelsExprEvaluator(labelsFilter string) error {", + "\t// Expand the abstract \"all\" label into actual existing labels", + "\tif labelsFilter == \"all\" {", + "\t\tallTags := []string{identifiers.TagCommon, identifiers.TagExtended,", + "\t\t\tidentifiers.TagFarEdge, identifiers.TagTelco}", + "\t\tlabelsFilter = strings.Join(allTags, \",\")", + "\t}", + "", + "\teval, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not create a label evaluator, err: %v\", err)", + "\t}", + "", + "\tlabelsExprEvaluator = eval", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "CreateGlobalLogFile", + "kind": "function", + "source": [ + "func CreateGlobalLogFile(outputDir, logLevel string) error {", + "\tlogFilePath := outputDir + \"/\" + LogFileName", + "\terr := os.Remove(logFilePath)", + "\tif err != nil \u0026\u0026 !os.IsNotExist(err) {", + "\t\treturn fmt.Errorf(\"could not delete old log file, err: %v\", err)", + "\t}", + "", + "\tlogFile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE, LogFilePermissions)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not open a new log file, err: %v\", err)", + "\t}", + "", + 
"\tSetupLogger(logFile, logLevel)", + "\tglobalLogFile = logFile", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "getK8sClientsConfigFileNames", + "kind": "function", + "source": [ + "func getK8sClientsConfigFileNames() []string {", + "\tparams := configuration.GetTestParameters()", + "\tfileNames := []string{}", + "\tif params.Kubeconfig != \"\" {", + "\t\t// Add the kubeconfig path", + "\t\tfileNames = append(fileNames, params.Kubeconfig)", + "\t}", + "\thomeDir := os.Getenv(\"HOME\")", + "\tif homeDir != \"\" {", + "\t\tkubeConfigFilePath := filepath.Join(homeDir, \".kube\", \"config\")", + "\t\t// Check if the kubeconfig path exists", + "\t\tif _, err := os.Stat(kubeConfigFilePath); err == nil {", + "\t\t\tlog.Info(\"kubeconfig path %s is present\", kubeConfigFilePath)", + "\t\t\t// Only add the kubeconfig to the list of paths if it exists, since it is not added by the user", + "\t\t\tfileNames = append(fileNames, kubeConfigFilePath)", + "\t\t} else {", + "\t\t\tlog.Info(\"kubeconfig path %s is not present\", kubeConfigFilePath)", + "\t\t}", + "\t}", + "", + "\treturn 
fileNames", + "}" + ] + }, + { + "name": "LoadChecksDB", + "kind": "function", + "source": [ + "func LoadChecksDB(labelsExpr string) {", + "\tLoadInternalChecksDB()", + "", + "\tif preflight.ShouldRun(labelsExpr) {", + "\t\tpreflight.LoadChecks()", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "name": "GitVersion", + "kind": "function", + "source": [ + "func GitVersion() string {", + "\tif GitRelease == \"\" {", + "\t\tGitDisplayRelease = \"Unreleased build post \" + GitPreviousRelease", + "\t} else {", + "\t\tGitDisplayRelease = GitRelease", + "\t}", + "", + "\treturn GitDisplayRelease + \" (\" + GitCommit + \")\"", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "ToUpper", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintBanner", + "kind": "function", + "source": [ + "func 
PrintBanner() {", + "\tfmt.Print(banner)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "name": "GitVersion", + "kind": "function", + "source": [ + "func GitVersion() string {", + "\tif GitRelease == \"\" {", + "\t\tGitDisplayRelease = \"Unreleased build post \" + GitPreviousRelease", + "\t} else {", + "\t\tGitDisplayRelease = GitRelease", + "\t}", + "", + "\treturn GitDisplayRelease + \" (\" + GitCommit + \")\"", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + 
"func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "name": "getK8sClientsConfigFileNames", + "qualifiedName": "getK8sClientsConfigFileNames", + "exported": false, + "signature": "func()([]string)", + "doc": "getK8sClientsConfigFileNames Collects Kubernetes configuration 
file paths\n\nThe function retrieves test parameters to determine if a custom kubeconfig\npath is specified, then adds it to the list of filenames. It also checks for\na default config in the user's home directory under .kube/config, appending\nit only if the file exists. The resulting slice contains zero or more valid\nconfiguration paths used elsewhere to initialize client holders.", + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:81", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Getenv", + "kind": "function" + }, + { + "pkgPath": "path/filepath", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Stat", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, 
\"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getK8sClientsConfigFileNames() []string {", + "\tparams := configuration.GetTestParameters()", + "\tfileNames := []string{}", + "\tif params.Kubeconfig != \"\" {", + "\t\t// Add the kubeconfig path", + "\t\tfileNames = append(fileNames, params.Kubeconfig)", + "\t}", + "\thomeDir := os.Getenv(\"HOME\")", + "\tif homeDir != \"\" {", + "\t\tkubeConfigFilePath := filepath.Join(homeDir, \".kube\", \"config\")", + 
"\t\t// Check if the kubeconfig path exists", + "\t\tif _, err := os.Stat(kubeConfigFilePath); err == nil {", + "\t\t\tlog.Info(\"kubeconfig path %s is present\", kubeConfigFilePath)", + "\t\t\t// Only add the kubeconfig to the list of paths if it exists, since it is not added by the user", + "\t\t\tfileNames = append(fileNames, kubeConfigFilePath)", + "\t\t} else {", + "\t\t\tlog.Info(\"kubeconfig path %s is not present\", kubeConfigFilePath)", + "\t\t}", + "\t}", + "", + "\treturn fileNames", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "claimFileName", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:68" + }, + { + "name": "collectorAppURL", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:69" + }, + { + "name": "junitXMLOutputFileName", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:67" + }, + { + "name": "noLabelsFilterExpr", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:71" + }, + { + "name": "timeoutDefaultvalue", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/certsuite/certsuite.go:70" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "checksdb", + "files": 3, + "imports": [ + "errors", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "os", + "os/signal", + "runtime/debug", + "strings", + "sync", + "syscall", + "time", + "unicode/utf8" 
+ ], + "structs": [ + { + "name": "Check", + "exported": true, + "doc": "Check Represents an individual compliance check\n\nThis type holds configuration, state, and results for a single test. It\ntracks identifiers, labels, timing, timeouts, and any error that occurs\nduring execution. The struct also contains optional functions to run before,\nafter, or as the main check logic, along with mechanisms for skipping,\naborting, and logging output.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:50", + "fields": { + "AfterCheckFn": "func(check *Check) error", + "BeforeCheckFn": "func(check *Check) error", + "CapturedOutput": "string", + "CheckFn": "func(check *Check) error", + "EndTime": "time.Time", + "Error": "error", + "ID": "string", + "Labels": "[]string", + "Result": "CheckResult", + "SkipCheckFns": "[]func() (skip bool, reason string)", + "SkipMode": "skipMode", + "StartTime": "time.Time", + "Timeout": "time.Duration", + "abortChan": "chan string", + "details": "string", + "logArchive": "*strings.Builder", + "logger": "*log.Logger", + "mutex": "sync.Mutex", + "skipReason": "string" + }, + "methodNames": [ + "Abort", + "GetLogger", + "GetLogs", + "LogDebug", + "LogError", + "LogFatal", + "LogInfo", + "LogWarn", + "Run", + "SetAbortChan", + "SetResult", + "SetResultAborted", + "SetResultError", + "SetResultSkipped", + "WithAfterCheckFn", + "WithBeforeCheckFn", + "WithCheckFn", + "WithSkipCheckFn", + "WithSkipModeAll", + "WithSkipModeAny", + "WithTimeout" + ], + "source": [ + "type Check struct {", + "\tmutex sync.Mutex", + "\tID string", + "\tLabels []string", + "", + "\tBeforeCheckFn, AfterCheckFn func(check *Check) error", + "\tCheckFn func(check *Check) error", + "", + "\tSkipCheckFns []func() (skip bool, reason string)", + "\tSkipMode skipMode", + "", + "\tResult CheckResult", + "\tCapturedOutput string", + "\tdetails string", + "\tskipReason string", + "", + "\tlogger *log.Logger", + "\tlogArchive *strings.Builder", + "", + "\tStartTime, 
EndTime time.Time", + "\tTimeout time.Duration", + "\tError error", + "\tabortChan chan string", + "}" + ] + }, + { + "name": "ChecksGroup", + "exported": true, + "doc": "ChecksGroup Holds a collection of checks and orchestrates their execution\n\nThis structure stores the group's name, the list of checks to run, and\noptional callback functions for before/after all and before/after each check.\nIt tracks which check is currently executing to handle aborts or failures\ncorrectly. The group provides methods to add checks, run them with support\nfor labeling, and record results.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:24", + "fields": { + "afterAllFn": "func(checks []*Check) error", + "afterEachFn": "func(check *Check) error", + "beforeAllFn": "func(checks []*Check) error", + "beforeEachFn": "func(check *Check) error", + "checks": "[]*Check", + "currentRunningCheckIdx": "int", + "name": "string" + }, + "methodNames": [ + "Add", + "OnAbort", + "RecordChecksResults", + "RunChecks", + "WithAfterAllFn", + "WithAfterEachFn", + "WithBeforeAllFn", + "WithBeforeEachFn" + ], + "source": [ + "type ChecksGroup struct {", + "\tname string", + "\tchecks []*Check", + "", + "\tbeforeAllFn, afterAllFn func(checks []*Check) error", + "", + "\tbeforeEachFn, afterEachFn func(check *Check) error", + "", + "\tcurrentRunningCheckIdx int", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "Abort", + "qualifiedName": "Check.Abort", + "exported": true, + "receiver": "Check", + "signature": "func(string)()", + "doc": "Check.Abort Aborts a check immediately with an error message\n\nThe method locks the check’s mutex, constructs a descriptive abort message\nusing the check ID and the supplied reason, sends this message on the abort\nchannel, then panics to terminate execution. 
It is used to halt a check that\nencounters a non‑graceful failure condition.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:101", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + }, + { + "name": "panic", + "kind": "function" + }, + { + "name": "AbortPanicMsg", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) Abort(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tabortMsg := check.ID + \" issued non-graceful abort: \" + reason", + "", + "\tcheck.abortChan \u003c- abortMsg", + "\tpanic(AbortPanicMsg(abortMsg))", + "}" + ] + }, + { + "name": "GetLogger", + "qualifiedName": "Check.GetLogger", + "exported": true, + "receiver": "Check", + "signature": "func()(*log.Logger)", + "doc": "Check.GetLogger Provides access to the check's logger\n\nThe method returns the logger stored in the Check instance, allowing callers\nto log messages related to that specific check. 
It does not modify the state\nand simply exposes the internal logger pointer.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:189", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) GetLogger() *log.Logger {", + "\treturn check.logger", + "}" + ] + }, + { + "name": "GetLogs", + "qualifiedName": "Check.GetLogs", + "exported": true, + "receiver": "Check", + "signature": "func()(string)", + "doc": "Check.GetLogs Retrieves stored log output\n\nThis method returns the accumulated log data for a check as a single string.\nThe logs are gathered during the check's execution and stored in an internal\nbuffer, which this function simply exposes to callers such as reporting or\nresult recording functions.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:180", + "calls": [ + { + "name": "CheckResult.String", + "kind": "function", + "source": [ + "func (cr CheckResult) String() string {", + "\treturn string(cr)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "printFailedChecksLog", + "kind": "function", + "source": [ + "func printFailedChecksLog() {", + "\tfor _, group := range dbByGroup {", + "\t\tfor _, check := range group.checks {", + "\t\t\tif check.Result != CheckResultFailed {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tlogHeader := fmt.Sprintf(\"| \"+cli.Cyan+\"LOG (%s)\"+cli.Reset+\" |\", check.ID)", + "\t\t\tnbSymbols := utf8.RuneCountInString(logHeader) - nbColorSymbols", + "\t\t\tfmt.Println(strings.Repeat(\"-\", nbSymbols))", + "\t\t\tfmt.Println(logHeader)", + "\t\t\tfmt.Println(strings.Repeat(\"-\", nbSymbols))", + "\t\t\tcheckLogs := check.GetLogs()", + "\t\t\tif checkLogs == \"\" {", + "\t\t\t\tfmt.Println(\"Empty log output\")", + "\t\t\t} else {", + "\t\t\t\tfmt.Println(checkLogs)", + "\t\t\t}", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "recordCheckResult", + "kind": "function", + "source": [ + "func recordCheckResult(check *Check) {", + "\tclaimID, ok := identifiers.TestIDToClaimID[check.ID]", + "\tif !ok {", + "\t\tcheck.LogDebug(\"TestID %s has no corresponding Claim ID - skipping result recording\", check.ID)", + "\t\treturn", + "\t}", + "", + "\tcheck.LogInfo(\"Recording result %q, claimID: %+v\", strings.ToUpper(check.Result.String()), claimID)", + "\tresultsDB[check.ID] = claim.Result{", + "\t\tTestID: \u0026claimID,", + "\t\tState: check.Result.String(),", + "\t\tStartTime: check.StartTime.String(),", + "\t\tEndTime: check.EndTime.String(),", + "\t\tDuration: int(check.EndTime.Sub(check.StartTime).Seconds()),", + "\t\tSkipReason: check.skipReason,", + "\t\tCapturedTestOutput: check.GetLogs(),", + "\t\tCheckDetails: check.details,", + "", + "\t\tCategoryClassification: \u0026claim.CategoryClassification{", + "\t\t\tExtended: identifiers.Catalog[claimID].CategoryClassification[identifiers.Extended],", + "\t\t\tFarEdge: identifiers.Catalog[claimID].CategoryClassification[identifiers.FarEdge],", + "\t\t\tNonTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.NonTelco],", + "\t\t\tTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.Telco]},", + "\t\tCatalogInfo: \u0026claim.CatalogInfo{", + "\t\t\tDescription: identifiers.Catalog[claimID].Description,", + "\t\t\tRemediation: identifiers.Catalog[claimID].Remediation,", + "\t\t\tBestPracticeReference: identifiers.Catalog[claimID].BestPracticeReference,", + "\t\t\tExceptionProcess: identifiers.Catalog[claimID].ExceptionProcess,", + "\t\t},", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) GetLogs() string {", + "\treturn check.logArchive.String()", + "}" + ] + }, + { + "name": "LogDebug", + "qualifiedName": "Check.LogDebug", + "exported": true, + "receiver": "Check", + 
"signature": "func(string, ...any)()", + "doc": "Check.LogDebug logs a debug message with optional formatting\n\nThis method sends a formatted string to the check's logger at the debug\nlevel, allowing additional arguments for interpolation. It forwards the call\nto an internal logging helper that determines if the debug level is enabled\nbefore emitting the record. No value is returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:128", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "recordCheckResult", + "kind": "function", + "source": [ + "func recordCheckResult(check *Check) {", + "\tclaimID, ok := identifiers.TestIDToClaimID[check.ID]", + "\tif !ok {", + "\t\tcheck.LogDebug(\"TestID %s has no corresponding Claim ID - skipping result recording\", check.ID)", + "\t\treturn", + "\t}", + "", + "\tcheck.LogInfo(\"Recording result %q, claimID: %+v\", strings.ToUpper(check.Result.String()), claimID)", + "\tresultsDB[check.ID] = claim.Result{", + "\t\tTestID: \u0026claimID,", + "\t\tState: check.Result.String(),", + "\t\tStartTime: 
check.StartTime.String(),", + "\t\tEndTime: check.EndTime.String(),", + "\t\tDuration: int(check.EndTime.Sub(check.StartTime).Seconds()),", + "\t\tSkipReason: check.skipReason,", + "\t\tCapturedTestOutput: check.GetLogs(),", + "\t\tCheckDetails: check.details,", + "", + "\t\tCategoryClassification: \u0026claim.CategoryClassification{", + "\t\t\tExtended: identifiers.Catalog[claimID].CategoryClassification[identifiers.Extended],", + "\t\t\tFarEdge: identifiers.Catalog[claimID].CategoryClassification[identifiers.FarEdge],", + "\t\t\tNonTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.NonTelco],", + "\t\t\tTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.Telco]},", + "\t\tCatalogInfo: \u0026claim.CatalogInfo{", + "\t\t\tDescription: identifiers.Catalog[claimID].Description,", + "\t\t\tRemediation: identifiers.Catalog[claimID].Remediation,", + "\t\t\tBestPracticeReference: identifiers.Catalog[claimID].BestPracticeReference,", + "\t\t\tExceptionProcess: identifiers.Catalog[claimID].ExceptionProcess,", + "\t\t},", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) LogDebug(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "LogError", + "qualifiedName": "Check.LogError", + "exported": true, + "receiver": "Check", + "signature": "func(string, ...any)()", + "doc": "Check.LogError logs an error message for the check\n\nThis method sends a formatted string and optional arguments to the logging\nsystem at the error level, associating the log with the specific check\ninstance. 
It uses the check's logger field or falls back to a default if nil.\nThe function does not return any value.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:158", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.SetResult", + "kind": "function", + "source": [ + "func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tresultObjectsStr, err := testhelper.ResultObjectsToString(compliantObjects, nonCompliantObjects)", + "\tif err != nil {", + "\t\tcheck.LogError(\"Failed to get result objects string for check %s: %v\", check.ID, err)", + "\t}", + "", + "\tcheck.details = resultObjectsStr", + "", + "\t// If an error/panic happened before, do not change the result.", + "\tif check.Result == CheckResultError {", + "\t\treturn", + "\t}", + "", + "\tif len(nonCompliantObjects) \u003e 0 {", + "\t\tcheck.Result = CheckResultFailed", + "\t\tcheck.skipReason = \"\"", + "\t} 
else if len(compliantObjects) == 0 {", + "\t\t// Mark this check as skipped.", + "\t\tcheck.LogWarn(\"Check %s marked as skipped as both compliant and non-compliant objects lists are empty.\", check.ID)", + "\t\tcheck.skipReason = \"compliant and non-compliant objects lists are empty\"", + "\t\tcheck.Result = CheckResultSkipped", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runCheck", + "kind": "function", + "source": [ + "func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) {", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\t// Don't do anything in case the check was manually aborted by check.Abort().", + "\t\t\tif msg, ok := r.(AbortPanicMsg); ok {", + "\t\t\t\tlog.Warn(\"Check was manually aborted, msg: %v\", msg)", + "\t\t\t\terr = fmt.Errorf(\"%v\", msg)", + "\t\t\t\treturn", + "\t\t\t}", + "", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "", + "\t\t\tcheck.LogError(\"Panic while running check %s function:\\n%v\", check.ID, stackTrace)", + "\t\t\terr = onFailure(fmt.Sprintf(\"check %s function panic\", check.ID), stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := check.Run(); err != nil {", + "\t\tcheck.LogError(\"Unexpected error while running check %s function: %v\", check.ID, err.Error())", + "\t\treturn onFailure(fmt.Sprintf(\"check %s function unexpected error\", check.ID), err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "shouldSkipCheck", + "kind": "function", + "source": [ + "func shouldSkipCheck(check *Check) (skip bool, reasons []string) {", + "\tif len(check.SkipCheckFns) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Short-circuit", + "\tif len(check.SkipCheckFns) == 0 {", + "\t\treturn false, 
[]string{}", + "\t}", + "", + "\t// Save the skipFn index in case it panics so it can be used in the log trace.", + "\tcurrentSkipFnIndex := 0", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tcheck.LogError(\"Skip check function (idx=%d) panic'ed: %s\", currentSkipFnIndex, stackTrace)", + "\t\t\tskip = true", + "\t\t\treasons = []string{fmt.Sprintf(\"skipCheckFn (idx=%d) panic:\\n%s\", currentSkipFnIndex, stackTrace)}", + "\t\t}", + "\t}()", + "", + "\t// Call all the skip functions first.", + "\tfor _, skipFn := range check.SkipCheckFns {", + "\t\tif skip, reason := skipFn(); skip {", + "\t\t\treasons = append(reasons, reason)", + "\t\t}", + "\t\tcurrentSkipFnIndex++", + "\t}", + "", + "\t// If none of the skipFn returned true, exit now.", + "\tif len(reasons) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Now we need to check the skipMode for this check.", + "\tswitch check.SkipMode {", + "\tcase SkipModeAny:", + "\t\treturn true, reasons", + "\tcase SkipModeAll:", + "\t\t// Only skip if all the skipFn returned true.", + "\t\tif len(reasons) == len(check.SkipCheckFns) {", + "\t\t\treturn true, reasons", + "\t\t}", + "\t\treturn false, []string{}", + "\t}", + "", + "\treturn false, []string{}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) LogError(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelError, msg, args...)", + "}" + ] + }, + { + "name": "LogFatal", + "qualifiedName": "Check.LogFatal", + "exported": true, + "receiver": "Check", + "signature": "func(string, ...any)()", + "doc": "Check.LogFatal Logs a fatal message and terminates the program\n\nThe method records a fatal log entry using the provided logger, prints the\nmessage to standard error prefixed with \"FATAL:\", and then exits the process\nwith status code 1. 
It accepts a format string and optional arguments, which\nare passed to both the logger and the formatted output.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:168", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) LogFatal(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "LogInfo", + "qualifiedName": "Check.LogInfo", + "exported": true, + "receiver": "Check", + "signature": "func(string, ...any)()", + "doc": "Check.LogInfo Logs an informational message for a check\n\nThis method forwards the supplied format string and arguments to a logging\nhelper, tagging the output with the Info level. It uses the check's internal\nlogger if available or falls back to a default logger. 
The function does not\nreturn any value; it simply emits the formatted log entry.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:138", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.Run", + "kind": "function", + "source": [ + "func (check *Check) Run() error {", + "\tif check == nil {", + "\t\treturn fmt.Errorf(\"check is a nil pointer\")", + "\t}", + "", + "\tif check.Error != nil {", + "\t\treturn fmt.Errorf(\"unable to run due to a previously existing error: %v\", check.Error)", + "\t}", + "", + "\tcli.PrintCheckRunning(check.ID)", + "", + "\tcheck.StartTime = time.Now()", + "\tdefer func() {", + "\t\tcheck.EndTime = time.Now()", + "\t}()", + "", + "\tcheck.LogInfo(\"Running check (labels: %v)\", check.Labels)", + "\tif check.BeforeCheckFn != nil {", + "\t\tif err := check.BeforeCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in before check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tif err := check.CheckFn(check); err != nil {", + "\t\treturn fmt.Errorf(\"check %s failed in check function: %v\", 
check.ID, err)", + "\t}", + "", + "\tif check.AfterCheckFn != nil {", + "\t\tif err := check.AfterCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in after check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tprintCheckResult(check)", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "recordCheckResult", + "kind": "function", + "source": [ + "func recordCheckResult(check *Check) {", + "\tclaimID, ok := identifiers.TestIDToClaimID[check.ID]", + "\tif !ok {", + "\t\tcheck.LogDebug(\"TestID %s has no corresponding Claim ID - skipping result recording\", check.ID)", + "\t\treturn", + "\t}", + "", + "\tcheck.LogInfo(\"Recording result %q, claimID: %+v\", strings.ToUpper(check.Result.String()), claimID)", + "\tresultsDB[check.ID] = claim.Result{", + "\t\tTestID: \u0026claimID,", + "\t\tState: check.Result.String(),", + "\t\tStartTime: check.StartTime.String(),", + "\t\tEndTime: check.EndTime.String(),", + "\t\tDuration: int(check.EndTime.Sub(check.StartTime).Seconds()),", + "\t\tSkipReason: check.skipReason,", + "\t\tCapturedTestOutput: check.GetLogs(),", + "\t\tCheckDetails: check.details,", + "", + "\t\tCategoryClassification: \u0026claim.CategoryClassification{", + "\t\t\tExtended: identifiers.Catalog[claimID].CategoryClassification[identifiers.Extended],", + "\t\t\tFarEdge: identifiers.Catalog[claimID].CategoryClassification[identifiers.FarEdge],", + "\t\t\tNonTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.NonTelco],", + "\t\t\tTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.Telco]},", + "\t\tCatalogInfo: \u0026claim.CatalogInfo{", + "\t\t\tDescription: identifiers.Catalog[claimID].Description,", + "\t\t\tRemediation: identifiers.Catalog[claimID].Remediation,", + "\t\t\tBestPracticeReference: identifiers.Catalog[claimID].BestPracticeReference,", + "\t\t\tExceptionProcess: 
identifiers.Catalog[claimID].ExceptionProcess,", + "\t\t},", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "skipCheck", + "kind": "function", + "source": [ + "func skipCheck(check *Check, reason string) {", + "\tcheck.LogInfo(\"Skipping check %s, reason: %s\", check.ID, reason)", + "\tcheck.SetResultSkipped(reason)", + "\tprintCheckResult(check)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) LogInfo(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "LogWarn", + "qualifiedName": "Check.LogWarn", + "exported": true, + "receiver": "Check", + "signature": "func(string, ...any)()", + "doc": "Check.LogWarn logs a warning message for the check\n\nThe method formats a message with optional arguments and forwards it to the\ninternal logger at the warning level. It does not alter any state of the\nCheck instance, only records diagnostic information that can be inspected\nlater.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:148", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logf", + "kind": "function", + "source": [ + "func Logf(logger *Logger, level, format string, args ...any) {", + "\tif logger == nil {", + "\t\tlogger = \u0026Logger{", + "\t\t\tl: slog.Default(),", + "\t\t}", + "\t}", + "", + "\tlogLevel, err := parseLevel(level)", + "\tif err != nil {", + "\t\tlogger.Fatal(\"Error when parsing log level, err: %v\", err)", + "\t}", + "", + "\tif !logger.l.Enabled(context.TODO(), logLevel) {", + "\t\treturn", + "\t}", + "\tvar pcs [1]uintptr", + "\t// skip [Callers, Log, LogWrapper]", + "\truntime.Callers(3, pcs[:]) //nolint:mnd", + "\tr := slog.NewRecord(time.Now(), logLevel, fmt.Sprintf(format, args...), pcs[0])", + "\t_ = logger.l.Handler().Handle(context.TODO(), r)", + "}" + ] + } + ], + 
"calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.SetResult", + "kind": "function", + "source": [ + "func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tresultObjectsStr, err := testhelper.ResultObjectsToString(compliantObjects, nonCompliantObjects)", + "\tif err != nil {", + "\t\tcheck.LogError(\"Failed to get result objects string for check %s: %v\", check.ID, err)", + "\t}", + "", + "\tcheck.details = resultObjectsStr", + "", + "\t// If an error/panic happened before, do not change the result.", + "\tif check.Result == CheckResultError {", + "\t\treturn", + "\t}", + "", + "\tif len(nonCompliantObjects) \u003e 0 {", + "\t\tcheck.Result = CheckResultFailed", + "\t\tcheck.skipReason = \"\"", + "\t} else if len(compliantObjects) == 0 {", + "\t\t// Mark this check as skipped.", + "\t\tcheck.LogWarn(\"Check %s marked as skipped as both compliant and non-compliant objects lists are empty.\", check.ID)", + "\t\tcheck.skipReason = \"compliant and non-compliant objects lists are empty\"", + "\t\tcheck.Result = CheckResultSkipped", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.SetResultError", + "kind": "function", + "source": [ + "func (check *Check) SetResultError(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tif check.Result == CheckResultError {", + "\t\tcheck.LogWarn(\"Check %s result was already marked as error.\", check.ID)", + "\t\treturn", + "\t}", + "\tcheck.Result = CheckResultError", + "\tcheck.skipReason = reason", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check 
*Check) LogWarn(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "Run", + "qualifiedName": "Check.Run", + "exported": true, + "receiver": "Check", + "signature": "func()(error)", + "doc": "Check.Run Runs a check through its pre‑check, main, and post‑check stages\n\nThe method first validates the receiver and any prior errors, then signals\nthat the check is starting and records timestamps. It executes an optional\nbefore function, followed by the core check function, and finally an optional\nafter function, each returning an error if they fail. If all stages succeed,\nit prints the final result based on the check's outcome and returns nil;\notherwise it propagates the encountered error.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:406", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckRunning", + "kind": "function", + "source": [ + "func PrintCheckRunning(checkName string) {", + "\tstopChan = make(chan bool)", + "\tcheckLoggerChan = make(chan string)", + "", + "\tline := \"[ \" + CheckResultTagRunning + \" ] \" + checkName", + "\tif !isTTY() {", + "\t\tline += \"\\n\"", + "\t}", + "", + "\tfmt.Print(line)", + "", + "\tgo updateRunningCheckLine(checkName, stopChan)", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "name": "Check.LogInfo", + "kind": "function", + "source": [ + "func (check *Check) LogInfo(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "BeforeCheckFn", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "CheckFn", + "kind": 
"function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "AfterCheckFn", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runCheck", + "kind": "function", + "source": [ + "func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) {", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\t// Don't do anything in case the check was manually aborted by check.Abort().", + "\t\t\tif msg, ok := r.(AbortPanicMsg); ok {", + "\t\t\t\tlog.Warn(\"Check was manually aborted, msg: %v\", msg)", + "\t\t\t\terr = fmt.Errorf(\"%v\", msg)", + "\t\t\t\treturn", + "\t\t\t}", + "", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "", + "\t\t\tcheck.LogError(\"Panic while running check %s function:\\n%v\", check.ID, stackTrace)", + "\t\t\terr = onFailure(fmt.Sprintf(\"check %s function panic\", check.ID), stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := check.Run(); err != nil {", + "\t\tcheck.LogError(\"Unexpected error while running check %s function: %v\", check.ID, err.Error())", + "\t\treturn onFailure(fmt.Sprintf(\"check %s function unexpected error\", check.ID), err.Error(), group, check, remainingChecks)", + "\t}", + "", 
+ "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) Run() error {", + "\tif check == nil {", + "\t\treturn fmt.Errorf(\"check is a nil pointer\")", + "\t}", + "", + "\tif check.Error != nil {", + "\t\treturn fmt.Errorf(\"unable to run due to a previously existing error: %v\", check.Error)", + "\t}", + "", + "\tcli.PrintCheckRunning(check.ID)", + "", + "\tcheck.StartTime = time.Now()", + "\tdefer func() {", + "\t\tcheck.EndTime = time.Now()", + "\t}()", + "", + "\tcheck.LogInfo(\"Running check (labels: %v)\", check.Labels)", + "\tif check.BeforeCheckFn != nil {", + "\t\tif err := check.BeforeCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in before check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tif err := check.CheckFn(check); err != nil {", + "\t\treturn fmt.Errorf(\"check %s failed in check function: %v\", check.ID, err)", + "\t}", + "", + "\tif check.AfterCheckFn != nil {", + "\t\tif err := check.AfterCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in after check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tprintCheckResult(check)", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "SetAbortChan", + "qualifiedName": "Check.SetAbortChan", + "exported": true, + "receiver": "Check", + "signature": "func(chan string)()", + "doc": "Check.SetAbortChan Assigns a channel to signal check abortion\n\nThis method records the supplied channel into the check instance so that the\ncheck can listen for abort signals during execution. It performs a simple\nfield assignment and does not return any value. 
The stored channel is later\nused by other parts of the framework to terminate the check prematurely when\nneeded.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:118", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err 
!= nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) SetAbortChan(abortChan chan string) {", + "\tcheck.abortChan = abortChan", + "}" + ] + }, + { + "name": "SetResult", + "qualifiedName": "Check.SetResult", + "exported": true, + "receiver": "Check", + "signature": "func([]*testhelper.ReportObject, []*testhelper.ReportObject)()", + "doc": "Check.SetResult stores compliance results for a check\n\nThis method records the list of compliant and non‑compliant objects for a\ncheck, converting them into a JSON string that is kept in the details field.\nIt locks the check’s mutex to ensure thread safety, skips any changes if\nthe check has already been aborted or errored, and updates the result status\nbased on whether there are failures or no objects at all. 
Errors during\nserialization are logged as error messages.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:314", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "ResultObjectsToString", + "kind": "function", + "source": [ + "func ResultObjectsToString(compliantObject, nonCompliantObject []*ReportObject) (string, error) {", + "\treason := FailureReasonOut{", + "\t\tCompliantObjectsOut: compliantObject,", + "\t\tNonCompliantObjectsOut: nonCompliantObject,", + "\t}", + "", + "\tbytes, err := json.Marshal(reason)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"could not marshall FailureReasonOut object: %v\", err)", + "\t}", + "", + "\treturn string(bytes), nil", + "}" + ] + }, + { + "name": "Check.LogError", + "kind": "function", + "source": [ + "func (check *Check) LogError(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelError, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "Check.LogWarn", + "kind": "function", + "source": [ + "func (check *Check) LogWarn(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tresultObjectsStr, err := testhelper.ResultObjectsToString(compliantObjects, nonCompliantObjects)", + "\tif err != nil {", + "\t\tcheck.LogError(\"Failed to get result objects string for check %s: %v\", check.ID, err)", + "\t}", + "", + "\tcheck.details = resultObjectsStr", + "", + "\t// If an 
error/panic happened before, do not change the result.", + "\tif check.Result == CheckResultError {", + "\t\treturn", + "\t}", + "", + "\tif len(nonCompliantObjects) \u003e 0 {", + "\t\tcheck.Result = CheckResultFailed", + "\t\tcheck.skipReason = \"\"", + "\t} else if len(compliantObjects) == 0 {", + "\t\t// Mark this check as skipped.", + "\t\tcheck.LogWarn(\"Check %s marked as skipped as both compliant and non-compliant objects lists are empty.\", check.ID)", + "\t\tcheck.skipReason = \"compliant and non-compliant objects lists are empty\"", + "\t\tcheck.Result = CheckResultSkipped", + "\t}", + "}" + ] + }, + { + "name": "SetResultAborted", + "qualifiedName": "Check.SetResultAborted", + "exported": true, + "receiver": "Check", + "signature": "func(string)()", + "doc": "Check.SetResultAborted Marks a check as aborted with a reason\n\nThis method records that the check has been aborted, setting its result state\naccordingly. It stores the provided abort reason for later reference and\nprotects the update with a mutex to ensure thread safety.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:390", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.OnAbort", + "kind": "function", + "source": [ + "func (group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif 
group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", + "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) SetResultAborted(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tcheck.Result = CheckResultAborted", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "SetResultError", + "qualifiedName": "Check.SetResultError", + "exported": true, + "receiver": "Check", + "signature": "func(string)()", + "doc": "Check.SetResultError Marks a check as failed with an error reason\n\nThis method locks the check’s mutex, verifies that it has not already been\naborted or marked as an error, then sets its result to error and records the\nsupplied reason. 
If the check is already in an error state, a warning log is\nemitted instead of changing the state.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:369", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + }, + { + "name": "Check.LogWarn", + "kind": "function", + "source": [ + "func (check *Check) LogWarn(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) SetResultError(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tif check.Result == CheckResultError {", + "\t\tcheck.LogWarn(\"Check %s result was already marked as error.\", check.ID)", + "\t\treturn", + "\t}", + "\tcheck.Result = CheckResultError", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "SetResultSkipped", + "qualifiedName": "Check.SetResultSkipped", + "exported": true, + "receiver": "Check", + "signature": "func(string)()", + "doc": "Check.SetResultSkipped Marks a check as skipped with an optional 
reason\n\nWhen invoked, this method acquires the check’s mutex to ensure thread\nsafety, then sets the result status to skipped unless the check has already\nbeen aborted. It records the provided reason for skipping, which can be used\nfor reporting or debugging purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:351", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.OnAbort", + "kind": "function", + "source": [ + "func (group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", + "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "skipCheck", + "kind": "function", + "source": [ + "func skipCheck(check *Check, reason string) {", + "\tcheck.LogInfo(\"Skipping check %s, reason: %s\", check.ID, reason)", + 
"\tcheck.SetResultSkipped(reason)", + "\tprintCheckResult(check)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) SetResultSkipped(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tcheck.Result = CheckResultSkipped", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "WithAfterCheckFn", + "qualifiedName": "Check.WithAfterCheckFn", + "exported": true, + "receiver": "Check", + "signature": "func(func(check *Check) error)(*Check)", + "doc": "Check.WithAfterCheckFn Sets a callback to run after the check completes\n\nThe method attaches a function that will be invoked once the check finishes,\nprovided no error has already occurred. It stores the supplied function in\nthe AfterCheckFn field of the Check instance and returns the same instance\nfor chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:229", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) WithAfterCheckFn(afterCheckFn func(check *Check) error) *Check {", + "\tif check.Error != nil {", + "\t\treturn check", + "\t}", + "", + "\tcheck.AfterCheckFn = afterCheckFn", + "\treturn check", + "}" + ] + }, + { + "name": "WithBeforeCheckFn", + "qualifiedName": "Check.WithBeforeCheckFn", + "exported": true, + "receiver": "Check", + "signature": "func(func(check *Check) error)(*Check)", + "doc": "Check.WithBeforeCheckFn Assigns a custom function to run before the main check\n\nThe method accepts a callback that receives the current Check instance and\nmay return an error. 
If the Check already contains an error, it skips\nassignment and returns the Check unchanged; otherwise, it stores the callback\nin BeforeCheckFn and returns the same Check pointer for chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:214", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) WithBeforeCheckFn(beforeCheckFn func(check *Check) error) *Check {", + "\tif check.Error != nil {", + "\t\treturn check", + "\t}", + "", + "\tcheck.BeforeCheckFn = beforeCheckFn", + "\treturn check", + "}" + ] + }, + { + "name": "WithCheckFn", + "qualifiedName": "Check.WithCheckFn", + "exported": true, + "receiver": "Check", + "signature": "func(func(check *Check) error)(*Check)", + "doc": "Check.WithCheckFn Assigns a new check function only when no previous error exists\n\nThis method first checks whether the Check instance already contains an\nerror; if so, it returns the instance unchanged. Otherwise, it assigns the\nprovided function to the CheckFn field and then returns the modified instance\nfor chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:199", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) WithCheckFn(checkFn func(check *Check) error) *Check {", + "\tif check.Error != nil {", + "\t\treturn check", + "\t}", + "", + "\tcheck.CheckFn = checkFn", + "\treturn check", + "}" + ] + }, + { + "name": "WithSkipCheckFn", + "qualifiedName": "Check.WithSkipCheckFn", + "exported": true, + "receiver": "Check", + "signature": "func(...func() (skip bool, reason string))(*Check)", + "doc": "Check.WithSkipCheckFn Adds functions that decide whether a test should be skipped\n\nWhen called, this method appends one or more supplied functions to the\nreceiver's list of skip-check callbacks, but only if no previous error has\nbeen recorded on the Check instance. 
Each added function returns a boolean\nindicating whether skipping is required and an optional reason string. The\nupdated Check pointer is then returned for chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:245", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) WithSkipCheckFn(skipCheckFn ...func() (skip bool, reason string)) *Check {", + "\tif check.Error != nil {", + "\t\treturn check", + "\t}", + "", + "\tcheck.SkipCheckFns = append(check.SkipCheckFns, skipCheckFn...)", + "", + "\treturn check", + "}" + ] + }, + { + "name": "WithSkipModeAll", + "qualifiedName": "Check.WithSkipModeAll", + "exported": true, + "receiver": "Check", + "signature": "func()(*Check)", + "doc": "Check.WithSkipModeAll enables all-skip mode\n\nThis method changes a check's configuration so that it will skip any\nremaining steps or validations, effectively marking the check as fully\nskipped. It first verifies that no error has already been recorded on the\ncheck; if an error exists, it returns immediately without modifying the\nstate. 
When successful, it assigns the SkipModeAll constant to the check and\nreturns the modified check for further chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:280", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) WithSkipModeAll() *Check {", + "\tif check.Error != nil {", + "\t\treturn check", + "\t}", + "", + "\tcheck.SkipMode = SkipModeAll", + "", + "\treturn check", + "}" + ] + }, + { + "name": "WithSkipModeAny", + "qualifiedName": "Check.WithSkipModeAny", + "exported": true, + "receiver": "Check", + "signature": "func()(*Check)", + "doc": "Check.WithSkipModeAny sets the check to always skip when appropriate\n\nThis method changes the internal skip mode of a check to allow it to be\nskipped under any circumstance that matches the default logic. If an error is\nalready present on the check, the call becomes a no‑op and simply returns\nthe existing instance. Otherwise it assigns SkipModeAny to the check and\nreturns the updated object for chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:262", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) WithSkipModeAny() *Check {", + "\tif check.Error != nil {", + "\t\treturn check", + "\t}", + "", + "\tcheck.SkipMode = SkipModeAny", + "", + "\treturn check", + "}" + ] + }, + { + "name": "WithTimeout", + "qualifiedName": "Check.WithTimeout", + "exported": true, + "receiver": "Check", + "signature": "func(time.Duration)(*Check)", + "doc": "Check.WithTimeout assigns a timeout value to the check\n\nIf the check has not already encountered an error, this method updates its\nTimeout field with the supplied duration and returns the modified check for\nchaining. 
If an error is present, it simply returns the check unchanged so\nthat subsequent operations are skipped.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:296", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (check *Check) WithTimeout(duration time.Duration) *Check {", + "\tif check.Error != nil {", + "\t\treturn check", + "\t}", + "", + "\tcheck.Timeout = duration", + "", + "\treturn check", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "CheckResult.String", + "exported": true, + "receiver": "CheckResult", + "signature": "func()(string)", + "doc": "CheckResult.String Converts the result code to a readable string\n\nThis method casts the underlying CheckResult type, which is an alias of\nstring, into a standard Go string. It returns the textual representation of\nthe check outcome, such as \"Passed\", \"Failed\" or \"Skipped\". The conversion\nallows callers to use the result in logs and comparisons without needing to\nknow the internal type.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:39", + "calls": [ + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.GetLogs", + "kind": "function", + "source": [ + "func (check *Check) GetLogs() string {", + "\treturn check.logArchive.String()", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif 
!labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't 
run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "recordCheckResult", + "kind": "function", + "source": [ + "func recordCheckResult(check *Check) {", + "\tclaimID, ok := identifiers.TestIDToClaimID[check.ID]", + "\tif !ok {", + "\t\tcheck.LogDebug(\"TestID %s has no corresponding Claim ID - skipping result recording\", check.ID)", + "\t\treturn", + "\t}", + "", + "\tcheck.LogInfo(\"Recording result %q, claimID: %+v\", strings.ToUpper(check.Result.String()), claimID)", + "\tresultsDB[check.ID] = claim.Result{", + "\t\tTestID: \u0026claimID,", + "\t\tState: check.Result.String(),", + "\t\tStartTime: check.StartTime.String(),", + "\t\tEndTime: check.EndTime.String(),", + "\t\tDuration: int(check.EndTime.Sub(check.StartTime).Seconds()),", + "\t\tSkipReason: check.skipReason,", + "\t\tCapturedTestOutput: check.GetLogs(),", + "\t\tCheckDetails: check.details,", + "", + "\t\tCategoryClassification: \u0026claim.CategoryClassification{", + "\t\t\tExtended: identifiers.Catalog[claimID].CategoryClassification[identifiers.Extended],", + "\t\t\tFarEdge: identifiers.Catalog[claimID].CategoryClassification[identifiers.FarEdge],", + "\t\t\tNonTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.NonTelco],", + "\t\t\tTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.Telco]},", + "\t\tCatalogInfo: \u0026claim.CatalogInfo{", + "\t\t\tDescription: identifiers.Catalog[claimID].Description,", + "\t\t\tRemediation: identifiers.Catalog[claimID].Remediation,", + "\t\t\tBestPracticeReference: 
identifiers.Catalog[claimID].BestPracticeReference,", + "\t\t\tExceptionProcess: identifiers.Catalog[claimID].ExceptionProcess,", + "\t\t},", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (cr CheckResult) String() string {", + "\treturn string(cr)", + "}" + ] + }, + { + "name": "Add", + "qualifiedName": "ChecksGroup.Add", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func(*Check)()", + "doc": "ChecksGroup.Add Adds a check to the group\n\nThis method acquires a global lock, appends the provided check to the group's\ninternal slice, and then releases the lock. It ensures thread‑safe\nmodification of the checks collection while keeping the operation simple and\nefficient.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:119", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) Add(check *Check) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tgroup.checks = append(group.checks, check)", + "}" + ] + }, + { + "name": "OnAbort", + "qualifiedName": "ChecksGroup.OnAbort", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func(string)(error)", + "doc": "ChecksGroup.OnAbort Handles a group’s abort by setting check results accordingly\n\nWhen an abort occurs, this method iterates over all checks in the group.\nChecks that do not match labels are marked as skipped with a label reason. If\nno check had started yet, every remaining check is skipped with the abort\nreason; otherwise the currently running check is marked aborted and\nsubsequent checks are skipped. 
Each result is printed immediately.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:487", + "calls": [ + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToUpper", + "kind": "function" + }, + { + "name": "Eval", + "kind": "function" + }, + { + "name": "Check.SetResultSkipped", + "kind": "function", + "source": [ + "func (check *Check) SetResultSkipped(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tcheck.Result = CheckResultSkipped", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "Check.SetResultSkipped", + "kind": "function", + "source": [ + "func (check *Check) SetResultSkipped(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tcheck.Result = CheckResultSkipped", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "Check.SetResultAborted", + "kind": "function", + "source": [ + "func (check *Check) SetResultAborted(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tcheck.Result = CheckResultAborted", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "Check.SetResultSkipped", + "kind": "function", + "source": [ + "func (check *Check) SetResultSkipped(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tcheck.Result = CheckResultSkipped", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + 
"\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + 
"\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", 
+ "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "RecordChecksResults", + "qualifiedName": "ChecksGroup.RecordChecksResults", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func()()", + "doc": "ChecksGroup.RecordChecksResults Logs each check result and stores it in the results database\n\nThe method iterates over all checks in the group, invoking a helper that logs\ninformation about the test ID, state, and duration. For each check, it\nrecords the outcome in a shared map keyed by the test identifier, including\nmetadata such as timestamps, skip reasons, and catalog references. This\nensures that results are persisted for later reporting or further processing.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:525", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "recordCheckResult", + "kind": "function", + "source": [ + "func recordCheckResult(check *Check) {", + "\tclaimID, ok := identifiers.TestIDToClaimID[check.ID]", + "\tif !ok {", + "\t\tcheck.LogDebug(\"TestID %s has no corresponding Claim ID - skipping result recording\", check.ID)", + "\t\treturn", + "\t}", + "", + "\tcheck.LogInfo(\"Recording result %q, claimID: %+v\", strings.ToUpper(check.Result.String()), claimID)", + "\tresultsDB[check.ID] = claim.Result{", + "\t\tTestID: \u0026claimID,", + "\t\tState: check.Result.String(),", + "\t\tStartTime: check.StartTime.String(),", + "\t\tEndTime: check.EndTime.String(),", + "\t\tDuration: int(check.EndTime.Sub(check.StartTime).Seconds()),", + "\t\tSkipReason: check.skipReason,", + "\t\tCapturedTestOutput: check.GetLogs(),", + "\t\tCheckDetails: check.details,", + "", + "\t\tCategoryClassification: 
\u0026claim.CategoryClassification{", + "\t\t\tExtended: identifiers.Catalog[claimID].CategoryClassification[identifiers.Extended],", + "\t\t\tFarEdge: identifiers.Catalog[claimID].CategoryClassification[identifiers.FarEdge],", + "\t\t\tNonTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.NonTelco],", + "\t\t\tTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.Telco]},", + "\t\tCatalogInfo: \u0026claim.CatalogInfo{", + "\t\t\tDescription: identifiers.Catalog[claimID].Description,", + "\t\t\tRemediation: identifiers.Catalog[claimID].Remediation,", + "\t\t\tBestPracticeReference: identifiers.Catalog[claimID].BestPracticeReference,", + "\t\t\tExceptionProcess: identifiers.Catalog[claimID].ExceptionProcess,", + "\t\t},", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + 
"\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) RecordChecksResults() {", + "\tlog.Info(\"Recording checks results of group %s\", group.name)", + "\tfor _, check := range group.checks {", + "\t\trecordCheckResult(check)", + "\t}", + "}" + ] + }, + { + "name": "RunChecks", + "qualifiedName": "ChecksGroup.RunChecks", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func(\u003c-chan bool, chan string)([]error, int)", + "doc": "ChecksGroup.RunChecks Executes a filtered set of checks with lifecycle hooks\n\nThe method 
gathers checks whose labels match the group’s filter, then runs\nthem in order while invoking BeforeAll, BeforeEach, AfterEach, and AfterAll\ncallbacks. It handles skipping logic, abort signals, and panics by recording\nerrors or marking checks as skipped/failed. The function returns any\ncollected errors and a count of failed checks.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:396", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToUpper", + "kind": "function" + }, + { + "name": "Eval", + "kind": "function" + }, + { + "name": "skipCheck", + "kind": "function", + "source": [ + "func skipCheck(check *Check, reason string) {", + "\tcheck.LogInfo(\"Skipping check %s, reason: %s\", check.ID, reason)", + "\tcheck.SetResultSkipped(reason)", + "\tprintCheckResult(check)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "runAfterAllFn", + "kind": "function", + "source": [ + "func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterAll\", group.name)", + "", + "\tif group.afterAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tlastCheck := checks[len(checks)-1]", + "\tzeroRemainingChecks := []*Check{}", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterAll function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterAll function panicked\", \"\\n: 
\"+stackTrace, group, lastCheck, zeroRemainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterAllFn(group.checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterAll function: %v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterAll function unexpected error\", err.Error(), group, lastCheck, zeroRemainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "runBeforeAllFn", + "kind": "function", + "source": [ + "func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeAll\", group.name)", + "\tif group.beforeAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tfirstCheck := checks[0]", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeAll function:\\n%v\", stackTrace)", + "\t\t\t// Set first check's result as error and skip the remaining ones.", + "\t\t\terr = onFailure(\"beforeAll function panicked\", \"\\n:\"+stackTrace, group, firstCheck, checks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeAllFn(checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeAll function: %v\", err)", + "\t\t// Set first check's result as error and skip the remaining ones.", + "\t\treturn onFailure(\"beforeAll function unexpected error\", err.Error(), group, firstCheck, checks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + 
"name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "runBeforeEachFn", + "kind": "function", + "source": [ + "func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeEach for check %s\", group.name, check.ID)", + "\tif group.beforeEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"beforeEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"beforeEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "shouldSkipCheck", + "kind": "function", + "source": [ + "func shouldSkipCheck(check *Check) (skip bool, reasons []string) {", + "\tif len(check.SkipCheckFns) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Short-circuit", + "\tif len(check.SkipCheckFns) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Save the skipFn index in case it panics so it can be used in the log trace.", + "\tcurrentSkipFnIndex := 0", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tcheck.LogError(\"Skip check function (idx=%d) panic'ed: %s\", currentSkipFnIndex, stackTrace)", + "\t\t\tskip = 
true", + "\t\t\treasons = []string{fmt.Sprintf(\"skipCheckFn (idx=%d) panic:\\n%s\", currentSkipFnIndex, stackTrace)}", + "\t\t}", + "\t}()", + "", + "\t// Call all the skip functions first.", + "\tfor _, skipFn := range check.SkipCheckFns {", + "\t\tif skip, reason := skipFn(); skip {", + "\t\t\treasons = append(reasons, reason)", + "\t\t}", + "\t\tcurrentSkipFnIndex++", + "\t}", + "", + "\t// If none of the skipFn returned true, exit now.", + "\tif len(reasons) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Now we need to check the skipMode for this check.", + "\tswitch check.SkipMode {", + "\tcase SkipModeAny:", + "\t\treturn true, reasons", + "\tcase SkipModeAll:", + "\t\t// Only skip if all the skipFn returned true.", + "\t\tif len(reasons) == len(check.SkipCheckFns) {", + "\t\t\treturn true, reasons", + "\t\t}", + "\t\treturn false, []string{}", + "\t}", + "", + "\treturn false, []string{}", + "}" + ] + }, + { + "name": "skipCheck", + "kind": "function", + "source": [ + "func skipCheck(check *Check, reason string) {", + "\tcheck.LogInfo(\"Skipping check %s, reason: %s\", check.ID, reason)", + "\tcheck.SetResultSkipped(reason)", + "\tprintCheckResult(check)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "Check.SetAbortChan", + "kind": "function", + "source": [ + "func (check *Check) SetAbortChan(abortChan chan string) {", + "\tcheck.abortChan = abortChan", + "}" + ] + }, + { + "name": "runCheck", + "kind": "function", + "source": [ + "func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) {", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\t// Don't do anything in case the check was manually aborted by check.Abort().", + "\t\t\tif msg, ok := r.(AbortPanicMsg); ok {", + "\t\t\t\tlog.Warn(\"Check was manually aborted, msg: %v\", msg)", + "\t\t\t\terr = fmt.Errorf(\"%v\", msg)", + "\t\t\t\treturn", + "\t\t\t}", + "", + "\t\t\tstackTrace 
:= fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "", + "\t\t\tcheck.LogError(\"Panic while running check %s function:\\n%v\", check.ID, stackTrace)", + "\t\t\terr = onFailure(fmt.Sprintf(\"check %s function panic\", check.ID), stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := check.Run(); err != nil {", + "\t\tcheck.LogError(\"Unexpected error while running check %s function: %v\", check.ID, err.Error())", + "\t\treturn onFailure(fmt.Sprintf(\"check %s function unexpected error\", check.ID), err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "runAfterEachFn", + "kind": "function", + "source": [ + "func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterEach for check %s\", group.name, check.ID)", + "", + "\tif group.afterEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "CheckResult.String", + "kind": "function", + "source": [ + 
"func (cr CheckResult) String() string {", + "\treturn string(cr)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + 
"\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + }, + { + "name": "WithAfterAllFn", + "qualifiedName": "ChecksGroup.WithAfterAllFn", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func(func(checks []*Check) error)(*ChecksGroup)", + "doc": "ChecksGroup.WithAfterAllFn Assigns a callback to execute after all checks complete\n\nThis method stores the supplied function in the ChecksGroup so it will be\ncalled with the list of executed checks once processing is finished. The\nstored function can perform cleanup or result aggregation. 
It returns the\nsame group instance, allowing method chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:107", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) WithAfterAllFn(afterAllFn func(checks []*Check) error) *ChecksGroup {", + "\tgroup.afterAllFn = afterAllFn", + "", + "\treturn group", + "}" + ] + }, + { + "name": "WithAfterEachFn", + "qualifiedName": "ChecksGroup.WithAfterEachFn", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func(func(check *Check) error)(*ChecksGroup)", + "doc": "ChecksGroup.WithAfterEachFn Assigns a function that runs after every individual check\n\nThis method stores the provided function as the group's post‑check hook,\nensuring it is invoked with a reference to each Check object once the check\ncompletes. The stored callback can modify or inspect the check before the\ngroup continues processing. It returns the same ChecksGroup instance for\nchaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:95", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) WithAfterEachFn(afterEachFn func(check *Check) error) *ChecksGroup {", + "\tgroup.afterEachFn = afterEachFn", + "", + "\treturn group", + "}" + ] + }, + { + "name": "WithBeforeAllFn", + "qualifiedName": "ChecksGroup.WithBeforeAllFn", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func(func(checks []*Check) error)(*ChecksGroup)", + "doc": "ChecksGroup.WithBeforeAllFn Registers a function to run before all checks\n\nThis method assigns the provided callback to the group, which will be\nexecuted with the slice of checks prior to any other operations. 
It returns\nthe modified group for chaining purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:70", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) WithBeforeAllFn(beforeAllFn func(checks []*Check) error) *ChecksGroup {", + "\tgroup.beforeAllFn = beforeAllFn", + "", + "\treturn group", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "qualifiedName": "ChecksGroup.WithBeforeEachFn", + "exported": true, + "receiver": "ChecksGroup", + "signature": "func(func(check *Check) error)(*ChecksGroup)", + "doc": "ChecksGroup.WithBeforeEachFn Assigns a callback to execute prior to each check\n\nThis method accepts a function that takes a check pointer and may return an\nerror. It stores this function in the group's internal field so it will be\ninvoked before each individual check runs. The group instance is returned,\nallowing further chained configuration calls.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:82", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (group *ChecksGroup) WithBeforeEachFn(beforeEachFn func(check *Check) error) *ChecksGroup {", + "\tgroup.beforeEachFn = beforeEachFn", + "", + "\treturn group", + "}" + ] + }, + { + "name": "FilterCheckIDs", + "qualifiedName": "FilterCheckIDs", + "exported": true, + "signature": "func()([]string, error)", + "doc": "FilterCheckIDs Retrieves test case identifiers that satisfy the current label filter\n\nThe function iterates through all check groups in the database, evaluating\neach check's labels against a global expression evaluator. If a check passes\nthe evaluation, its identifier is appended to a result slice. 
After\nprocessing all checks, the slice of matching IDs is returned with no error.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:301", + "calls": [ + { + "name": "Eval", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": "getMatchingTestIDs", + "kind": "function", + "source": [ + "func getMatchingTestIDs(labelExpr string) ([]string, error) {", + "\tif err := checksdb.InitLabelsExprEvaluator(labelExpr); err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to initialize a test case label evaluator, err: %v\", err)", + "\t}", + "\tcertsuite.LoadInternalChecksDB()", + "\ttestIDs, err := checksdb.FilterCheckIDs()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not list test cases, err: %v\", err)", + "\t}", + "", + "\treturn testIDs, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FilterCheckIDs() ([]string, error) {", + "\tfilteredCheckIDs := []string{}", + "\tfor _, group := range dbByGroup {", + "\t\tfor _, check := range group.checks {", + "\t\t\tif labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\t\tfilteredCheckIDs = append(filteredCheckIDs, check.ID)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn filteredCheckIDs, nil", + "}" + ] + }, + { + "name": "GetReconciledResults", + "qualifiedName": "GetReconciledResults", + "exported": true, + "signature": "func()(map[string]claim.Result)", + "doc": "GetReconciledResults Aggregates all stored check results into a map\n\nThe function collects entries from an internal database of test outcomes,\nmapping each key to its corresponding claim result object. It ensures every\nkey is represented in the returned map, initializing missing entries before\nassigning the actual data. 
The resulting map is used by other components to\npopulate the final claim report.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:164", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ClaimBuilder.Build", + "kind": "function", + "source": [ + "func (c *ClaimBuilder) Build(outputFile string) {", + "\tendTime := time.Now()", + "", + "\tc.claimRoot.Claim.Metadata.EndTime = endTime.UTC().Format(DateTimeFormatDirective)", + "\tc.claimRoot.Claim.Results = checksdb.GetReconciledResults()", + "", + "\t// Marshal the claim and output to file", + "\tpayload := MarshalClaimOutput(c.claimRoot)", + "\tWriteClaimOutput(outputFile, payload)", + "", + "\tlog.Info(\"Claim file created at %s\", outputFile)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetReconciledResults() map[string]claim.Result {", + "\tresultMap := make(map[string]claim.Result)", + "\tfor key := range resultsDB {", + "\t\t// initializes the result map, if necessary", + "\t\tif _, ok := resultMap[key]; !ok {", + "\t\t\tresultMap[key] = claim.Result{}", + "\t\t}", + "", + "\t\tresultMap[key] = resultsDB[key]", + "\t}", + "\treturn resultMap", + "}" + ] + }, + { + "name": "GetResults", + "qualifiedName": "GetResults", + "exported": true, + "signature": "func()(map[string]claim.Result)", + "doc": "GetResults Retrieves the current mapping of check identifiers to their results\n\nThe function returns a map where each key is a string identifier for a\nspecific compliance check, and the corresponding value contains the result\ndata for that check. It simply exposes an internal database that holds all\nrecorded outcomes. 
No parameters are required or modified during its\nexecution.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:247", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetResults() map[string]claim.Result {", + "\treturn resultsDB", + "}" + ] + }, + { + "name": "GetTestSuites", + "qualifiedName": "GetTestSuites", + "exported": true, + "signature": "func()([]string)", + "doc": "GetTestSuites Retrieves a list of unique test suite identifiers from the database\n\nThis function iterates over all keys in an internal results map, collecting\neach distinct test suite name into a slice. It ensures no duplicates by\nchecking membership before appending. The resulting slice of strings is\nreturned for further processing.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:257", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTestSuites() []string {", + "\t// Collect all of the unique test suites from the resultsDB", + "\tvar suites []string", + "\tfor key := range resultsDB {", + "\t\t// Only append to the slice if it does not already exist", + "\t\tif !stringhelper.StringInSlice(suites, key, false) {", + "\t\t\tsuites = append(suites, key)", + "\t\t}", + "\t}", + "\treturn suites", + "}" + ] + }, + { + "name": 
"GetTestsCountByState", + "qualifiedName": "GetTestsCountByState", + "exported": true, + "signature": "func(string)(int)", + "doc": "GetTestsCountByState Counts tests that match a given state\n\nThe function iterates over the global results database, incrementing a\ncounter each time an entry’s state equals the provided string. It then\nreturns the total number of matching entries as an integer. This is useful\nfor summarizing how many tests are in a particular status.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:285", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTestsCountByState(state string) int {", + "\tcount := 0", + "\tfor r := range resultsDB {", + "\t\tif resultsDB[r].State == state {", + "\t\t\tcount++", + "\t\t}", + "\t}", + "\treturn count", + "}" + ] + }, + { + "name": "GetTotalTests", + "qualifiedName": "GetTotalTests", + "exported": true, + "signature": "func()(int)", + "doc": "GetTotalTests Retrieves the number of tests stored in the database\n\nThis function accesses an internal slice that holds test results and returns\nits length as an integer. It provides a quick way to determine how many tests\nare currently recorded without exposing the underlying data structure. 
The\nresult is returned immediately after calculating the count.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:275", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTotalTests() int {", + "\treturn len(resultsDB)", + "}" + ] + }, + { + "name": "InitLabelsExprEvaluator", + "qualifiedName": "InitLabelsExprEvaluator", + "exported": true, + "signature": "func(string)(error)", + "doc": "InitLabelsExprEvaluator Creates a label evaluator from a filter expression\n\nThis function takes a string representing a label filter, expands the special\nkeyword \"all\" into a comma‑separated list of known tags, then constructs a\nLabelsExprEvaluator using the helper in the labels package. If construction\nfails, it returns an error describing the problem; otherwise it stores the\nevaluator in a global variable for later use by other parts of the program.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:321", + "calls": [ + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels", + "name": "NewLabelsExprEvaluator", + "kind": "function", + "source": [ + "func NewLabelsExprEvaluator(labelsExpr string) (LabelsExprEvaluator, error) {", + "\tgoLikeExpr := strings.ReplaceAll(labelsExpr, \"-\", \"_\")", + "\tgoLikeExpr = strings.ReplaceAll(goLikeExpr, \",\", \"||\")", + "", + "\tnode, err := parser.ParseExpr(goLikeExpr)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to parse labels expression %s: %v\", labelsExpr, err)", + "\t}", + "", + "\treturn labelsExprParser{", + "\t\tastRootNode: node,", + "\t}, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info", + "name": 
"getMatchingTestIDs", + "kind": "function", + "source": [ + "func getMatchingTestIDs(labelExpr string) ([]string, error) {", + "\tif err := checksdb.InitLabelsExprEvaluator(labelExpr); err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to initialize a test case label evaluator, err: %v\", err)", + "\t}", + "\tcertsuite.LoadInternalChecksDB()", + "\ttestIDs, err := checksdb.FilterCheckIDs()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not list test cases, err: %v\", err)", + "\t}", + "", + "\treturn testIDs, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", 
strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil 
{", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). 
Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func InitLabelsExprEvaluator(labelsFilter string) error {", + "\t// Expand the abstract \"all\" label into actual existing labels", + "\tif labelsFilter == \"all\" {", + "\t\tallTags := []string{identifiers.TagCommon, identifiers.TagExtended,", + "\t\t\tidentifiers.TagFarEdge, identifiers.TagTelco}", + "\t\tlabelsFilter = strings.Join(allTags, \",\")", + "\t}", + "", + "\teval, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not create a label evaluator, err: %v\", err)", + "\t}", + "", + "\tlabelsExprEvaluator = eval", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "NewCheck", + "qualifiedName": "NewCheck", + "exported": true, + "signature": "func(string, 
[]string)(*Check)", + "doc": "NewCheck Creates a new check instance\n\nThis function constructs a Check object with the provided identifier and\nlabel set. It assigns an initial passed result status, creates a string\nbuilder for log storage, and attaches a multi‑logger that records events\nspecific to this check. The fully initialized Check is then returned as a\npointer.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:82", + "calls": [ + { + "name": "With", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "GetMultiLogger", + "kind": "function", + "source": [ + "func GetMultiLogger(writers ...io.Writer) *Logger {", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tvar handlers []slog.Handler", + "\tif globalLogger != nil {", + "\t\thandlers = []slog.Handler{globalLogger.l.Handler()}", + "\t}", + "", + "\tfor _, writer := range writers {", + "\t\thandlers = append(handlers, NewCustomHandler(writer, \u0026opts))", + "\t}", + "", + "\treturn \u0026Logger{l: slog.New(NewMultiHandler(handlers...))}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", 
common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", 
+ "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "LoadChecks", + "kind": "function", + "source": [ + 
"func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), 
testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + 
"\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + 
"\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", 
+ "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightContainerCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t\t\tfor _, cut := range containers {", + "\t\t\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Container %q has passed Preflight test %q\", cut, testName)", + 
"\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has failed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has errored Preflight test %q, err: %v\", cut, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Container has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightOperatorCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: 
identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t\t\tfor _, op := range operators {", + "\t\t\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Operator %q has passed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has failed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has errored Preflight test %q, err: %v\", op, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, fmt.Sprintf(\"Operator has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + 
"usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "name": "NewChecksGroup", + "qualifiedName": "NewChecksGroup", + "exported": true, + "signature": "func(string)(*ChecksGroup)", + "doc": "NewChecksGroup creates or retrieves a checks group by name\n\nThis function locks the global database, ensuring thread safety while\naccessing the map of groups. It initializes the map if necessary, then looks\nup an existing group with the given key. If found it returns that instance;\notherwise it constructs a new ChecksGroup with default fields, stores it in\nthe map, and returns it.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:42", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + 
"\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", 
+ "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + 
"\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := 
env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", 
+ "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "RunChecks", + "qualifiedName": "RunChecks", + "exported": true, + "signature": "func(time.Duration)(int, error)", + "doc": "RunChecks Executes all check groups with timeout and signal handling\n\nThe function locks the database, starts a timeout timer, and listens for\nSIGINT or 
SIGTERM signals. It iterates over each check group, launching a\ngoroutine to run its checks while monitoring for aborts or timeouts. After\nexecution it records results, prints a summary table, logs failures, and\nreturns the count of failed checks or an error if any occurred.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:41", + "calls": [ + { + "name": "Lock", + "kind": "function" + }, + { + "name": "Unlock", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "After", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "os/signal", + "name": "Notify", + "kind": "function" + }, + { + "pkgPath": "os/signal", + "name": "Stop", + "kind": "function" + }, + { + "name": "ChecksGroup.OnAbort", + "kind": "function", + "source": [ + "func (group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", + "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "ChecksGroup.RecordChecksResults", + "kind": "function", + "source": [ + "func (group 
*ChecksGroup) RecordChecksResults() {", + "\tlog.Info(\"Recording checks results of group %s\", group.name)", + "\tfor _, check := range group.checks {", + "\t\trecordCheckResult(check)", + "\t}", + "}" + ] + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + 
"\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "ChecksGroup.OnAbort", + "kind": "function", + "source": [ + "func (group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + 
"\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", + "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "ChecksGroup.OnAbort", + "kind": "function", + "source": [ + "func (group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", + "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "ChecksGroup.OnAbort", + "kind": "function", + "source": [ + "func (group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", + "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "ChecksGroup.RecordChecksResults", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RecordChecksResults() {", + "\tlog.Info(\"Recording checks results of group %s\", group.name)", + "\tfor _, check := range group.checks {", + "\t\trecordCheckResult(check)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintResultsTable", + "kind": "function", + "source": [ + "func PrintResultsTable(results map[string][]int) {", + "\tfmt.Printf(\"\\n\")", + 
"\tfmt.Println(\"-----------------------------------------------------------\")", + "\tfmt.Printf(\"| %-27s %-9s %-9s %s |\\n\", \"SUITE\", \"PASSED\", \"FAILED\", \"SKIPPED\")", + "\tfmt.Println(\"-----------------------------------------------------------\")", + "\tfor groupName, groupResults := range results {", + "\t\tfmt.Printf(\"| %-25s %8d %9d %10d |\\n\", groupName,", + "\t\t\tgroupResults[0],", + "\t\t\tgroupResults[1],", + "\t\t\tgroupResults[2])", + "\t\tfmt.Println(\"-----------------------------------------------------------\")", + "\t}", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "name": "getResultsSummary", + "kind": "function", + "source": [ + "func getResultsSummary() map[string][]int {", + "\tresults := make(map[string][]int)", + "\tfor groupName, group := range dbByGroup {", + "\t\tgroupResults := []int{0, 0, 0}", + "\t\tfor _, check := range group.checks {", + "\t\t\tswitch check.Result {", + "\t\t\tcase CheckResultPassed:", + "\t\t\t\tgroupResults[PASSED]++", + "\t\t\tcase CheckResultFailed:", + "\t\t\t\tgroupResults[FAILED]++", + "\t\t\tcase CheckResultSkipped:", + "\t\t\t\tgroupResults[SKIPPED]++", + "\t\t\t}", + "\t\t}", + "\t\tresults[groupName] = groupResults", + "\t}", + "\treturn results", + "}" + ] + }, + { + "name": "printFailedChecksLog", + "kind": "function", + "source": [ + "func printFailedChecksLog() {", + "\tfor _, group := range dbByGroup {", + "\t\tfor _, check := range group.checks {", + "\t\t\tif check.Result != CheckResultFailed {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tlogHeader := fmt.Sprintf(\"| \"+cli.Cyan+\"LOG (%s)\"+cli.Reset+\" |\", check.ID)", + "\t\t\tnbSymbols := utf8.RuneCountInString(logHeader) - nbColorSymbols", + "\t\t\tfmt.Println(strings.Repeat(\"-\", nbSymbols))", + "\t\t\tfmt.Println(logHeader)", + "\t\t\tfmt.Println(strings.Repeat(\"-\", nbSymbols))", + "\t\t\tcheckLogs := check.GetLogs()", + "\t\t\tif checkLogs == \"\" {", + "\t\t\t\tfmt.Println(\"Empty log output\")", + "\t\t\t} else {", + 
"\t\t\t\tfmt.Println(checkLogs)", + "\t\t\t}", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. 
See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + 
"\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + 
"\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the 
goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + }, + { + "name": "getResultsSummary", + "qualifiedName": "getResultsSummary", + "exported": false, + "signature": "func()(map[string][]int)", + "doc": "getResultsSummary generates a table of check results per group\n\nThis function builds a map where each key is the name of a check group and\nthe value is a slice of three integers counting passed, failed, and skipped\nchecks. 
It iterates over all groups in the database, tallies results for each\ncheck according to its status, and stores the counts. The resulting map is\nreturned for use by the CLI output.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:190", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen = 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + 
"\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", + "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getResultsSummary() map[string][]int {", + "\tresults := make(map[string][]int)", + "\tfor groupName, group := range dbByGroup {", + "\t\tgroupResults := []int{0, 0, 0}", + "\t\tfor _, check := range group.checks {", + "\t\t\tswitch check.Result {", + "\t\t\tcase CheckResultPassed:", + "\t\t\t\tgroupResults[PASSED]++", + "\t\t\tcase CheckResultFailed:", + "\t\t\t\tgroupResults[FAILED]++", + "\t\t\tcase CheckResultSkipped:", + "\t\t\t\tgroupResults[SKIPPED]++", + "\t\t\t}", + "\t\t}", + "\t\tresults[groupName] = groupResults", + "\t}", + "\treturn results", + "}" + ] + }, + { + "name": "onFailure", + "qualifiedName": "onFailure", + "exported": false, + "signature": "func(string, string, *ChecksGroup, *Check, []*Check)(error)", + "doc": "onFailure Handles a failure during group or check execution\n\nWhen a before/after or check function fails, this routine marks the current\ncheck as an error with a descriptive message. 
It then skips all remaining\nchecks in the same group using a concise skip reason. Finally it returns a\ngeneric error that indicates which failure type occurred.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:158", + "calls": [ + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "name": "Check.SetResultError", + "kind": "function", + "source": [ + "func (check *Check) SetResultError(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tif check.Result == CheckResultError {", + "\t\tcheck.LogWarn(\"Check %s result was already marked as error.\", check.ID)", + "\t\treturn", + "\t}", + "\tcheck.Result = CheckResultError", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "skipAll", + "kind": "function", + "source": [ + "func skipAll(checks []*Check, reason string) {", + "\tfor _, check := range checks {", + "\t\tskipCheck(check, reason)", + "\t}", + "}" + ] + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runAfterAllFn", + "kind": "function", + "source": [ + "func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterAll\", group.name)", + "", + "\tif group.afterAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tlastCheck := checks[len(checks)-1]", + "\tzeroRemainingChecks := []*Check{}", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterAll function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterAll function panicked\", \"\\n: \"+stackTrace, group, lastCheck, zeroRemainingChecks)", + 
"\t\t}", + "\t}()", + "", + "\tif err := group.afterAllFn(group.checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterAll function: %v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterAll function unexpected error\", err.Error(), group, lastCheck, zeroRemainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runAfterEachFn", + "kind": "function", + "source": [ + "func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterEach for check %s\", group.name, check.ID)", + "", + "\tif group.afterEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runBeforeAllFn", + "kind": "function", + "source": [ + "func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeAll\", group.name)", + "\tif group.beforeAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + 
"\tfirstCheck := checks[0]", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeAll function:\\n%v\", stackTrace)", + "\t\t\t// Set first check's result as error and skip the remaining ones.", + "\t\t\terr = onFailure(\"beforeAll function panicked\", \"\\n:\"+stackTrace, group, firstCheck, checks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeAllFn(checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeAll function: %v\", err)", + "\t\t// Set first check's result as error and skip the remaining ones.", + "\t\treturn onFailure(\"beforeAll function unexpected error\", err.Error(), group, firstCheck, checks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runBeforeEachFn", + "kind": "function", + "source": [ + "func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeEach for check %s\", group.name, check.ID)", + "\tif group.beforeEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"beforeEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"beforeEach function unexpected error\", err.Error(), group, check, 
remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "runCheck", + "kind": "function", + "source": [ + "func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) {", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\t// Don't do anything in case the check was manually aborted by check.Abort().", + "\t\t\tif msg, ok := r.(AbortPanicMsg); ok {", + "\t\t\t\tlog.Warn(\"Check was manually aborted, msg: %v\", msg)", + "\t\t\t\terr = fmt.Errorf(\"%v\", msg)", + "\t\t\t\treturn", + "\t\t\t}", + "", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "", + "\t\t\tcheck.LogError(\"Panic while running check %s function:\\n%v\", check.ID, stackTrace)", + "\t\t\terr = onFailure(fmt.Sprintf(\"check %s function panic\", check.ID), stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := check.Run(); err != nil {", + "\t\tcheck.LogError(\"Unexpected error while running check %s function: %v\", check.ID, err.Error())", + "\t\treturn onFailure(fmt.Sprintf(\"check %s function unexpected error\", check.ID), err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": 
"printCheckResult", + "qualifiedName": "printCheckResult", + "exported": false, + "signature": "func(*Check)()", + "doc": "printCheckResult Displays the final status of a check\n\nThe function examines the result field of a check object and calls an\nappropriate CLI helper to print a formatted message indicating pass, fail,\nskip, abort or error. It uses the check's ID and any skip reason when\nrelevant, ensuring that the output line is cleared before printing.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:450", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckPassed", + "kind": "function", + "source": [ + "func PrintCheckPassed(checkName string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagPass + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckFailed", + "kind": "function", + "source": [ + "func PrintCheckFailed(checkName string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagFail + \" ] \" + checkName + \"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckSkipped", + "kind": "function", + "source": [ + "func PrintCheckSkipped(checkName, reason string) {", + "\t// It shouldn't happen too often, but some checks might be set as skipped inside the checkFn", + "\t// if neither compliant objects nor non-compliant objects were found.", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagSkip + \" ] \" + checkName + \" (\" + reason + \")\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckAborted", + "kind": "function", + "source": [ + "func PrintCheckAborted(checkName, reason string) {", 
+ "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagAborted + \" ] \" + checkName + \" (\" + reason + \")\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli", + "name": "PrintCheckErrored", + "kind": "function", + "source": [ + "func PrintCheckErrored(checkName string) {", + "\tstopCheckLineGoroutine()", + "", + "\tfmt.Print(ClearLineCode + \"[ \" + CheckResultTagError + \" ] \" + checkName + \"\\n\")", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.Run", + "kind": "function", + "source": [ + "func (check *Check) Run() error {", + "\tif check == nil {", + "\t\treturn fmt.Errorf(\"check is a nil pointer\")", + "\t}", + "", + "\tif check.Error != nil {", + "\t\treturn fmt.Errorf(\"unable to run due to a previously existing error: %v\", check.Error)", + "\t}", + "", + "\tcli.PrintCheckRunning(check.ID)", + "", + "\tcheck.StartTime = time.Now()", + "\tdefer func() {", + "\t\tcheck.EndTime = time.Now()", + "\t}()", + "", + "\tcheck.LogInfo(\"Running check (labels: %v)\", check.Labels)", + "\tif check.BeforeCheckFn != nil {", + "\t\tif err := check.BeforeCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in before check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tif err := check.CheckFn(check); err != nil {", + "\t\treturn fmt.Errorf(\"check %s failed in check function: %v\", check.ID, err)", + "\t}", + "", + "\tif check.AfterCheckFn != nil {", + "\t\tif err := check.AfterCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in after check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tprintCheckResult(check)", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.OnAbort", + "kind": "function", + "source": [ + "func 
(group *ChecksGroup) OnAbort(abortReason string) error {", + "\t// If this wasn't the group with the aborted check.", + "\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\tfmt.Printf(\"Skipping checks from suite %s\\n\", strings.ToUpper(group.name))", + "\t}", + "", + "\tfor i, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tcheck.SetResultSkipped(\"not matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If none of this group's checks was running yet, skip all.", + "\t\tif group.currentRunningCheckIdx == checkIdxNone {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Abort the check that was running when it was aborted and skip the rest.", + "\t\tif i == group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultAborted(abortReason)", + "\t\t} else if i \u003e group.currentRunningCheckIdx {", + "\t\t\tcheck.SetResultSkipped(abortReason)", + "\t\t}", + "", + "\t\tprintCheckResult(check)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "skipCheck", + "kind": "function", + "source": [ + "func skipCheck(check *Check, reason string) {", + "\tcheck.LogInfo(\"Skipping check %s, reason: %s\", check.ID, reason)", + "\tcheck.SetResultSkipped(reason)", + "\tprintCheckResult(check)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + }, + { + "name": 
"printFailedChecksLog", + "qualifiedName": "printFailedChecksLog", + "exported": false, + "signature": "func()()", + "doc": "printFailedChecksLog Displays logs for checks that failed\n\nThis function iterates over all check groups and their individual checks,\nprinting a formatted header and the log content only for those that did not\nsucceed. For each failed check it calculates the appropriate number of dashes\nto align the header, prints separators, the colored header indicating the\ncheck ID, and then either the captured log or a message if no output was\nrecorded. The function writes directly to standard output using fmt.Println.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:219", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "unicode/utf8", + "name": "RuneCountInString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Repeat", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Repeat", + "kind": "function" + }, + { + "name": "Check.GetLogs", + "kind": "function", + "source": [ + "func (check *Check) GetLogs() string {", + "\treturn check.logArchive.String()", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "RunChecks", + "kind": "function", + "source": [ + "func RunChecks(timeout time.Duration) (failedCtr int, err error) {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\t// Timeout channel", + "\ttimeOutChan := time.After(timeout)", + "\t// SIGINT(ctrl+c)/SIGTERM capture channel.", + "\tconst SIGINTBufferLen 
= 10", + "\tsigIntChan := make(chan os.Signal, SIGINTBufferLen)", + "\tsignal.Notify(sigIntChan, syscall.SIGINT, syscall.SIGTERM)", + "\t// turn off ctrl-c capture on exit", + "\tdefer signal.Stop(sigIntChan)", + "", + "\tabort := false", + "\tvar abortReason string", + "\tvar errs []error", + "\tfor _, group := range dbByGroup {", + "\t\tif abort {", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t\tgroup.RecordChecksResults()", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Stop channel, so we can send a stop signal to group.RunChecks()", + "\t\tstopChan := make(chan bool, 1)", + "\t\tabortChan := make(chan string, 1)", + "", + "\t\t// Done channel for the goroutine that runs group.RunChecks().", + "\t\tgroupDone := make(chan bool)", + "\t\tgo func() {", + "\t\t\tchecks, failedCheckCtr := group.RunChecks(stopChan, abortChan)", + "\t\t\tfailedCtr += failedCheckCtr", + "\t\t\terrs = append(errs, checks...)", + "\t\t\tgroupDone \u003c- true", + "\t\t}()", + "", + "\t\tselect {", + "\t\tcase \u003c-groupDone:", + "\t\t\tlog.Debug(\"Group %s finished running checks.\", group.name)", + "\t\tcase abortReason = \u003c-abortChan:", + "\t\t\tlog.Warn(\"Group %s aborted.\", group.name)", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-timeOutChan:", + "\t\t\tlog.Warn(\"Running all checks timed-out.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"global time-out\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\tcase \u003c-sigIntChan:", + "\t\t\tlog.Warn(\"SIGINT/SIGTERM received.\")", + "\t\t\tstopChan \u003c- true", + "", + "\t\t\tabort = true", + "\t\t\tabortReason = \"SIGINT/SIGTERM\"", + "\t\t\t_ = group.OnAbort(abortReason)", + "\t\t}", + "", + "\t\tgroup.RecordChecksResults()", + "\t}", + "", + "\t// Print the results in the CLI", + "\tcli.PrintResultsTable(getResultsSummary())", + "\tprintFailedChecksLog()", + "", + "\tif len(errs) \u003e 0 {", 
+ "\t\tlog.Error(\"RunChecks errors: %v\", errs)", + "\t\treturn 0, fmt.Errorf(\"%d errors found in checks/groups\", len(errs))", + "\t}", + "", + "\treturn failedCtr, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func printFailedChecksLog() {", + "\tfor _, group := range dbByGroup {", + "\t\tfor _, check := range group.checks {", + "\t\t\tif check.Result != CheckResultFailed {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tlogHeader := fmt.Sprintf(\"| \"+cli.Cyan+\"LOG (%s)\"+cli.Reset+\" |\", check.ID)", + "\t\t\tnbSymbols := utf8.RuneCountInString(logHeader) - nbColorSymbols", + "\t\t\tfmt.Println(strings.Repeat(\"-\", nbSymbols))", + "\t\t\tfmt.Println(logHeader)", + "\t\t\tfmt.Println(strings.Repeat(\"-\", nbSymbols))", + "\t\t\tcheckLogs := check.GetLogs()", + "\t\t\tif checkLogs == \"\" {", + "\t\t\t\tfmt.Println(\"Empty log output\")", + "\t\t\t} else {", + "\t\t\t\tfmt.Println(checkLogs)", + "\t\t\t}", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "recordCheckResult", + "qualifiedName": "recordCheckResult", + "exported": false, + "signature": "func(*Check)()", + "doc": "recordCheckResult Stores the check result in the results database\n\nThe function looks up a claim ID for a given test, logs debugging information\nif none is found, and otherwise records various fields such as state,\ntimestamps, duration, skip reason, captured output, details, category\nclassification, and catalog metadata into the global resultsDB map. 
It\nformats strings to uppercase for logging and calculates duration in seconds\nfrom start and end times.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:125", + "calls": [ + { + "name": "Check.LogDebug", + "kind": "function", + "source": [ + "func (check *Check) LogDebug(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Check.LogInfo", + "kind": "function", + "source": [ + "func (check *Check) LogInfo(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "ToUpper", + "kind": "function" + }, + { + "name": "CheckResult.String", + "kind": "function", + "source": [ + "func (cr CheckResult) String() string {", + "\treturn string(cr)", + "}" + ] + }, + { + "name": "CheckResult.String", + "kind": "function", + "source": [ + "func (cr CheckResult) String() string {", + "\treturn string(cr)", + "}" + ] + }, + { + "name": "CheckResult.String", + "kind": "function", + "source": [ + "func (cr CheckResult) String() string {", + "\treturn string(cr)", + "}" + ] + }, + { + "name": "CheckResult.String", + "kind": "function", + "source": [ + "func (cr CheckResult) String() string {", + "\treturn string(cr)", + "}" + ] + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "Seconds", + "kind": "function" + }, + { + "name": "Sub", + "kind": "function" + }, + { + "name": "Check.GetLogs", + "kind": "function", + "source": [ + "func (check *Check) GetLogs() string {", + "\treturn check.logArchive.String()", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RecordChecksResults", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RecordChecksResults() {", + "\tlog.Info(\"Recording checks results of group %s\", group.name)", + "\tfor _, check := range group.checks {", + 
"\t\trecordCheckResult(check)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func recordCheckResult(check *Check) {", + "\tclaimID, ok := identifiers.TestIDToClaimID[check.ID]", + "\tif !ok {", + "\t\tcheck.LogDebug(\"TestID %s has no corresponding Claim ID - skipping result recording\", check.ID)", + "\t\treturn", + "\t}", + "", + "\tcheck.LogInfo(\"Recording result %q, claimID: %+v\", strings.ToUpper(check.Result.String()), claimID)", + "\tresultsDB[check.ID] = claim.Result{", + "\t\tTestID: \u0026claimID,", + "\t\tState: check.Result.String(),", + "\t\tStartTime: check.StartTime.String(),", + "\t\tEndTime: check.EndTime.String(),", + "\t\tDuration: int(check.EndTime.Sub(check.StartTime).Seconds()),", + "\t\tSkipReason: check.skipReason,", + "\t\tCapturedTestOutput: check.GetLogs(),", + "\t\tCheckDetails: check.details,", + "", + "\t\tCategoryClassification: \u0026claim.CategoryClassification{", + "\t\t\tExtended: identifiers.Catalog[claimID].CategoryClassification[identifiers.Extended],", + "\t\t\tFarEdge: identifiers.Catalog[claimID].CategoryClassification[identifiers.FarEdge],", + "\t\t\tNonTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.NonTelco],", + "\t\t\tTelco: identifiers.Catalog[claimID].CategoryClassification[identifiers.Telco]},", + "\t\tCatalogInfo: \u0026claim.CatalogInfo{", + "\t\t\tDescription: identifiers.Catalog[claimID].Description,", + "\t\t\tRemediation: identifiers.Catalog[claimID].Remediation,", + "\t\t\tBestPracticeReference: identifiers.Catalog[claimID].BestPracticeReference,", + "\t\t\tExceptionProcess: identifiers.Catalog[claimID].ExceptionProcess,", + "\t\t},", + "\t}", + "}" + ] + }, + { + "name": "runAfterAllFn", + "qualifiedName": "runAfterAllFn", + "exported": false, + "signature": "func(*ChecksGroup, []*Check)(error)", + "doc": "runAfterAllFn Executes the group's final cleanup routine\n\nWhen a checks group has finished running all its checks, this 
function\ninvokes any registered afterAll hook with the entire list of checks. It logs\nthe start and handles both panics and returned errors by marking the last\nexecuted check as failed and preventing further actions. The result is an\nerror if the cleanup fails; otherwise nil.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:207", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "recover", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "runtime/debug", + "name": "Stack", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "afterAllFn", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + 
"\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" 
+ ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterAll\", group.name)", + "", + "\tif group.afterAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tlastCheck := checks[len(checks)-1]", + "\tzeroRemainingChecks := []*Check{}", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterAll function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterAll function panicked\", \"\\n: \"+stackTrace, group, lastCheck, zeroRemainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterAllFn(group.checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterAll function: %v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterAll function unexpected error\", err.Error(), group, lastCheck, zeroRemainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "runAfterEachFn", + "qualifiedName": "runAfterEachFn", + "exported": false, + "signature": "func(*ChecksGroup, *Check, []*Check)(error)", + "doc": "runAfterEachFn Handles post‑check cleanup and error reporting\n\nThis routine runs a group's afterEach function for each check, logging its\nstart and capturing any panic or returned error. If the function panics, it\nlogs the stack trace and marks the current check as failed without skipping\nsubsequent checks. 
On a normal error, it reports the issue, sets the check\nresult to an error state, and returns that error.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:272", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "recover", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "runtime/debug", + "name": "Stack", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "afterEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Error", 
+ "kind": "function" + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), 
len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running afterEach for check %s\", group.name, check.ID)", + "", + 
"\tif group.afterEachFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running afterEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"afterEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.afterEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running afterEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"afterEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "runBeforeAllFn", + "qualifiedName": "runBeforeAllFn", + "exported": false, + "signature": "func(*ChecksGroup, []*Check)(error)", + "doc": "runBeforeAllFn Executes a group-wide setup routine before any checks run\n\nThis function calls the optional beforeAllFn defined on a ChecksGroup,\npassing all checks to it. If the function panics or returns an error, the\nfirst check is marked as failed and all remaining checks are skipped with an\nexplanatory reason. 
No other actions occur if beforeAllFn is nil.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:175", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "recover", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "runtime/debug", + "name": "Stack", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "beforeAllFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func 
onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// 
Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeAll\", group.name)", + "\tif group.beforeAllFn == nil {", + "\t\treturn nil", + "\t}", + "", + "\tfirstCheck := checks[0]", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", 
+ "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeAll function:\\n%v\", stackTrace)", + "\t\t\t// Set first check's result as error and skip the remaining ones.", + "\t\t\terr = onFailure(\"beforeAll function panicked\", \"\\n:\"+stackTrace, group, firstCheck, checks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeAllFn(checks); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeAll function: %v\", err)", + "\t\t// Set first check's result as error and skip the remaining ones.", + "\t\treturn onFailure(\"beforeAll function unexpected error\", err.Error(), group, firstCheck, checks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "runBeforeEachFn", + "qualifiedName": "runBeforeEachFn", + "exported": false, + "signature": "func(*ChecksGroup, *Check, []*Check)(error)", + "doc": "runBeforeEachFn Executes a group’s beforeEach hook for a specific check\n\nThis function runs the optional beforeEachFn defined on a ChecksGroup,\npassing it the current Check. It captures panics or returned errors, logs\ndiagnostic information, and records the failure by marking the check as\nerrored and skipping subsequent checks. 
If no issues occur, the function\nsimply returns nil.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:241", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "recover", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "runtime/debug", + "name": "Stack", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "beforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "onFailure", 
+ "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + 
"\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) {", + "\tlog.Debug(\"GROUP %s - Running beforeEach for check %s\", group.name, check.ID)", + "\tif group.beforeEachFn == nil {", + "\t\treturn nil", + "\t}", + 
"", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tlog.Error(\"Panic while running beforeEach function:\\n%v\", stackTrace)", + "\t\t\t// Set last check's result as error, no need to skip anyone.", + "\t\t\terr = onFailure(\"beforeEach function panicked\", \"\\n: \"+stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := group.beforeEachFn(check); err != nil {", + "\t\tlog.Error(\"Unexpected error while running beforeEach function:\\n%v\", err.Error())", + "\t\t// Set last check's result as error, no need to skip anyone.", + "\t\treturn onFailure(\"beforeEach function unexpected error\", err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "runCheck", + "qualifiedName": "runCheck", + "exported": false, + "signature": "func(*Check, *ChecksGroup, []*Check)(error)", + "doc": "runCheck Executes a check with error handling and panic recovery\n\nThe function runs the provided check, capturing any panics or errors that\noccur during its execution. If a panic is detected, it distinguishes between\nan intentional abort and unexpected failures, logs detailed information, and\nmarks subsequent checks as skipped. 
Successful completion returns nil, while\nany failure results in an error describing the issue.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:362", + "calls": [ + { + "name": "recover", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "runtime/debug", + "name": "Stack", + "kind": "function" + }, + { + "name": "Check.LogError", + "kind": "function", + "source": [ + "func (check *Check) LogError(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelError, msg, args...)", + "}" + ] + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Check.Run", + "kind": "function", + "source": [ + "func (check *Check) Run() error {", + "\tif check == nil {", + "\t\treturn fmt.Errorf(\"check is a nil pointer\")", + "\t}", + "", + "\tif check.Error != nil {", + "\t\treturn fmt.Errorf(\"unable to run due to a previously existing 
error: %v\", check.Error)", + "\t}", + "", + "\tcli.PrintCheckRunning(check.ID)", + "", + "\tcheck.StartTime = time.Now()", + "\tdefer func() {", + "\t\tcheck.EndTime = time.Now()", + "\t}()", + "", + "\tcheck.LogInfo(\"Running check (labels: %v)\", check.Labels)", + "\tif check.BeforeCheckFn != nil {", + "\t\tif err := check.BeforeCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in before check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tif err := check.CheckFn(check); err != nil {", + "\t\treturn fmt.Errorf(\"check %s failed in check function: %v\", check.ID, err)", + "\t}", + "", + "\tif check.AfterCheckFn != nil {", + "\t\tif err := check.AfterCheckFn(check); err != nil {", + "\t\t\treturn fmt.Errorf(\"check %s failed in after check function: %v\", check.ID, err)", + "\t\t}", + "\t}", + "", + "\tprintCheckResult(check)", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "Check.LogError", + "kind": "function", + "source": [ + "func (check *Check) LogError(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, 
strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) {", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\t// Don't do anything in case the check was manually aborted by check.Abort().", + "\t\t\tif msg, ok := r.(AbortPanicMsg); ok {", + "\t\t\t\tlog.Warn(\"Check was manually aborted, msg: %v\", msg)", + "\t\t\t\terr = fmt.Errorf(\"%v\", msg)", + "\t\t\t\treturn", + "\t\t\t}", + "", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "", + "\t\t\tcheck.LogError(\"Panic while running check %s function:\\n%v\", check.ID, stackTrace)", + "\t\t\terr = onFailure(fmt.Sprintf(\"check %s function panic\", check.ID), stackTrace, group, check, remainingChecks)", + "\t\t}", + "\t}()", + "", + "\tif err := check.Run(); err != nil {", + "\t\tcheck.LogError(\"Unexpected error while running check %s function: %v\", check.ID, err.Error())", + "\t\treturn onFailure(fmt.Sprintf(\"check %s function 
unexpected error\", check.ID), err.Error(), group, check, remainingChecks)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "shouldSkipCheck", + "qualifiedName": "shouldSkipCheck", + "exported": false, + "signature": "func(*Check)(bool, []string)", + "doc": "shouldSkipCheck decides whether a check should be skipped based on its skip functions\n\nThe function evaluates each user-provided skip function, collecting any\nreasons for skipping. If any reason is found, it applies the check's SkipMode\npolicy: SkipModeAny skips if at least one reason exists, while SkipModeAll\nrequires all skip functions to indicate a skip. The function also recovers\nfrom panics in skip functions, logs an error, and treats that as a skip with\na panic reason.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:305", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "recover", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "runtime/debug", + "name": "Stack", + "kind": "function" + }, + { + "name": "Check.LogError", + "kind": "function", + "source": [ + "func (check *Check) LogError(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "skipFn", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, 
failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = 
append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func shouldSkipCheck(check *Check) (skip bool, reasons []string) {", + "\tif len(check.SkipCheckFns) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Short-circuit", + "\tif len(check.SkipCheckFns) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Save the skipFn index in case it panics so it can be used in the log trace.", + "\tcurrentSkipFnIndex := 0", + "", + "\tdefer func() {", + "\t\tif r := recover(); r != nil {", + "\t\t\tstackTrace := fmt.Sprint(r) + \"\\n\" + string(debug.Stack())", + "\t\t\tcheck.LogError(\"Skip check function (idx=%d) panic'ed: %s\", currentSkipFnIndex, stackTrace)", + "\t\t\tskip = true", + "\t\t\treasons = []string{fmt.Sprintf(\"skipCheckFn (idx=%d) panic:\\n%s\", currentSkipFnIndex, stackTrace)}", + "\t\t}", + "\t}()", + "", + "\t// Call all the skip functions first.", + "\tfor _, skipFn := range check.SkipCheckFns {", + "\t\tif skip, reason := skipFn(); skip {", + "\t\t\treasons = append(reasons, reason)", + "\t\t}", + "\t\tcurrentSkipFnIndex++", + "\t}", + "", + "\t// If none of the skipFn returned true, exit now.", + "\tif len(reasons) == 0 {", + "\t\treturn false, []string{}", + "\t}", + "", + "\t// Now we need to check the skipMode 
for this check.", + "\tswitch check.SkipMode {", + "\tcase SkipModeAny:", + "\t\treturn true, reasons", + "\tcase SkipModeAll:", + "\t\t// Only skip if all the skipFn returned true.", + "\t\tif len(reasons) == len(check.SkipCheckFns) {", + "\t\t\treturn true, reasons", + "\t\t}", + "\t\treturn false, []string{}", + "\t}", + "", + "\treturn false, []string{}", + "}" + ] + }, + { + "name": "skipAll", + "qualifiedName": "skipAll", + "exported": false, + "signature": "func([]*Check, string)()", + "doc": "skipAll marks all remaining checks as skipped with a given reason\n\nThis routine iterates over a slice of check objects, calling an internal\nhelper for each one to log the skip action, set its result state to skipped,\nand output its status. The provided reason string is passed unchanged to\nevery check so that downstream reporting can identify why the checks were not\nexecuted. No value is returned; the function simply updates each check's\ninternal state.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:146", + "calls": [ + { + "name": "skipCheck", + "kind": "function", + "source": [ + "func skipCheck(check *Check, reason string) {", + "\tcheck.LogInfo(\"Skipping check %s, reason: %s\", check.ID, reason)", + "\tcheck.SetResultSkipped(reason)", + "\tprintCheckResult(check)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "onFailure", + "kind": "function", + "source": [ + "func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error {", + "\t// Set current Check's result as error.", + "\tfmt.Printf(\"\\r[ %s ] %-60s\\n\", cli.CheckResultTagError, currentCheck.ID)", + "\tcurrentCheck.SetResultError(failureType + \": \" + failureMsg)", + "\t// Set the remaining checks as skipped, using a simplified reason msg.", + "\treason := \"group \" + group.name + \" \" + failureType", + "\tskipAll(remainingChecks, 
reason)", + "\t// Return generic error using the reason.", + "\treturn errors.New(reason)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func skipAll(checks []*Check, reason string) {", + "\tfor _, check := range checks {", + "\t\tskipCheck(check, reason)", + "\t}", + "}" + ] + }, + { + "name": "skipCheck", + "qualifiedName": "skipCheck", + "exported": false, + "signature": "func(*Check, string)()", + "doc": "skipCheck Marks a check as skipped with a reason\n\nThis function records an informational message indicating that the specified\ncheck will not be executed due to the supplied reason. It then updates the\ncheck’s status to skipped and displays the outcome using the standard\noutput routine.", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:132", + "calls": [ + { + "name": "Check.LogInfo", + "kind": "function", + "source": [ + "func (check *Check) LogInfo(msg string, args ...any) {", + "\tlog.Logf(check.logger, log.LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Check.SetResultSkipped", + "kind": "function", + "source": [ + "func (check *Check) SetResultSkipped(reason string) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tcheck.Result = CheckResultSkipped", + "\tcheck.skipReason = reason", + "}" + ] + }, + { + "name": "printCheckResult", + "kind": "function", + "source": [ + "func printCheckResult(check *Check) {", + "\tswitch check.Result {", + "\tcase CheckResultPassed:", + "\t\tcli.PrintCheckPassed(check.ID)", + "\tcase CheckResultFailed:", + "\t\tcli.PrintCheckFailed(check.ID)", + "\tcase CheckResultSkipped:", + "\t\tcli.PrintCheckSkipped(check.ID, check.skipReason)", + "\tcase CheckResultAborted:", + "\t\tcli.PrintCheckAborted(check.ID, check.skipReason)", + "\tcase CheckResultError:", + "\t\tcli.PrintCheckErrored(check.ID)", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "ChecksGroup.RunChecks", + "kind": "function", + "source": [ + "func (group *ChecksGroup) RunChecks(stopChan \u003c-chan bool, abortChan chan string) (errs []error, failedChecks int) {", + "\tlog.Info(\"Running group %q checks.\", group.name)", + "\tfmt.Printf(\"Running suite %s\\n\", strings.ToUpper(group.name))", + "", + "\t// Get checks to run based on the label expr.", + "\tchecks := []*Check{}", + "\tfor _, check := range group.checks {", + "\t\tif !labelsExprEvaluator.Eval(check.Labels) {", + "\t\t\tskipCheck(check, \"no matching labels\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tchecks = append(checks, check)", + "\t}", + "", + "\tif len(checks) == 0 {", + "\t\treturn nil, 0", + "\t}", + "", + "\t// Run afterAllFn always, no matter previous panics/crashes.", + "\tdefer func() {", + "\t\tif err := runAfterAllFn(group, checks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "\t}()", + "", + "\tif err := runBeforeAllFn(group, checks); err != nil {", + "\t\terrs = append(errs, err)", + "\t\treturn errs, 0", + "\t}", + "", + "\tlog.Info(\"Checks to run: %d (group's total=%d)\", len(checks), len(group.checks))", + "\tgroup.currentRunningCheckIdx = 0", + "\tfor i, check := range checks {", + "\t\t// Fast stop in case the stop (abort/timeout) signal received.", + "\t\tselect {", + "\t\tcase \u003c-stopChan:", + "\t\t\treturn nil, 0", + "\t\tdefault:", + "\t\t}", + "", + "\t\t// Create a remainingChecks list excluding the current check.", + "\t\tremainingChecks := []*Check{}", + "\t\tif i+1 \u003c len(checks) {", + "\t\t\tremainingChecks = checks[i+1:]", + "\t\t}", + "", + "\t\tif err := runBeforeEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = []error{err}", + "\t\t}", + "", + "\t\tif len(errs) == 0 {", + "\t\t\t// Should we skip this check?", + "\t\t\tskip, reasons := shouldSkipCheck(check)", + "\t\t\tif skip {", + "\t\t\t\tskipCheck(check, 
strings.Join(reasons, \", \"))", + "\t\t\t} else {", + "\t\t\t\tcheck.SetAbortChan(abortChan) // Set the abort channel for the check.", + "\t\t\t\terr := runCheck(check, group, remainingChecks)", + "\t\t\t\tif err != nil {", + "\t\t\t\t\terrs = append(errs, err)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// afterEach func must run even if the check was skipped or panicked/unexpected error.", + "\t\tif err := runAfterEachFn(group, check, remainingChecks); err != nil {", + "\t\t\terrs = append(errs, err)", + "\t\t}", + "", + "\t\t// Don't run more checks if any of beforeEach, the checkFn or afterEach functions errored/panicked.", + "\t\tif len(errs) \u003e 0 {", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Increment the failed checks counter.", + "\t\tif check.Result.String() == CheckResultFailed {", + "\t\t\tfailedChecks++", + "\t\t}", + "", + "\t\tgroup.currentRunningCheckIdx++", + "\t}", + "", + "\treturn errs, failedChecks", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "skipAll", + "kind": "function", + "source": [ + "func skipAll(checks []*Check, reason string) {", + "\tfor _, check := range checks {", + "\t\tskipCheck(check, reason)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func skipCheck(check *Check, reason string) {", + "\tcheck.LogInfo(\"Skipping check %s, reason: %s\", check.ID, reason)", + "\tcheck.SetResultSkipped(reason)", + "\tprintCheckResult(check)", + "}" + ] + } + ], + "globals": [ + { + "name": "dbByGroup", + "exported": false, + "type": "map[string]*ChecksGroup", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:23" + }, + { + "name": "dbLock", + "exported": false, + "type": "sync.Mutex", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:22" + }, + { + "name": "labelsExprEvaluator", + "exported": false, + "type": "labels.LabelsExprEvaluator", + "position": 
"/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:27" + }, + { + "name": "resultsDB", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:25" + } + ], + "consts": [ + { + "name": "CheckResultAborted", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:20" + }, + { + "name": "CheckResultError", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:19" + }, + { + "name": "CheckResultFailed", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:18" + }, + { + "name": "CheckResultPassed", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:16" + }, + { + "name": "CheckResultSkipped", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:17" + }, + { + "name": "FAILED", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:179" + }, + { + "name": "PASSED", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:178" + }, + { + "name": "SKIPPED", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:180" + }, + { + "name": "SkipModeAll", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:27" + }, + { + "name": "SkipModeAny", + "exported": true, + "type": "skipMode", + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/check.go:26" + }, + { + "name": "checkIdxNone", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksgroup.go:14" + }, + { + "name": "nbColorSymbols", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/checksdb/checksdb.go:209" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "claimhelper", + "files": 1, + "imports": [ + "encoding/json", + "encoding/xml", + "fmt", + 
"github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "os", + "sort", + "strconv", + "strings", + "time" + ], + "structs": [ + { + "name": "ClaimBuilder", + "exported": true, + "doc": "ClaimBuilder Creates and writes claim reports in various formats\n\nIt gathers test results, populates the claim structure with metadata,\nconfigurations, and node information, then serializes the data to a file. The\nbuilder can also reset timestamps or output JUnit XML for CI integration.\nErrors during marshaling or file writing are logged as fatal.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:146", + "fields": { + "claimRoot": "*claim.Root" + }, + "methodNames": [ + "Build", + "Reset", + "ToJUnitXML" + ], + "source": [ + "type ClaimBuilder struct {", + "\tclaimRoot *claim.Root", + "}" + ] + }, + { + "name": "FailureMessage", + "exported": true, + "doc": "FailureMessage Represents an error message returned by a claim helper operation\n\nThe structure holds the error text as well as optional attributes for the\nmessage and its type. 
It is used to convey failure information in XML\nresponses, with the Text field containing the main content, while Message and\nType provide metadata that can be omitted if empty.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:70", + "fields": { + "Message": "string", + "Text": "string", + "Type": "string" + }, + "methodNames": null, + "source": [ + "type FailureMessage struct {", + "\tText string `xml:\",chardata\"`", + "\tMessage string `xml:\"message,attr,omitempty\"`", + "\tType string `xml:\"type,attr,omitempty\"`", + "}" + ] + }, + { + "name": "SkippedMessage", + "exported": true, + "doc": "SkippedMessage signals a skipped claim during processing\n\nThis struct holds the text of a message that is omitted from normal output\nand any associated metadata. The Text field contains the raw XML character\ndata while Messages stores an optional attribute providing additional\ncontext. It is used by the claim helper to record items that were\nintentionally left out during certificate claim generation.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:59", + "fields": { + "Messages": "string", + "Text": "string" + }, + "methodNames": null, + "source": [ + "type SkippedMessage struct {", + "\tText string `xml:\",chardata\"`", + "\tMessages string `xml:\"message,attr,omitempty\"`", + "}" + ] + }, + { + "name": "TestCase", + "exported": true, + "doc": "TestCase Holds the results of an individual test run\n\nThis structure stores metadata and outcome information for a single test\ncase, including its name, class context, execution status, duration, and any\nerror output. 
It also provides optional sub-structures to represent skipped\nor failed executions, enabling detailed reporting in XML format.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:82", + "fields": { + "Classname": "string", + "Failure": "*FailureMessage", + "Name": "string", + "Skipped": "*SkippedMessage", + "Status": "string", + "SystemErr": "string", + "Text": "string", + "Time": "string" + }, + "methodNames": null, + "source": [ + "type TestCase struct {", + "\tText string `xml:\",chardata\"`", + "\tName string `xml:\"name,attr,omitempty\"`", + "\tClassname string `xml:\"classname,attr,omitempty\"`", + "\tStatus string `xml:\"status,attr,omitempty\"`", + "\tTime string `xml:\"time,attr,omitempty\"`", + "\tSystemErr string `xml:\"system-err,omitempty\"`", + "\tSkipped *SkippedMessage `xml:\"skipped\"`", + "\tFailure *FailureMessage `xml:\"failure\"`", + "}" + ] + }, + { + "name": "TestSuitesXML", + "exported": true, + "doc": "TestSuitesXML Represents an XML report of test suite results\n\nThis struct holds attributes such as the total number of tests, failures,\ndisabled tests, errors, and elapsed time for a test run. It also contains a\nnested Testsuite element that provides more detailed information about each\nindividual test case. 
The fields are marshaled into XML with corresponding\nattribute tags.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:129", + "fields": { + "Disabled": "string", + "Errors": "string", + "Failures": "string", + "Tests": "string", + "Testsuite": "Testsuite", + "Text": "string", + "Time": "string", + "XMLName": "xml.Name" + }, + "methodNames": null, + "source": [ + "type TestSuitesXML struct {", + "\tXMLName xml.Name `xml:\"testsuites\"`", + "\tText string `xml:\",chardata\"`", + "\tTests string `xml:\"tests,attr,omitempty\"`", + "\tDisabled string `xml:\"disabled,attr,omitempty\"`", + "\tErrors string `xml:\"errors,attr,omitempty\"`", + "\tFailures string `xml:\"failures,attr,omitempty\"`", + "\tTime string `xml:\"time,attr,omitempty\"`", + "\tTestsuite Testsuite `xml:\"testsuite\"`", + "}" + ] + }, + { + "name": "Testsuite", + "exported": true, + "doc": "Testsuite Represents the results of a test suite execution\n\nThis struct holds metadata about a collection of tests, including counts for\ntotal tests, failures, errors, skipped and disabled cases. It also stores\ntiming information, timestamps, and any properties that may be attached to\nthe suite. 
Each individual test case is captured in a slice of TestCase\nstructs, allowing detailed inspection of each test's outcome.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:100", + "fields": { + "Disabled": "string", + "Errors": "string", + "Failures": "string", + "Name": "string", + "Package": "string", + "Properties": "struct{Text string; Property []struct{Text string; Name string; Value string}}", + "Skipped": "string", + "Testcase": "[]TestCase", + "Tests": "string", + "Text": "string", + "Time": "string", + "Timestamp": "string" + }, + "methodNames": null, + "source": [ + "type Testsuite struct {", + "\tText string `xml:\",chardata\"`", + "\tName string `xml:\"name,attr,omitempty\"`", + "\tPackage string `xml:\"package,attr,omitempty\"`", + "\tTests string `xml:\"tests,attr,omitempty\"`", + "\tDisabled string `xml:\"disabled,attr,omitempty\"`", + "\tSkipped string `xml:\"skipped,attr,omitempty\"`", + "\tErrors string `xml:\"errors,attr,omitempty\"`", + "\tFailures string `xml:\"failures,attr,omitempty\"`", + "\tTime string `xml:\"time,attr,omitempty\"`", + "\tTimestamp string `xml:\"timestamp,attr,omitempty\"`", + "\tProperties struct {", + "\t\tText string `xml:\",chardata\"`", + "\t\tProperty []struct {", + "\t\t\tText string `xml:\",chardata\"`", + "\t\t\tName string `xml:\"name,attr,omitempty\"`", + "\t\t\tValue string `xml:\"value,attr,omitempty\"`", + "\t\t} `xml:\"property\"`", + "\t} `xml:\"properties\"`", + "\tTestcase []TestCase `xml:\"testcase\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "Build", + "qualifiedName": "ClaimBuilder.Build", + "exported": true, + "receiver": "ClaimBuilder", + "signature": "func(string)()", + "doc": "ClaimBuilder.Build generates a claim file with results and timestamps\n\nThis method records the current time as the claim's end time, retrieves\nreconciled test results from the database, marshals the complete claim\nstructure into JSON, writes that data to the 
specified output file, and logs\nthe creation location. It relies on helper functions for marshalling and file\nwriting and uses UTC formatting for consistency.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:198", + "calls": [ + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "name": "Format", + "kind": "function" + }, + { + "name": "UTC", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "GetReconciledResults", + "kind": "function", + "source": [ + "func GetReconciledResults() map[string]claim.Result {", + "\tresultMap := make(map[string]claim.Result)", + "\tfor key := range resultsDB {", + "\t\t// initializes the result map, if necessary", + "\t\tif _, ok := resultMap[key]; !ok {", + "\t\t\tresultMap[key] = claim.Result{}", + "\t\t}", + "", + "\t\tresultMap[key] = resultsDB[key]", + "\t}", + "\treturn resultMap", + "}" + ] + }, + { + "name": "MarshalClaimOutput", + "kind": "function", + "source": [ + "func MarshalClaimOutput(claimRoot *claim.Root) []byte {", + "\tpayload, err := j.MarshalIndent(claimRoot, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the claim: %v\", err)", + "\t}", + "\treturn payload", + "}" + ] + }, + { + "name": "WriteClaimOutput", + "kind": "function", + "source": [ + "func WriteClaimOutput(claimOutputFile string, payload []byte) {", + "\tlog.Info(\"Writing claim data to %s\", claimOutputFile)", + "\terr := os.WriteFile(claimOutputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing claim data:\\n%s\", string(payload))", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, 
+ "usesGlobals": null, + "source": [ + "func (c *ClaimBuilder) Build(outputFile string) {", + "\tendTime := time.Now()", + "", + "\tc.claimRoot.Claim.Metadata.EndTime = endTime.UTC().Format(DateTimeFormatDirective)", + "\tc.claimRoot.Claim.Results = checksdb.GetReconciledResults()", + "", + "\t// Marshal the claim and output to file", + "\tpayload := MarshalClaimOutput(c.claimRoot)", + "\tWriteClaimOutput(outputFile, payload)", + "", + "\tlog.Info(\"Claim file created at %s\", outputFile)", + "}" + ] + }, + { + "name": "Reset", + "qualifiedName": "ClaimBuilder.Reset", + "exported": true, + "receiver": "ClaimBuilder", + "signature": "func()()", + "doc": "ClaimBuilder.Reset Updates the claim's start timestamp\n\nThe method assigns the current UTC time, formatted with the predefined\ndirective, to the Claim.Metadata.StartTime field of the builder. It performs\nthis operation in place and does not return a value.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:345", + "calls": [ + { + "name": "Format", + "kind": "function" + }, + { + "name": "UTC", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *ClaimBuilder) Reset() {", + "\tc.claimRoot.Claim.Metadata.StartTime = time.Now().UTC().Format(DateTimeFormatDirective)", + "}" + ] + }, + { + "name": "ToJUnitXML", + "qualifiedName": "ClaimBuilder.ToJUnitXML", + "exported": true, + "receiver": "ClaimBuilder", + "signature": "func(string, time.Time, time.Time)()", + "doc": "ClaimBuilder.ToJUnitXML Generate a JUnit XML file from claim data\n\nThis method builds a structured JUnit XML representation of the current claim\nresults, marshals it into indented XML, and writes it to the specified file\npath with appropriate permissions. 
It logs progress and aborts execution if\nmarshalling or file writing fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:323", + "calls": [ + { + "name": "populateXMLFromClaim", + "kind": "function", + "source": [ + "func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) TestSuitesXML {", + "\tconst (", + "\t\tTestSuiteName = \"CNF Certification Test Suite\"", + "\t)", + "", + "\t// Collector all of the Test IDs", + "\tallTestIDs := []string{}", + "\tfor testID := range c.Results {", + "\t\tallTestIDs = append(allTestIDs, c.Results[testID].TestID.Id)", + "\t}", + "", + "\t// Sort the test IDs", + "\tsort.Strings(allTestIDs)", + "", + "\txmlOutput := TestSuitesXML{}", + "\t// \u003ctestsuites\u003e", + "\txmlOutput.Tests = strconv.Itoa(len(c.Results))", + "", + "\t// Count all of the failed tests in the suite", + "\tfailedTests := 0", + "\tfor testID := range c.Results {", + "\t\tif c.Results[testID].State == TestStateFailed {", + "\t\t\tfailedTests++", + "\t\t}", + "\t}", + "", + "\t// Count all of the skipped tests in the suite", + "\tskippedTests := 0", + "\tfor testID := range c.Results {", + "\t\tif c.Results[testID].State == TestStateSkipped {", + "\t\t\tskippedTests++", + "\t\t}", + "\t}", + "", + "\txmlOutput.Failures = strconv.Itoa(failedTests)", + "\txmlOutput.Disabled = strconv.Itoa(skippedTests)", + "\txmlOutput.Errors = strconv.Itoa(0)", + "\txmlOutput.Time = strconv.FormatFloat(endTime.Sub(startTime).Seconds(), 'f', 5, 64)", + "", + "\t// \u003ctestsuite\u003e", + "\txmlOutput.Testsuite.Name = TestSuiteName", + "\txmlOutput.Testsuite.Tests = strconv.Itoa(len(c.Results))", + "\t// Counters for failed and skipped tests", + "\txmlOutput.Testsuite.Failures = strconv.Itoa(failedTests)", + "\txmlOutput.Testsuite.Skipped = strconv.Itoa(skippedTests)", + "\txmlOutput.Testsuite.Errors = strconv.Itoa(0)", + "", + "\txmlOutput.Testsuite.Time = strconv.FormatFloat(endTime.Sub(startTime).Seconds(), 'f', 5, 64)", + 
"\txmlOutput.Testsuite.Timestamp = time.Now().UTC().Format(DateTimeFormatDirective)", + "", + "\t// \u003cproperties\u003e", + "", + "\t// \u003ctestcase\u003e", + "\t// Loop through all of the sorted test IDs", + "\tfor _, testID := range allTestIDs {", + "\t\ttestCase := TestCase{}", + "\t\ttestCase.Name = testID", + "\t\ttestCase.Classname = TestSuiteName", + "\t\ttestCase.Status = c.Results[testID].State", + "", + "\t\t// Clean the time strings to remove the \" m=\" suffix", + "\t\tstart, err := time.Parse(DateTimeFormatDirective, strings.Split(c.Results[testID].StartTime, \" m=\")[0])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse start time: %v\", err)", + "\t\t}", + "\t\tend, err := time.Parse(DateTimeFormatDirective, strings.Split(c.Results[testID].EndTime, \" m=\")[0])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse end time: %v\", err)", + "\t\t}", + "", + "\t\t// Calculate the duration of the test case", + "\t\tdifference := end.Sub(start)", + "\t\ttestCase.Time = strconv.FormatFloat(difference.Seconds(), 'f', 10, 64)", + "", + "\t\t// Populate the skipped message if the test case was skipped", + "\t\tif testCase.Status == TestStateSkipped {", + "\t\t\ttestCase.Skipped = \u0026SkippedMessage{}", + "\t\t\ttestCase.Skipped.Text = c.Results[testID].SkipReason", + "\t\t} else {", + "\t\t\ttestCase.Skipped = nil", + "\t\t}", + "", + "\t\t// Populate the failure message if the test case failed", + "\t\tif testCase.Status == TestStateFailed {", + "\t\t\ttestCase.Failure = \u0026FailureMessage{}", + "\t\t\ttestCase.Failure.Text = c.Results[testID].CheckDetails", + "\t\t} else {", + "\t\t\ttestCase.Failure = nil", + "\t\t}", + "", + "\t\t// Append the test case to the test suite", + "\t\txmlOutput.Testsuite.Testcase = append(xmlOutput.Testsuite.Testcase, testCase)", + "\t}", + "", + "\treturn xmlOutput", + "}" + ] + }, + { + "pkgPath": "encoding/xml", + "name": "MarshalIndent", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "WriteFile", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Time) {", + "\t// Create the JUnit XML file from the claim output.", + "\txmlOutput := populateXMLFromClaim(*c.claimRoot.Claim, startTime, endTime)", + "", + "\t// Write the JUnit XML file.", + "\tpayload, err := xml.MarshalIndent(xmlOutput, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the xml: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Writing JUnit XML file: %s\", outputFile)", + "\terr = os.WriteFile(outputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to write the xml file\")", + "\t}", + "}" + ] + }, + { + "name": "CreateClaimRoot", + "qualifiedName": "CreateClaimRoot", + "exported": true, + "signature": "func()(*claim.Root)", + "doc": "CreateClaimRoot Initializes a claim root with current UTC timestamp\n\nThe function obtains 
the present moment, formats it as an ISO‑8601 string\nin UTC, and embeds that value into a new claim structure. It returns a\npointer to this freshly constructed root object for use by higher‑level\nbuilders.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:491", + "calls": [ + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "name": "Format", + "kind": "function" + }, + { + "name": "UTC", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CreateClaimRoot() *claim.Root {", + "\t// Initialize the claim with the start time.", + "\tstartTime := 
time.Now()", + "\treturn \u0026claim.Root{", + "\t\tClaim: \u0026claim.Claim{", + "\t\t\tMetadata: \u0026claim.Metadata{", + "\t\t\t\tStartTime: startTime.UTC().Format(DateTimeFormatDirective),", + "\t\t\t},", + "\t\t},", + "\t}", + "}" + ] + }, + { + "name": "GenerateNodes", + "qualifiedName": "GenerateNodes", + "exported": true, + "signature": "func()(map[string]interface{})", + "doc": "GenerateNodes Collects node information for claim files\n\nThis function aggregates several pieces of data about the cluster nodes,\nincluding a JSON representation of each node, CNI plugin details, hardware\ncharacteristics, and CSI driver status. It retrieves this information by\ncalling diagnostic helpers that query the test environment or Kubernetes API.\nThe resulting map is returned for inclusion in claim documents.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:470", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetNodeJSON", + "kind": "function", + "source": [ + "func GetNodeJSON() (out map[string]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "", + "\tnodesJSON, err := json.Marshal(env.Nodes)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not Marshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\terr = json.Unmarshal(nodesJSON, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not unMarshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCniPlugins", + "kind": "function", + "source": [ + "func GetCniPlugins() (out map[string][]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string][]interface{})", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, 
probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, cniPluginsCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cniPluginsCommand, probePod.String())", + "\t\t\tcontinue", + "\t\t}", + "\t\tdecoded := []interface{}{}", + "\t\terr = json.Unmarshal([]byte(outStr), \u0026decoded)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not decode json file because of: %s\", err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = decoded", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetHwInfoAllNodes", + "kind": "function", + "source": [ + "func GetHwInfoAllNodes() (out map[string]NodeHwInfo) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string]NodeHwInfo)", + "\tfor _, probePod := range env.ProbePods {", + "\t\thw := NodeHwInfo{}", + "\t\tlscpu, err := getHWJsonOutput(probePod, o, lscpuCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lscpu for node %s\", probePod.Spec.NodeName)", + "\t\t} else {", + "\t\t\tvar ok bool", + "\t\t\ttemp, ok := lscpu.(map[string]interface{})", + "\t\t\tif !ok {", + "\t\t\t\tlog.Error(\"problem casting lscpu field for node %s, lscpu=%v\", probePod.Spec.NodeName, lscpu)", + "\t\t\t} else {", + "\t\t\t\thw.Lscpu = temp[\"lscpu\"]", + "\t\t\t}", + "\t\t}", + "\t\thw.IPconfig, err = getHWJsonOutput(probePod, o, ipCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting ip config for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lsblk, err = getHWJsonOutput(probePod, o, lsblkCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lsblk for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lspci, err = getHWTextOutput(probePod, o, lspciCommand)", + "\t\tif err != nil {", + 
"\t\t\tlog.Error(\"problem getting lspci for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = hw", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCsiDriver", + "kind": "function", + "source": [ + "func GetCsiDriver() (out map[string]interface{}) {", + "\to := clientsholder.GetClientsHolder()", + "\tcsiDriver, err := o.K8sClient.StorageV1().CSIDrivers().List(context.TODO(), apimachineryv1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Fail CSIDrivers.list err:%s\", err)", + "\t\treturn out", + "\t}", + "\tscheme := runtime.NewScheme()", + "\terr = storagev1.AddToScheme(scheme)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail AddToScheme err:%s\", err)", + "\t\treturn out", + "\t}", + "\tcodec := serializer.NewCodecFactory(scheme).LegacyCodec(storagev1.SchemeGroupVersion)", + "\tdata, err := runtime.Encode(codec, csiDriver)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail to encode Nodes to json, er: %s\", err)", + "\t\treturn out", + "\t}", + "", + "\terr = json.Unmarshal(data, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"failed to marshall nodes json, err: %v\", err)", + "\t\treturn out", + "\t}", + "\treturn out", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + 
"\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GenerateNodes() map[string]interface{} {", + "\tconst (", + "\t\tnodeSummaryField = \"nodeSummary\"", + "\t\tcniPluginsField = \"cniPlugins\"", + "\t\tnodesHwInfo = \"nodesHwInfo\"", + "\t\tcsiDriverInfo = \"csiDriver\"", + "\t)", + "\tnodes := map[string]interface{}{}", + "\tnodes[nodeSummaryField] = diagnostics.GetNodeJSON() // add node summary", + "\tnodes[cniPluginsField] = diagnostics.GetCniPlugins() // add cni plugins", + "\tnodes[nodesHwInfo] = diagnostics.GetHwInfoAllNodes() // add nodes hardware information", + "\tnodes[csiDriverInfo] = diagnostics.GetCsiDriver() // add csi drivers info", + "\treturn nodes", + "}" + ] + }, + { + "name": "GetConfigurationFromClaimFile", + "qualifiedName": "GetConfigurationFromClaimFile", + "exported": true, + "signature": "func(string)(*provider.TestEnvironment, error)", + "doc": "GetConfigurationFromClaimFile extracts test environment configuration from a claim file\n\nThe function reads the specified claim file, unmarshals its JSON contents\ninto an intermediate structure, then marshals the embedded configuration\nsection back to JSON before decoding it into a TestEnvironment object. It\nreturns that object and any error encountered during reading or parsing. 
The\nprocess uses logging for read failures and ensures errors propagate to the\ncaller.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:419", + "calls": [ + { + "name": "ReadClaimFile", + "kind": "function", + "source": [ + "func ReadClaimFile(claimFileName string) (data []byte, err error) {", + "\tdata, err = os.ReadFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadFile failed with err: %v\", err)", + "\t}", + "\tlog.Info(\"Reading claim file at path: %s\", claimFileName)", + "\treturn data, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "name": "UnmarshalClaim", + "kind": "function", + "source": [ + "func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) {", + "\terr := j.Unmarshal(claimFile, \u0026claimRoot)", + "\tif err != nil {", + "\t\tlog.Fatal(\"error unmarshalling claim file: %v\", err)", + "\t}", + "}" + ] + }, + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvironment, err error) {", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn env, err", + "\t}", + "\tvar aRoot claim.Root", + "\tfmt.Printf(\"%s\", data)", + "\tUnmarshalClaim(data, \u0026aRoot)", + "\tconfigJSON, err := j.Marshal(aRoot.Claim.Configurations)", + "\tif err != nil {", + "\t\treturn 
nil, fmt.Errorf(\"cannot convert config to json\")", + "\t}", + "\terr = j.Unmarshal(configJSON, \u0026env)", + "\treturn env, err", + "}" + ] + }, + { + "name": "MarshalClaimOutput", + "qualifiedName": "MarshalClaimOutput", + "exported": true, + "signature": "func(*claim.Root)([]byte)", + "doc": "MarshalClaimOutput Serializes a claim structure into formatted JSON\n\nThe function receives a pointer to the root of a claim object and attempts to\nmarshal it into indented JSON. If marshalling fails, it logs a fatal error\nand terminates the program. On success, it returns the resulting byte slice\nfor further use.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:442", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "MarshalIndent", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ClaimBuilder.Build", + "kind": "function", + "source": [ + "func (c *ClaimBuilder) Build(outputFile string) {", + "\tendTime := time.Now()", + "", + "\tc.claimRoot.Claim.Metadata.EndTime = endTime.UTC().Format(DateTimeFormatDirective)", + "\tc.claimRoot.Claim.Results = checksdb.GetReconciledResults()", + "", + "\t// Marshal the claim and output to file", + "\tpayload := MarshalClaimOutput(c.claimRoot)", + "\tWriteClaimOutput(outputFile, payload)", + "", + "\tlog.Info(\"Claim file created at %s\", outputFile)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, 
labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func MarshalClaimOutput(claimRoot *claim.Root) []byte {", + "\tpayload, err := j.MarshalIndent(claimRoot, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the claim: %v\", err)", + "\t}", + "\treturn payload", + "}" + ] + }, + { + "name": "MarshalConfigurations", + "qualifiedName": "MarshalConfigurations", + "exported": true, + "signature": "func(*provider.TestEnvironment)([]byte, error)", + "doc": "MarshalConfigurations Converts test environment data into JSON bytes\n\nThis routine accepts a pointer to the test configuration structure, falls\nback to a default instance if nil, and marshals it into a JSON byte slice.\nErrors during marshalling are logged as errors and returned for callers to\nhandle. 
The function returns the resulting byte slice along with any error\nencountered.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:356", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + 
"\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func MarshalConfigurations(env *provider.TestEnvironment) (configurations []byte, err error) {", + "\tconfig := env", + "\tif config == nil {", + "\t\t*config = provider.GetTestEnvironment()", + "\t}", + "\tconfigurations, err = j.Marshal(\u0026config)", + "\tif err != nil {", + "\t\tlog.Error(\"Error converting configurations to JSON: %v\", err)", + "\t\treturn configurations, err", + "\t}", + "\treturn configurations, nil", + "}" + ] + }, + { + "name": "NewClaimBuilder", + "qualifiedName": "NewClaimBuilder", + "exported": true, + "signature": "func(*provider.TestEnvironment)(*ClaimBuilder, error)", + "doc": "NewClaimBuilder Creates a claim builder from test environment\n\nThe function accepts a test environment, marshals its configuration into\nJSON, unmarshals it back into a map, and populates a new claim root with\nconfigurations, node information, and version data. It handles unit test mode\nby skipping marshalling steps. 
The resulting ClaimBuilder contains the fully\nprepared claim structure for later serialization.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:157", + "calls": [ + { + "pkgPath": "os", + "name": "Getenv", + "kind": "function" + }, + { + "name": "CreateClaimRoot", + "kind": "function", + "source": [ + "func CreateClaimRoot() *claim.Root {", + "\t// Initialize the claim with the start time.", + "\tstartTime := time.Now()", + "\treturn \u0026claim.Root{", + "\t\tClaim: \u0026claim.Claim{", + "\t\t\tMetadata: \u0026claim.Metadata{", + "\t\t\t\tStartTime: startTime.UTC().Format(DateTimeFormatDirective),", + "\t\t\t},", + "\t\t},", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "MarshalConfigurations", + "kind": "function", + "source": [ + "func MarshalConfigurations(env *provider.TestEnvironment) (configurations []byte, err error) {", + "\tconfig := env", + "\tif config == nil {", + "\t\t*config = provider.GetTestEnvironment()", + "\t}", + "\tconfigurations, err = j.Marshal(\u0026config)", + "\tif err != nil {", + "\t\tlog.Error(\"Error converting configurations to JSON: %v\", err)", + "\t\treturn configurations, err", + "\t}", + "\treturn configurations, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "UnmarshalConfigurations", + "kind": "function", + "source": [ + "func UnmarshalConfigurations(configurations []byte, claimConfigurations map[string]interface{}) {", + "\terr := j.Unmarshal(configurations, \u0026claimConfigurations)", + "\tif err != nil {", + "\t\tlog.Fatal(\"error unmarshalling configurations: %v\", err)", + "\t}", + "}" + ] + }, + { + "name": "CreateClaimRoot", + "kind": "function", + "source": [ + "func 
CreateClaimRoot() *claim.Root {", + "\t// Initialize the claim with the start time.", + "\tstartTime := time.Now()", + "\treturn \u0026claim.Root{", + "\t\tClaim: \u0026claim.Claim{", + "\t\t\tMetadata: \u0026claim.Metadata{", + "\t\t\t\tStartTime: startTime.UTC().Format(DateTimeFormatDirective),", + "\t\t\t},", + "\t\t},", + "\t}", + "}" + ] + }, + { + "name": "GenerateNodes", + "kind": "function", + "source": [ + "func GenerateNodes() map[string]interface{} {", + "\tconst (", + "\t\tnodeSummaryField = \"nodeSummary\"", + "\t\tcniPluginsField = \"cniPlugins\"", + "\t\tnodesHwInfo = \"nodesHwInfo\"", + "\t\tcsiDriverInfo = \"csiDriver\"", + "\t)", + "\tnodes := map[string]interface{}{}", + "\tnodes[nodeSummaryField] = diagnostics.GetNodeJSON() // add node summary", + "\tnodes[cniPluginsField] = diagnostics.GetCniPlugins() // add cni plugins", + "\tnodes[nodesHwInfo] = diagnostics.GetHwInfoAllNodes() // add nodes hardware information", + "\tnodes[csiDriverInfo] = diagnostics.GetCsiDriver() // add csi drivers info", + "\treturn nodes", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetVersionOcClient", + "kind": "function", + "source": [ + "func GetVersionOcClient() (out string) {", + "\treturn \"n/a, (not using oc or kubectl client)\"", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetVersionOcp", + "kind": "function", + "source": [ + "func GetVersionOcp() (out string) {", + "\tenv := provider.GetTestEnvironment()", + "\tif !provider.IsOCPCluster() {", + "\t\treturn \"n/a, (non-OpenShift cluster)\"", + "\t}", + "\treturn env.OpenshiftVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetVersionK8s", + "kind": "function", + "source": [ + "func GetVersionK8s() (out string) {", + "\tenv := provider.GetTestEnvironment()", + "\treturn env.K8sVersion", + "}" + ] + } + ], 
+ "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. 
See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, 
filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + }, + { + "name": "ReadClaimFile", + "qualifiedName": "ReadClaimFile", + "exported": true, + "signature": "func(string)([]byte, error)", + "doc": "ReadClaimFile Reads the contents of a claim file\n\nThe function attempts to read a file at the provided path using standard I/O\noperations. 
It logs any errors encountered during reading but always returns\nthe data slice, even if an error occurs, leaving error handling to the\ncaller. A log entry records the file path that was accessed.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:402", + "calls": [ + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "GetConfigurationFromClaimFile", + "kind": "function", + "source": [ + "func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvironment, err error) {", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn env, err", + "\t}", + "\tvar aRoot claim.Root", + "\tfmt.Printf(\"%s\", data)", + "\tUnmarshalClaim(data, \u0026aRoot)", + "\tconfigJSON, err := j.Marshal(aRoot.Claim.Configurations)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot convert config to json\")", + "\t}", + "\terr = j.Unmarshal(configJSON, \u0026env)", + "\treturn env, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", 
claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ReadClaimFile(claimFileName string) (data []byte, err error) {", + "\tdata, err = os.ReadFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadFile failed with err: %v\", err)", + "\t}", + "\tlog.Info(\"Reading claim file at path: %s\", claimFileName)", + "\treturn data, nil", + "}" + ] + }, + { + "name": "SanitizeClaimFile", + "qualifiedName": "SanitizeClaimFile", + "exported": true, + "signature": "func(string, string)(string, error)", + "doc": "SanitizeClaimFile Removes results that do not match a labels filter\n\nThe function reads the claim file, unmarshals it into a structured claim\nobject, and then iterates over each test result. For every result it\nevaluates the provided label expression against the test’s labels; if the\nevaluation fails, that result is deleted from the claim. 
After filtering, the\nmodified claim is written back to the original file path, which is returned\nalong with any error encountered during processing.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:511", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "ReadClaimFile", + "kind": "function", + "source": [ + "func ReadClaimFile(claimFileName string) (data []byte, err error) {", + "\tdata, err = os.ReadFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadFile failed with err: %v\", err)", + "\t}", + "\tlog.Info(\"Reading claim file at path: %s\", claimFileName)", + "\treturn data, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "UnmarshalClaim", + "kind": "function", + "source": [ + "func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) {", + "\terr := j.Unmarshal(claimFile, \u0026claimRoot)", + "\tif err != nil {", + "\t\tlog.Fatal(\"error unmarshalling claim file: %v\", err)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels", + "name": "NewLabelsExprEvaluator", + "kind": "function", + "source": [ + "func NewLabelsExprEvaluator(labelsExpr string) (LabelsExprEvaluator, error) {", + "\tgoLikeExpr := strings.ReplaceAll(labelsExpr, \"-\", \"_\")", + "\tgoLikeExpr = strings.ReplaceAll(goLikeExpr, \",\", \"||\")", + "", + "\tnode, err := parser.ParseExpr(goLikeExpr)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to parse labels expression %s: %v\", labelsExpr, 
err)", + "\t}", + "", + "\treturn labelsExprParser{", + "\t\tastRootNode: node,", + "\t}, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "Eval", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "delete", + "kind": "function" + }, + { + "name": "WriteClaimOutput", + "kind": "function", + "source": [ + "func WriteClaimOutput(claimOutputFile string, payload []byte) {", + "\tlog.Info(\"Writing claim data to %s\", claimOutputFile)", + "\terr := os.WriteFile(claimOutputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing claim data:\\n%s\", string(payload))", + "\t}", + "}" + ] + }, + { + "name": "MarshalClaimOutput", + "kind": "function", + "source": [ + "func MarshalClaimOutput(claimRoot *claim.Root) []byte {", + "\tpayload, err := j.MarshalIndent(claimRoot, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the claim: %v\", err)", + "\t}", + "\treturn payload", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + }, + { + "name": "UnmarshalClaim", + "qualifiedName": "UnmarshalClaim", + "exported": true, + "signature": "func([]byte, *claim.Root)()", + "doc": "UnmarshalClaim parses a claim file into a structured root object\n\nThis function takes raw bytes of a claim file and a pointer to a Root\nstructure, attempting to unmarshal the data using JSON decoding. If\nunmarshalling fails, it logs a fatal error and terminates the program. 
On\nsuccess, the provided Root instance is populated with the decoded\ninformation.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:389", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "GetConfigurationFromClaimFile", + "kind": "function", + "source": [ + "func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvironment, err error) {", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn env, err", + "\t}", + "\tvar aRoot claim.Root", + "\tfmt.Printf(\"%s\", data)", + "\tUnmarshalClaim(data, \u0026aRoot)", + "\tconfigJSON, err := j.Marshal(aRoot.Claim.Configurations)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"cannot convert config to json\")", + "\t}", + "\terr = j.Unmarshal(configJSON, \u0026env)", + "\treturn env, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results 
that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) {", + "\terr := j.Unmarshal(claimFile, \u0026claimRoot)", + "\tif err != nil {", + "\t\tlog.Fatal(\"error unmarshalling claim file: %v\", err)", + "\t}", + "}" + ] + }, + { + "name": "UnmarshalConfigurations", + "qualifiedName": "UnmarshalConfigurations", + "exported": true, + "signature": "func([]byte, map[string]interface{})()", + "doc": "UnmarshalConfigurations converts a JSON byte stream into a map of configurations\n\nThe function takes raw configuration data as a byte slice and decodes it into\na provided map using the standard JSON unmarshaler. If decoding fails, it\nlogs a fatal error and terminates the program. 
The resulting map is populated\nwith key/value pairs representing configuration settings.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:375", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + } + ], + "usesTypes": null, + 
"usesGlobals": null, + "source": [ + "func UnmarshalConfigurations(configurations []byte, claimConfigurations map[string]interface{}) {", + "\terr := j.Unmarshal(configurations, \u0026claimConfigurations)", + "\tif err != nil {", + "\t\tlog.Fatal(\"error unmarshalling configurations: %v\", err)", + "\t}", + "}" + ] + }, + { + "name": "WriteClaimOutput", + "qualifiedName": "WriteClaimOutput", + "exported": true, + "signature": "func(string, []byte)()", + "doc": "WriteClaimOutput Saves claim payload to a file\n\nThis routine writes a byte slice containing claim data to the specified path\nusing standard file permissions. If the write fails, it logs a fatal error\nand terminates the program. The function provides no return value.", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:455", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "WriteFile", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ClaimBuilder.Build", + "kind": "function", + "source": [ + "func (c *ClaimBuilder) Build(outputFile string) {", + "\tendTime := time.Now()", + "", + "\tc.claimRoot.Claim.Metadata.EndTime = endTime.UTC().Format(DateTimeFormatDirective)", + "\tc.claimRoot.Claim.Results = checksdb.GetReconciledResults()", + "", 
+ "\t// Marshal the claim and output to file", + "\tpayload := MarshalClaimOutput(c.claimRoot)", + "\tWriteClaimOutput(outputFile, payload)", + "", + "\tlog.Info(\"Claim file created at %s\", outputFile)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func WriteClaimOutput(claimOutputFile string, payload []byte) {", + "\tlog.Info(\"Writing claim data to %s\", claimOutputFile)", + "\terr := os.WriteFile(claimOutputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing claim data:\\n%s\", string(payload))", + "\t}", + "}" + ] + }, + { + "name": "populateXMLFromClaim", + "qualifiedName": "populateXMLFromClaim", + "exported": false, 
+ "signature": "func(claim.Claim, time.Time, time.Time)(TestSuitesXML)", + "doc": "populateXMLFromClaim Builds a JUnit XML representation of claim test results\n\nThe function collects all test IDs from the claim, counts failures and skips,\nand constructs a TestSuitesXML structure with aggregated suite metrics. It\niterates over sorted test IDs to create individual TestCase entries,\ncalculating each case's duration and attaching skipped or failure messages as\nneeded. The resulting XML object is returned for marshaling into a file.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:220", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "FormatFloat", + "kind": "function" + }, + { + "name": "Seconds", + "kind": "function" + }, + { + "name": "Sub", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "FormatFloat", + "kind": "function" + }, + { + "name": "Seconds", + "kind": "function" + }, + { + "name": "Sub", + "kind": "function" + }, + { + "name": "Format", + "kind": "function" + }, + { + "name": "UTC", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "time", + "name": 
"Parse", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Parse", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Sub", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "FormatFloat", + "kind": "function" + }, + { + "name": "Seconds", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "ClaimBuilder.ToJUnitXML", + "kind": "function", + "source": [ + "func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Time) {", + "\t// Create the JUnit XML file from the claim output.", + "\txmlOutput := populateXMLFromClaim(*c.claimRoot.Claim, startTime, endTime)", + "", + "\t// Write the JUnit XML file.", + "\tpayload, err := xml.MarshalIndent(xmlOutput, \"\", \" \")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to generate the xml: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Writing JUnit XML file: %s\", outputFile)", + "\terr = os.WriteFile(outputFile, payload, claimFilePermissions)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to write the xml file\")", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) 
TestSuitesXML {", + "\tconst (", + "\t\tTestSuiteName = \"CNF Certification Test Suite\"", + "\t)", + "", + "\t// Collector all of the Test IDs", + "\tallTestIDs := []string{}", + "\tfor testID := range c.Results {", + "\t\tallTestIDs = append(allTestIDs, c.Results[testID].TestID.Id)", + "\t}", + "", + "\t// Sort the test IDs", + "\tsort.Strings(allTestIDs)", + "", + "\txmlOutput := TestSuitesXML{}", + "\t// \u003ctestsuites\u003e", + "\txmlOutput.Tests = strconv.Itoa(len(c.Results))", + "", + "\t// Count all of the failed tests in the suite", + "\tfailedTests := 0", + "\tfor testID := range c.Results {", + "\t\tif c.Results[testID].State == TestStateFailed {", + "\t\t\tfailedTests++", + "\t\t}", + "\t}", + "", + "\t// Count all of the skipped tests in the suite", + "\tskippedTests := 0", + "\tfor testID := range c.Results {", + "\t\tif c.Results[testID].State == TestStateSkipped {", + "\t\t\tskippedTests++", + "\t\t}", + "\t}", + "", + "\txmlOutput.Failures = strconv.Itoa(failedTests)", + "\txmlOutput.Disabled = strconv.Itoa(skippedTests)", + "\txmlOutput.Errors = strconv.Itoa(0)", + "\txmlOutput.Time = strconv.FormatFloat(endTime.Sub(startTime).Seconds(), 'f', 5, 64)", + "", + "\t// \u003ctestsuite\u003e", + "\txmlOutput.Testsuite.Name = TestSuiteName", + "\txmlOutput.Testsuite.Tests = strconv.Itoa(len(c.Results))", + "\t// Counters for failed and skipped tests", + "\txmlOutput.Testsuite.Failures = strconv.Itoa(failedTests)", + "\txmlOutput.Testsuite.Skipped = strconv.Itoa(skippedTests)", + "\txmlOutput.Testsuite.Errors = strconv.Itoa(0)", + "", + "\txmlOutput.Testsuite.Time = strconv.FormatFloat(endTime.Sub(startTime).Seconds(), 'f', 5, 64)", + "\txmlOutput.Testsuite.Timestamp = time.Now().UTC().Format(DateTimeFormatDirective)", + "", + "\t// \u003cproperties\u003e", + "", + "\t// \u003ctestcase\u003e", + "\t// Loop through all of the sorted test IDs", + "\tfor _, testID := range allTestIDs {", + "\t\ttestCase := TestCase{}", + "\t\ttestCase.Name = testID", + 
"\t\ttestCase.Classname = TestSuiteName", + "\t\ttestCase.Status = c.Results[testID].State", + "", + "\t\t// Clean the time strings to remove the \" m=\" suffix", + "\t\tstart, err := time.Parse(DateTimeFormatDirective, strings.Split(c.Results[testID].StartTime, \" m=\")[0])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse start time: %v\", err)", + "\t\t}", + "\t\tend, err := time.Parse(DateTimeFormatDirective, strings.Split(c.Results[testID].EndTime, \" m=\")[0])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse end time: %v\", err)", + "\t\t}", + "", + "\t\t// Calculate the duration of the test case", + "\t\tdifference := end.Sub(start)", + "\t\ttestCase.Time = strconv.FormatFloat(difference.Seconds(), 'f', 10, 64)", + "", + "\t\t// Populate the skipped message if the test case was skipped", + "\t\tif testCase.Status == TestStateSkipped {", + "\t\t\ttestCase.Skipped = \u0026SkippedMessage{}", + "\t\t\ttestCase.Skipped.Text = c.Results[testID].SkipReason", + "\t\t} else {", + "\t\t\ttestCase.Skipped = nil", + "\t\t}", + "", + "\t\t// Populate the failure message if the test case failed", + "\t\tif testCase.Status == TestStateFailed {", + "\t\t\ttestCase.Failure = \u0026FailureMessage{}", + "\t\t\ttestCase.Failure.Text = c.Results[testID].CheckDetails", + "\t\t} else {", + "\t\t\ttestCase.Failure = nil", + "\t\t}", + "", + "\t\t// Append the test case to the test suite", + "\t\txmlOutput.Testsuite.Testcase = append(xmlOutput.Testsuite.Testcase, testCase)", + "\t}", + "", + "\treturn xmlOutput", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "CNFFeatureValidationJunitXMLFileName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:42" + }, + { + "name": "CNFFeatureValidationReportKey", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:43" + }, + { + "name": "DateTimeFormatDirective", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:45" + }, + { + "name": "TestStateFailed", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:48" + }, + { + "name": "TestStateSkipped", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:49" + }, + { + "name": "claimFilePermissions", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/claimhelper/claimhelper.go:41" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector", + "name": "collector", + "files": 1, + "imports": [ + "bytes", + "io", + "mime/multipart", + "net/http", + "os", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "SendClaimFileToCollector", + "qualifiedName": "SendClaimFileToCollector", + "exported": true, + "signature": "func(string, string, string, string, string)(error)", + "doc": "SendClaimFileToCollector Sends a claim file to a collector endpoint\n\nThe function builds an HTTP POST request that includes the claim file and\nauthentication fields, then executes it with a timeout. 
It returns any error\nencountered during request creation or execution; successful completion\nresults in nil.", + "position": "/Users/deliedit/dev/certsuite/pkg/collector/collector.go:116", + "calls": [ + { + "name": "createSendToCollectorPostRequest", + "kind": "function", + "source": [ + "func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password string) (*http.Request, error) {", + "\t// Create a new buffer to hold the form-data", + "\tvar buffer bytes.Buffer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\t// Add the claim file to the request", + "\terr := addClaimFileToPostRequest(w, claimFilePath)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\t// Add the executed by, partner name and password fields to the request", + "\terr = addVarFieldsToPostRequest(w, executedBy, partnerName, password)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create POST request with the form-data as body", + "\treq, err := http.NewRequest(\"POST\", endPoint, \u0026buffer)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "", + "\treturn req, nil", + "}" + ] + }, + { + "name": "Do", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err 
!= nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SendClaimFileToCollector(endPoint, claimFilePath, executedBy, partnerName, password string) error {", + "\t// Temporary end point", + "\tpostReq, err := createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: collectorUploadTimeout, // 30 second timeout for collector uploads", + "\t}", + "\tresp, err := client.Do(postReq)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer resp.Body.Close()", + "\treturn nil", + "}" + ] + }, + { + "name": "addClaimFileToPostRequest", + "qualifiedName": "addClaimFileToPostRequest", + "exported": false, + "signature": "func(*multipart.Writer, string)(error)", + "doc": "addClaimFileToPostRequest Adds a claim file as multipart form data\n\nThe function opens the specified file, creates a new part in the multipart\nwriter using that file's name, copies the file contents into the part, and\nthen returns any error encountered during these steps. It closes the file\nautomatically with defer to avoid resource leaks. 
The result is ready for\ninclusion in an HTTP POST request.", + "position": "/Users/deliedit/dev/certsuite/pkg/collector/collector.go:24", + "calls": [ + { + "pkgPath": "os", + "name": "Open", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "name": "CreateFormFile", + "kind": "function" + }, + { + "pkgPath": "io", + "name": "Copy", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector", + "name": "createSendToCollectorPostRequest", + "kind": "function", + "source": [ + "func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password string) (*http.Request, error) {", + "\t// Create a new buffer to hold the form-data", + "\tvar buffer bytes.Buffer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\t// Add the claim file to the request", + "\terr := addClaimFileToPostRequest(w, claimFilePath)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\t// Add the executed by, partner name and password fields to the request", + "\terr = addVarFieldsToPostRequest(w, executedBy, partnerName, password)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create POST request with the form-data as body", + "\treq, err := http.NewRequest(\"POST\", endPoint, \u0026buffer)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "", + "\treturn req, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addClaimFileToPostRequest(w *multipart.Writer, claimFilePath string) error {", + "\tclaimFile, err := os.Open(claimFilePath)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "\tfw, err := w.CreateFormFile(\"claimFile\", claimFilePath)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = io.Copy(fw, claimFile); err != nil 
{", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "addVarFieldsToPostRequest", + "qualifiedName": "addVarFieldsToPostRequest", + "exported": false, + "signature": "func(*multipart.Writer, string, string, string)(error)", + "doc": "addVarFieldsToPostRequest Adds form fields for execution details\n\nThis function writes three key-value pairs into a multipart request: the user\nwho executed the operation, the partner name, and the decoded password. It\ncreates each field using the writer's CreateFormField method and then writes\nthe corresponding string value. If any step fails it returns an error;\notherwise it completes silently.", + "position": "/Users/deliedit/dev/certsuite/pkg/collector/collector.go:47", + "calls": [ + { + "name": "CreateFormField", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "name": "CreateFormField", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "name": "CreateFormField", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector", + "name": "createSendToCollectorPostRequest", + "kind": "function", + "source": [ + "func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password string) (*http.Request, error) {", + "\t// Create a new buffer to hold the form-data", + "\tvar buffer bytes.Buffer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\t// Add the claim file to the request", + "\terr := addClaimFileToPostRequest(w, claimFilePath)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\t// Add the executed by, partner name and password fields to the request", + "\terr = addVarFieldsToPostRequest(w, executedBy, partnerName, password)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create POST request with the form-data as 
body", + "\treq, err := http.NewRequest(\"POST\", endPoint, \u0026buffer)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "", + "\treturn req, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addVarFieldsToPostRequest(w *multipart.Writer, executedBy, partnerName, password string) error {", + "\tfw, err := w.CreateFormField(\"executed_by\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = fw.Write([]byte(executedBy)); err != nil {", + "\t\treturn err", + "\t}", + "", + "\tfw, err = w.CreateFormField(\"partner_name\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = fw.Write([]byte(partnerName)); err != nil {", + "\t\treturn err", + "\t}", + "", + "\tfw, err = w.CreateFormField(\"decoded_password\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = fw.Write([]byte(password)); err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "createSendToCollectorPostRequest", + "qualifiedName": "createSendToCollectorPostRequest", + "exported": false, + "signature": "func(string, string, string, string, string)(*http.Request, error)", + "doc": "createSendToCollectorPostRequest Creates a multipart POST request to upload a claim file\n\nThis function builds an HTTP POST request with form-data that includes the\nspecified claim file and several text fields: executed_by, partner_name, and\ndecoded_password. 
It writes these parts into a buffer using a multipart\nwriter, sets the appropriate content type header, and returns the constructed\nrequest or an error if any step fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/collector/collector.go:81", + "calls": [ + { + "pkgPath": "mime/multipart", + "name": "NewWriter", + "kind": "function" + }, + { + "name": "addClaimFileToPostRequest", + "kind": "function", + "source": [ + "func addClaimFileToPostRequest(w *multipart.Writer, claimFilePath string) error {", + "\tclaimFile, err := os.Open(claimFilePath)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer claimFile.Close()", + "\tfw, err := w.CreateFormFile(\"claimFile\", claimFilePath)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = io.Copy(fw, claimFile); err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "addVarFieldsToPostRequest", + "kind": "function", + "source": [ + "func addVarFieldsToPostRequest(w *multipart.Writer, executedBy, partnerName, password string) error {", + "\tfw, err := w.CreateFormField(\"executed_by\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = fw.Write([]byte(executedBy)); err != nil {", + "\t\treturn err", + "\t}", + "", + "\tfw, err = w.CreateFormField(\"partner_name\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = fw.Write([]byte(partnerName)); err != nil {", + "\t\treturn err", + "\t}", + "", + "\tfw, err = w.CreateFormField(\"decoded_password\")", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tif _, err = fw.Write([]byte(password)); err != nil {", + "\t\treturn err", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "NewRequest", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "FormDataContentType", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector", + "name": "SendClaimFileToCollector", + "kind": "function", + "source": [ + "func SendClaimFileToCollector(endPoint, claimFilePath, executedBy, partnerName, password string) error {", + "\t// Temporary end point", + "\tpostReq, err := createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "", + "\tclient := \u0026http.Client{", + "\t\tTimeout: collectorUploadTimeout, // 30 second timeout for collector uploads", + "\t}", + "\tresp, err := client.Do(postReq)", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tdefer resp.Body.Close()", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password string) (*http.Request, error) {", + "\t// Create a new buffer to hold the form-data", + "\tvar buffer bytes.Buffer", + "\tw := multipart.NewWriter(\u0026buffer)", + "", + "\t// Add the claim file to the request", + "\terr := addClaimFileToPostRequest(w, claimFilePath)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\t// Add the executed by, partner name and password fields to the request", + "\terr = addVarFieldsToPostRequest(w, executedBy, partnerName, password)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tw.Close()", + "", + "\t// Create POST request with the form-data as body", + "\treq, err := http.NewRequest(\"POST\", endPoint, \u0026buffer)", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "\treq.Header.Set(\"Content-Type\", w.FormDataContentType())", + "", + "\treturn req, nil", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "collectorUploadTimeout", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/collector/collector.go:14" + } + ] + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "compatibility", + "files": 1, + "imports": [ + "github.com/hashicorp/go-version", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "strings", + "time" + ], + "structs": [ + { + "name": "VersionInfo", + "exported": true, + "doc": "VersionInfo Holds release cycle dates and supported OS versions\n\nThis structure stores the General Availability, Full Support Ends, and\nMaintenance Support Ends dates along with minimum supported RHCOS version and\na list of accepted RHEL versions. The date fields are time.Time values that\nindicate key lifecycle milestones for an OpenShift product. The string slice\nrecords which Red Hat Enterprise Linux releases are compatible, allowing\ncallers to validate platform compatibility.", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:51", + "fields": { + "FSEDate": "time.Time", + "GADate": "time.Time", + "MSEDate": "time.Time", + "MinRHCOSVersion": "string", + "RHELVersionsAccepted": "[]string" + }, + "methodNames": null, + "source": [ + "type VersionInfo struct {", + "\tGADate time.Time // General Availability Date", + "\tFSEDate time.Time // Full Support Ends Date", + "\tMSEDate time.Time // Maintenance Support Ends Date", + "", + "\tMinRHCOSVersion string // Minimum RHCOS Version supported", + "\tRHELVersionsAccepted []string // Contains either specific versions or a minimum version eg. 
\"7.9 or later\" or \"7.9 and 8.4\"", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "BetaRHCOSVersionsFoundToMatch", + "qualifiedName": "BetaRHCOSVersionsFoundToMatch", + "exported": true, + "signature": "func(string, string)(bool)", + "doc": "BetaRHCOSVersionsFoundToMatch Determines if both machine and OCP versions are beta releases that match\n\nThe function reduces each input to its major.minor form and checks whether\nthese truncated versions appear in a predefined list of beta releases. If\neither version is not listed, it returns false. When both are present, it\nconfirms they are identical and returns true.", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:264", + "calls": [ + { + "name": "FindMajorMinor", + "kind": "function", + "source": [ + "func FindMajorMinor(version string) string {", + "\tsplitVersion := strings.Split(version, \".\")", + "\treturn splitVersion[0] + \".\" + splitVersion[1]", + "}" + ] + }, + { + "name": "FindMajorMinor", + "kind": "function", + "source": [ + "func FindMajorMinor(version string) string {", + "\tsplitVersion := strings.Split(version, \".\")", + "\treturn splitVersion[0] + \".\" + splitVersion[1]", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str 
T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "IsRHCOSCompatible", + "kind": "function", + "source": [ + "func IsRHCOSCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\t// Exception for beta versions", + "\tif BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion) {", + "\t\treturn true", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\t// Collect the machine version and the entry version", + "\t\tmv, err := gv.NewVersion(machineVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing machineVersion: %s err: %v\", machineVersion, err)", + "\t\t\treturn false", + "\t\t}", + "\t\tev, err := gv.NewVersion(entry.MinRHCOSVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing MinRHCOSVersion: %s err: %v\", entry.MinRHCOSVersion, err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// If the machine version \u003e= the entry version", + "\t\treturn mv.GreaterThanOrEqual(ev)", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool {", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "\tmachineVersion = FindMajorMinor(machineVersion)", + "", + 
"\t// Check if the versions exist in the beta list", + "\tif !stringhelper.StringInSlice(ocpBetaVersions, ocpVersion, false) || !stringhelper.StringInSlice(ocpBetaVersions, machineVersion, false) {", + "\t\treturn false", + "\t}", + "", + "\t// Check if the versions match", + "\treturn ocpVersion == machineVersion", + "}" + ] + }, + { + "name": "DetermineOCPStatus", + "qualifiedName": "DetermineOCPStatus", + "exported": true, + "signature": "func(string, time.Time)(string)", + "doc": "DetermineOCPStatus Determine the support status of an OpenShift version based on lifecycle dates\n\nThe function accepts a version string and a date, normalizes the version to\nmajor.minor form, looks up lifecycle information from a local map, then\ncompares the provided date against GA, FSE, and MSE milestones. It returns\none of several status strings indicating whether the version is pre‑GA,\ngenerally available, in maintenance support, or end‑of‑life. If the input\nis empty or not found in the map, an unknown status is returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:373", + "calls": [ + { + "name": "IsZero", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "GetLifeCycleDates", + "kind": "function", + "source": [ + "func GetLifeCycleDates() map[string]VersionInfo {", + "\treturn ocpLifeCycleDates", + "}" + ] + }, + { + "name": "IsZero", + "kind": "function" + }, + { + "name": "Before", + "kind": "function" + }, + { + "name": "Equal", + "kind": "function" + }, + { + "name": "After", + "kind": "function" + }, + { + "name": "Before", + "kind": "function" + }, + { + "name": "Equal", + "kind": "function" + }, + { + "name": "After", + "kind": "function" + }, + { + "name": "Before", + "kind": "function" + }, + { + "name": "Equal", + "kind": "function" + }, + { + "name": "After", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + 
"\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), 
nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + "\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + 
"\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + "\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot 
get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func DetermineOCPStatus(version string, date time.Time) string {", + "\t// Safeguard against empty values being passed in", + "\tif version == \"\" || date.IsZero() {", + "\t\treturn OCPStatusUnknown", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tsplitVersion := strings.Split(version, \".\")", + "\tversion = splitVersion[0] + \".\" + splitVersion[1]", + "", + "\t// Check if the version exists in our local map", + "\tlifecycleDates := GetLifeCycleDates()", + "\tif entry, ok := lifecycleDates[version]; ok {", + "\t\t// Safeguard against the latest versions not having a 
date set for FSEDate set.", + "\t\t// See the OpenShift lifecycle website link (above) for more details on this.", + "\t\tif entry.FSEDate.IsZero() {", + "\t\t\tentry.FSEDate = entry.MSEDate", + "\t\t}", + "", + "\t\t// Pre-GA", + "\t\tif date.Before(entry.GADate) {", + "\t\t\treturn OCPStatusPreGA", + "\t\t}", + "\t\t// Generally Available", + "\t\tif date.Equal(entry.GADate) || date.After(entry.GADate) \u0026\u0026 date.Before(entry.FSEDate) {", + "\t\t\treturn OCPStatusGA", + "\t\t}", + "\t\t// Maintenance Support", + "\t\tif date.Equal(entry.FSEDate) || (date.After(entry.FSEDate) \u0026\u0026 date.Before(entry.MSEDate)) {", + "\t\t\treturn OCPStatusMS", + "\t\t}", + "\t\t// End of Life", + "\t\tif date.Equal(entry.MSEDate) || date.After(entry.MSEDate) {", + "\t\t\treturn OCPStatusEOL", + "\t\t}", + "\t}", + "", + "\treturn OCPStatusUnknown", + "}" + ] + }, + { + "name": "FindMajorMinor", + "qualifiedName": "FindMajorMinor", + "exported": true, + "signature": "func(string)(string)", + "doc": "FindMajorMinor Extracts the major and minor components of a version string\n\nThe function splits an input string on periods, then concatenates the first\ntwo segments separated by a dot to form a \"major.minor\" representation. It is\nused to normalize full version strings before comparison or lookup. 
The\nreturned value is a plain string containing only the major and minor parts.", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:318", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "BetaRHCOSVersionsFoundToMatch", + "kind": "function", + "source": [ + "func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool {", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "\tmachineVersion = FindMajorMinor(machineVersion)", + "", + "\t// Check if the versions exist in the beta list", + "\tif !stringhelper.StringInSlice(ocpBetaVersions, ocpVersion, false) || !stringhelper.StringInSlice(ocpBetaVersions, machineVersion, false) {", + "\t\treturn false", + "\t}", + "", + "\t// Check if the versions match", + "\treturn ocpVersion == machineVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "IsRHCOSCompatible", + "kind": "function", + "source": [ + "func IsRHCOSCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\t// Exception for beta versions", + "\tif BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion) {", + "\t\treturn true", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\t// Collect the machine version and the entry version", + "\t\tmv, err := gv.NewVersion(machineVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing machineVersion: %s err: %v\", machineVersion, err)", + "\t\t\treturn false", + "\t\t}", + "\t\tev, err := 
gv.NewVersion(entry.MinRHCOSVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing MinRHCOSVersion: %s err: %v\", entry.MinRHCOSVersion, err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// If the machine version \u003e= the entry version", + "\t\treturn mv.GreaterThanOrEqual(ev)", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FindMajorMinor(version string) string {", + "\tsplitVersion := strings.Split(version, \".\")", + "\treturn splitVersion[0] + \".\" + splitVersion[1]", + "}" + ] + }, + { + "name": "GetLifeCycleDates", + "qualifiedName": "GetLifeCycleDates", + "exported": true, + "signature": "func()(map[string]VersionInfo)", + "doc": "GetLifeCycleDates Retrieves a map of OpenShift version lifecycle information\n\nThis function returns a predefined mapping that associates each major.minor\nOpenShift release with its lifecycle dates, minimum supported RHEL versions,\nand accepted RHEL releases. The returned data structure is used by other\nfunctions to determine compatibility status for clusters, machines, and\noperating systems. 
No parameters are required, and the map is returned\ndirectly from an internal variable.", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:254", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "DetermineOCPStatus", + "kind": "function", + "source": [ + "func DetermineOCPStatus(version string, date time.Time) string {", + "\t// Safeguard against empty values being passed in", + "\tif version == \"\" || date.IsZero() {", + "\t\treturn OCPStatusUnknown", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tsplitVersion := strings.Split(version, \".\")", + "\tversion = splitVersion[0] + \".\" + splitVersion[1]", + "", + "\t// Check if the version exists in our local map", + "\tlifecycleDates := GetLifeCycleDates()", + "\tif entry, ok := lifecycleDates[version]; ok {", + "\t\t// Safeguard against the latest versions not having a date set for FSEDate set.", + "\t\t// See the OpenShift lifecycle website link (above) for more details on this.", + "\t\tif entry.FSEDate.IsZero() {", + "\t\t\tentry.FSEDate = entry.MSEDate", + "\t\t}", + "", + "\t\t// Pre-GA", + "\t\tif date.Before(entry.GADate) {", + "\t\t\treturn OCPStatusPreGA", + "\t\t}", + "\t\t// Generally Available", + "\t\tif date.Equal(entry.GADate) || date.After(entry.GADate) \u0026\u0026 date.Before(entry.FSEDate) {", + "\t\t\treturn OCPStatusGA", + "\t\t}", + "\t\t// Maintenance Support", + "\t\tif date.Equal(entry.FSEDate) || (date.After(entry.FSEDate) \u0026\u0026 date.Before(entry.MSEDate)) {", + "\t\t\treturn OCPStatusMS", + "\t\t}", + "\t\t// End of Life", + "\t\tif date.Equal(entry.MSEDate) || date.After(entry.MSEDate) {", + "\t\t\treturn OCPStatusEOL", + "\t\t}", + "\t}", + "", + "\treturn OCPStatusUnknown", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": 
"IsRHCOSCompatible", + "kind": "function", + "source": [ + "func IsRHCOSCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\t// Exception for beta versions", + "\tif BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion) {", + "\t\treturn true", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\t// Collect the machine version and the entry version", + "\t\tmv, err := gv.NewVersion(machineVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing machineVersion: %s err: %v\", machineVersion, err)", + "\t\t\treturn false", + "\t\t}", + "\t\tev, err := gv.NewVersion(entry.MinRHCOSVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing MinRHCOSVersion: %s err: %v\", entry.MinRHCOSVersion, err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// If the machine version \u003e= the entry version", + "\t\treturn mv.GreaterThanOrEqual(ev)", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "IsRHELCompatible", + "kind": "function", + "source": [ + "func IsRHELCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\tif len(entry.RHELVersionsAccepted) \u003e= 2 { //nolint:mnd", + "\t\t\t// Need to be a specific major.minor version", + "\t\t\tfor _, v := range entry.RHELVersionsAccepted {", + "\t\t\t\tif v == machineVersion {", + "\t\t\t\t\treturn true", + "\t\t\t\t}", + "\t\t\t}", + "\t\t} else {", + "\t\t\t// Collect the machine version 
and the entry version", + "\t\t\tmv, _ := gv.NewVersion(machineVersion)", + "\t\t\tev, _ := gv.NewVersion(entry.RHELVersionsAccepted[0])", + "", + "\t\t\t// If the machine version \u003e= the entry version", + "\t\t\treturn mv.GreaterThanOrEqual(ev)", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetLifeCycleDates() map[string]VersionInfo {", + "\treturn ocpLifeCycleDates", + "}" + ] + }, + { + "name": "IsRHCOSCompatible", + "qualifiedName": "IsRHCOSCompatible", + "exported": true, + "signature": "func(string, string)(bool)", + "doc": "IsRHCOSCompatible Determines if a machine’s RHCOS version is supported for a given OpenShift release\n\nThe function checks whether the supplied machine version meets the minimum\nrequired RHCOS version for the specified OpenShift version. It first handles\nbeta releases by comparing major.minor versions, then looks up lifecycle data\nto retrieve the minimum acceptable RHCOS version and verifies compatibility\nusing semantic version comparison. 
If any validation fails, it logs an error\nand returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:331", + "calls": [ + { + "name": "BetaRHCOSVersionsFoundToMatch", + "kind": "function", + "source": [ + "func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool {", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "\tmachineVersion = FindMajorMinor(machineVersion)", + "", + "\t// Check if the versions exist in the beta list", + "\tif !stringhelper.StringInSlice(ocpBetaVersions, ocpVersion, false) || !stringhelper.StringInSlice(ocpBetaVersions, machineVersion, false) {", + "\t\treturn false", + "\t}", + "", + "\t// Check if the versions match", + "\treturn ocpVersion == machineVersion", + "}" + ] + }, + { + "name": "FindMajorMinor", + "kind": "function", + "source": [ + "func FindMajorMinor(version string) string {", + "\tsplitVersion := strings.Split(version, \".\")", + "\treturn splitVersion[0] + \".\" + splitVersion[1]", + "}" + ] + }, + { + "name": "GetLifeCycleDates", + "kind": "function", + "source": [ + "func GetLifeCycleDates() map[string]VersionInfo {", + "\treturn ocpLifeCycleDates", + "}" + ] + }, + { + "pkgPath": "github.com/hashicorp/go-version", + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/hashicorp/go-version", + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "GreaterThanOrEqual", + "kind": "function" + } + ], + 
"calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testNodeOperatingSystemStatus", + "kind": "function", + "source": [ + "func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfailedControlPlaneNodes := []string{}", + "\tfailedWorkerNodes := []string{}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, node := range env.Nodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\t// Get the OSImage which should tell us what version of operating system the node is running.", + "\t\tcheck.LogInfo(\"Node %q is running operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "", + "\t\t// Control plane nodes must be RHCOS (also CentOS Stream starting in OCP 4.13)", + "\t\t// Per the release notes from OCP documentation:", + "\t\t// \"You must use RHCOS machines for the control plane, and you can use either RHCOS or RHEL for compute machines.\"", + "\t\tif node.IsControlPlaneNode() \u0026\u0026 !node.IsRHCOS() \u0026\u0026 !node.IsCSCOS() {", + "\t\t\tcheck.LogError(\"Control plane node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\tfailedControlPlaneNodes = append(failedControlPlaneNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Control plane node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Worker nodes can either be RHEL or RHCOS", + "\t\tif node.IsWorkerNode() {", + "\t\t\t//nolint:gocritic", + "\t\t\tif node.IsRHCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHCOSVersion()", + "\t\t\t\tif err != nil 
{", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHCOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHCOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tif shortVersion == operatingsystem.NotFoundStr {", + "\t\t\t\t\tcheck.LogInfo(\"Node %q has an RHCOS operating system that is not found in our internal database. Skipping as to not cause failures due to database mismatch.\", nodeName)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHCOS version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHCOS shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHCOSCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).", + "\t\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).", + "\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t} else if node.IsCSCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := 
node.GetCSCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather CentOS Stream CoreOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather CentOS Stream CoreOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// Warning: CentOS Stream CoreOS has not been released yet in any", + "\t\t\t\t// OCP RC/GA versions, so for the moment, we cannot compare the", + "\t\t\t\t// version with the OCP one, or retrieve it on the internal database", + "\t\t\t\tmsg := `", + "\t\t\t\t\tNode %s is using CentOS Stream CoreOS %s, which is not being used yet in any", + "\t\t\t\t\tOCP RC/GA version. Relaxing the conditions to check the OS as a result.", + "\t\t\t\t\t`", + "\t\t\t\tcheck.LogDebug(msg, nodeName, shortVersion)", + "\t\t\t} else if node.IsRHEL() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHELVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHEL version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHEL version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHEL version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHEL shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHELCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = 
append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tif n := len(failedControlPlaneNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of control plane nodes running non-RHCOS based operating systems: %d\", n)", + "\t}", + "", + "\tif n := len(failedWorkerNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of worker nodes running non-RHCOS or non-RHEL based operating systems: %d\", n)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsRHCOSCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\t// Exception for beta versions", + "\tif BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion) {", + "\t\treturn 
true", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\t// Collect the machine version and the entry version", + "\t\tmv, err := gv.NewVersion(machineVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing machineVersion: %s err: %v\", machineVersion, err)", + "\t\t\treturn false", + "\t\t}", + "\t\tev, err := gv.NewVersion(entry.MinRHCOSVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing MinRHCOSVersion: %s err: %v\", entry.MinRHCOSVersion, err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// If the machine version \u003e= the entry version", + "\t\treturn mv.GreaterThanOrEqual(ev)", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "IsRHELCompatible", + "qualifiedName": "IsRHELCompatible", + "exported": true, + "signature": "func(string, string)(bool)", + "doc": "IsRHELCompatible Determines if a machine’s RHEL version is supported for a given OpenShift release\n\nThe function takes the short RHEL version of a node and an OpenShift cluster\nversion, then checks against a lifecycle database to see if that RHEL release\nis accepted. If multiple RHEL versions are listed for the OpenShift release\nit requires an exact match; otherwise it compares major.minor numbers to\nensure the machine version is not older. 
It returns true only when the\nversion criteria are satisfied, otherwise false.", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:285", + "calls": [ + { + "name": "GetLifeCycleDates", + "kind": "function", + "source": [ + "func GetLifeCycleDates() map[string]VersionInfo {", + "\treturn ocpLifeCycleDates", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/hashicorp/go-version", + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "github.com/hashicorp/go-version", + "name": "NewVersion", + "kind": "function" + }, + { + "name": "GreaterThanOrEqual", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testNodeOperatingSystemStatus", + "kind": "function", + "source": [ + "func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfailedControlPlaneNodes := []string{}", + "\tfailedWorkerNodes := []string{}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, node := range env.Nodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\t// Get the OSImage which should tell us what version of operating system the node is running.", + "\t\tcheck.LogInfo(\"Node %q is running operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "", + "\t\t// Control plane nodes must be RHCOS (also CentOS Stream starting in OCP 4.13)", + "\t\t// Per the release notes from OCP documentation:", + "\t\t// \"You must use RHCOS machines for the control plane, and you can use either RHCOS or RHEL for compute machines.\"", + "\t\tif node.IsControlPlaneNode() \u0026\u0026 !node.IsRHCOS() \u0026\u0026 !node.IsCSCOS() {", + "\t\t\tcheck.LogError(\"Control plane node %q has been found to be running an incompatible operating system %q\", nodeName, 
node.Data.Status.NodeInfo.OSImage)", + "\t\t\tfailedControlPlaneNodes = append(failedControlPlaneNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Control plane node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Worker nodes can either be RHEL or RHCOS", + "\t\tif node.IsWorkerNode() {", + "\t\t\t//nolint:gocritic", + "\t\t\tif node.IsRHCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHCOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHCOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tif shortVersion == operatingsystem.NotFoundStr {", + "\t\t\t\t\tcheck.LogInfo(\"Node %q has an RHCOS operating system that is not found in our internal database. 
Skipping as to not cause failures due to database mismatch.\", nodeName)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHCOS version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHCOS shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHCOSCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).", + "\t\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).", + "\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t} else if node.IsCSCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetCSCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather CentOS Stream CoreOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather CentOS Stream CoreOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// Warning: CentOS Stream CoreOS has not been released yet in any", + "\t\t\t\t// OCP RC/GA 
versions, so for the moment, we cannot compare the", + "\t\t\t\t// version with the OCP one, or retrieve it on the internal database", + "\t\t\t\tmsg := `", + "\t\t\t\t\tNode %s is using CentOS Stream CoreOS %s, which is not being used yet in any", + "\t\t\t\t\tOCP RC/GA version. Relaxing the conditions to check the OS as a result.", + "\t\t\t\t\t`", + "\t\t\t\tcheck.LogDebug(msg, nodeName, shortVersion)", + "\t\t\t} else if node.IsRHEL() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHELVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHEL version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHEL version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHEL version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHEL shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHELCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to 
be running a compatible OS\", true).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tif n := len(failedControlPlaneNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of control plane nodes running non-RHCOS based operating systems: %d\", n)", + "\t}", + "", + "\tif n := len(failedWorkerNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of worker nodes running non-RHCOS or non-RHEL based operating systems: %d\", n)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsRHELCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\tif len(entry.RHELVersionsAccepted) \u003e= 2 { //nolint:mnd", + "\t\t\t// Need to be a specific major.minor version", + "\t\t\tfor _, v := range entry.RHELVersionsAccepted {", + "\t\t\t\tif v == machineVersion {", + "\t\t\t\t\treturn true", + "\t\t\t\t}", + "\t\t\t}", + "\t\t} else {", + "\t\t\t// Collect the machine version and the entry version", + "\t\t\tmv, _ := gv.NewVersion(machineVersion)", + "\t\t\tev, _ := gv.NewVersion(entry.RHELVersionsAccepted[0])", + "", + "\t\t\t// If the machine version \u003e= the entry version", + "\t\t\treturn mv.GreaterThanOrEqual(ev)", + 
"\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "globals": [ + { + "name": "ocpBetaVersions", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:61" + }, + { + "name": "ocpLifeCycleDates", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:62" + } + ], + "consts": [ + { + "name": "OCPStatusEOL", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:38" + }, + { + "name": "OCPStatusGA", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:36" + }, + { + "name": "OCPStatusMS", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:37" + }, + { + "name": "OCPStatusPreGA", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:40" + }, + { + "name": "OCPStatusUnknown", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/compatibility/compatibility.go:39" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "configuration", + "files": 3, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "gopkg.in/yaml.v3", + "os", + "time" + ], + "structs": [ + { + "name": "AcceptedKernelTaintsInfo", + "exported": true, + "doc": "AcceptedKernelTaintsInfo stores information about kernel module taints used in tests\n\nThis structure holds the name of a kernel module that, when loaded, causes\nspecific taints on nodes. The module field is used by the test suite to\nidentify which taints should be accepted during certification testing. 
It\nfacilitates configuration of test environments that require certain kernel\nbehavior.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:44", + "fields": { + "Module": "string" + }, + "methodNames": null, + "source": [ + "type AcceptedKernelTaintsInfo struct {", + "", + "\t// Accepted modules that cause taints that we want to supply to the test suite", + "\tModule string `yaml:\"module\" json:\"module\"`", + "}" + ] + }, + { + "name": "ConnectAPIConfig", + "exported": true, + "doc": "ConnectAPIConfig configuration holder for accessing the Red Hat Connect API\n\nIt stores the credentials, project identifier, endpoint address, and optional\nproxy settings required to communicate with the Red Hat Connect service. Each\nfield is mapped to YAML and JSON keys so it can be loaded from configuration\nfiles or environment variables.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:113", + "fields": { + "APIKey": "string", + "BaseURL": "string", + "ProjectID": "string", + "ProxyPort": "string", + "ProxyURL": "string" + }, + "methodNames": null, + "source": [ + "type ConnectAPIConfig struct {", + "\t// APIKey is the API key for the Red Hat Connect", + "\tAPIKey string `yaml:\"apiKey\" json:\"apiKey\"`", + "\t// ProjectID is the project ID for the Red Hat Connect", + "\tProjectID string `yaml:\"projectID\" json:\"projectID\"`", + "\t// BaseURL is the base URL for the Red Hat Connect API", + "\tBaseURL string `yaml:\"baseURL\" json:\"baseURL\"`", + "\t// ProxyURL is the proxy URL for the Red Hat Connect API", + "\tProxyURL string `yaml:\"proxyURL\" json:\"proxyURL\"`", + "\t// ProxyPort is the proxy port for the Red Hat Connect API", + "\tProxyPort string `yaml:\"proxyPort\" json:\"proxyPort\"`", + "}" + ] + }, + { + "name": "CrdFilter", + "exported": true, + "doc": "CrdFilter filters CustomResourceDefinitions by name suffix and scaling capability\n\nThis structure holds criteria for selecting CRDs from a 
configuration. The\nNameSuffix field specifies a string that must appear at the end of a CRD’s\nname to be considered a match. The Scalable boolean indicates whether only\nscalable CRDs should be included in the filtered set.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:92", + "fields": { + "NameSuffix": "string", + "Scalable": "bool" + }, + "methodNames": null, + "source": [ + "type CrdFilter struct {", + "\tNameSuffix string `yaml:\"nameSuffix\" json:\"nameSuffix\"`", + "\tScalable bool `yaml:\"scalable\" json:\"scalable\"`", + "}" + ] + }, + { + "name": "ManagedDeploymentsStatefulsets", + "exported": true, + "doc": "ManagedDeploymentsStatefulsets Represents the identifier of a StatefulSet in a managed deployment\n\nThis structure stores the name of a Kubernetes StatefulSet that should be\ntracked or controlled by the system. It is used as part of configuration\ndata, typically loaded from YAML or JSON files, to specify which stateful\nsets are relevant for monitoring or management tasks.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:103", + "fields": { + "Name": "string" + }, + "methodNames": null, + "source": [ + "type ManagedDeploymentsStatefulsets struct {", + "\tName string `yaml:\"name\" json:\"name\"`", + "}" + ] + }, + { + "name": "Namespace", + "exported": true, + "doc": "Namespace Represents a Kubernetes namespace configuration\n\nThis structure holds information about a single namespace, primarily its name\nused for identification in the cluster. The name is serialized to YAML or\nJSON under the key \"name\". 
It serves as a basic unit for configuring\nnamespace-specific settings within the application.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:82", + "fields": { + "Name": "string" + }, + "methodNames": null, + "source": [ + "type Namespace struct {", + "\tName string `yaml:\"name\" json:\"name\"`", + "}" + ] + }, + { + "name": "SkipHelmChartList", + "exported": true, + "doc": "SkipHelmChartList Specifies a Helm chart to exclude from catalog checks\n\nThis structure holds the identifier for an operator bundle package or image\nversion that should be omitted when verifying existence against the RedHat\ncatalog. The Name field contains the exact name used in the catalog lookup.\nWhen populated, the system will skip any validation or processing related to\nthis chart.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:32", + "fields": { + "Name": "string" + }, + "methodNames": null, + "source": [ + "type SkipHelmChartList struct {", + "\t// Name is the name of the `operator bundle package name` or `image-version` that you want to check if exists in the RedHat catalog", + "\tName string `yaml:\"name\" json:\"name\"`", + "}" + ] + }, + { + "name": "SkipScalingTestDeploymentsInfo", + "exported": true, + "doc": "SkipScalingTestDeploymentsInfo Lists deployments excluded from scaling tests\n\nThis structure stores a deployment's name and namespace that should be\nignored during scaling test runs. 
By including these entries in the\nconfiguration, the testing framework bypasses any checks or actions that\ncould interfere with or corrupt the selected deployments.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:56", + "fields": { + "Name": "string", + "Namespace": "string" + }, + "methodNames": null, + "source": [ + "type SkipScalingTestDeploymentsInfo struct {", + "", + "\t// Deployment name and namespace that can be skipped by the scaling tests", + "\tName string `yaml:\"name\" json:\"name\"`", + "\tNamespace string `yaml:\"namespace\" json:\"namespace\"`", + "}" + ] + }, + { + "name": "SkipScalingTestStatefulSetsInfo", + "exported": true, + "doc": "SkipScalingTestStatefulSetsInfo Specifies statefulsets excluded from scaling tests\n\nThis structure holds the name and namespace of a StatefulSet that should be\nignored during scaling test runs to avoid potential failures or conflicts. By\nlisting such StatefulSets, the testing framework can bypass them while still\nevaluating other components.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:69", + "fields": { + "Name": "string", + "Namespace": "string" + }, + "methodNames": null, + "source": [ + "type SkipScalingTestStatefulSetsInfo struct {", + "", + "\t// StatefulSet name and namespace that can be skipped by the scaling tests", + "\tName string `yaml:\"name\" json:\"name\"`", + "\tNamespace string `yaml:\"namespace\" json:\"namespace\"`", + "}" + ] + }, + { + "name": "TestConfiguration", + "exported": true, + "doc": "TestConfiguration holds configuration values used during test execution\n\nThis struct groups settings that control which namespaces, pods, operators,\nand CRDs are considered in a test run. It also contains parameters for the\ncollector application and connection to an external API. 
The fields support\nfiltering, skipping certain resources, and specifying accepted kernel taints\nor protocol names.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:133", + "fields": { + "AcceptedKernelTaints": "[]AcceptedKernelTaintsInfo", + "CollectorAppEndpoint": "string", + "CollectorAppPassword": "string", + "ConnectAPIConfig": "ConnectAPIConfig", + "CrdFilters": "[]CrdFilter", + "ExecutedBy": "string", + "ManagedDeployments": "[]ManagedDeploymentsStatefulsets", + "ManagedStatefulsets": "[]ManagedDeploymentsStatefulsets", + "OperatorsUnderTestLabels": "[]string", + "PartnerName": "string", + "PodsUnderTestLabels": "[]string", + "ProbeDaemonSetNamespace": "string", + "ServicesIgnoreList": "[]string", + "SkipHelmChartList": "[]SkipHelmChartList", + "SkipScalingTestDeployments": "[]SkipScalingTestDeploymentsInfo", + "SkipScalingTestStatefulSets": "[]SkipScalingTestStatefulSetsInfo", + "TargetNameSpaces": "[]Namespace", + "ValidProtocolNames": "[]string" + }, + "methodNames": null, + "source": [ + "type TestConfiguration struct {", + "\t// targetNameSpaces to be used in", + "\tTargetNameSpaces []Namespace `yaml:\"targetNameSpaces,omitempty\" json:\"targetNameSpaces,omitempty\"`", + "\t// labels identifying pods under test", + "\tPodsUnderTestLabels []string `yaml:\"podsUnderTestLabels,omitempty\" json:\"podsUnderTestLabels,omitempty\"`", + "\t// labels identifying operators unde test", + "\tOperatorsUnderTestLabels []string `yaml:\"operatorsUnderTestLabels,omitempty\" json:\"operatorsUnderTestLabels,omitempty\"`", + "\t// CRDs section.", + "\tCrdFilters []CrdFilter `yaml:\"targetCrdFilters,omitempty\" json:\"targetCrdFilters,omitempty\"`", + "\tManagedDeployments []ManagedDeploymentsStatefulsets `yaml:\"managedDeployments,omitempty\" json:\"managedDeployments,omitempty\"`", + "\tManagedStatefulsets []ManagedDeploymentsStatefulsets `yaml:\"managedStatefulsets,omitempty\" json:\"managedStatefulsets,omitempty\"`", + "", + "\t// 
AcceptedKernelTaints", + "\tAcceptedKernelTaints []AcceptedKernelTaintsInfo `yaml:\"acceptedKernelTaints,omitempty\" json:\"acceptedKernelTaints,omitempty\"`", + "\tSkipHelmChartList []SkipHelmChartList `yaml:\"skipHelmChartList,omitempty\" json:\"skipHelmChartList,omitempty\"`", + "\t// SkipScalingTestDeploymentNames", + "\tSkipScalingTestDeployments []SkipScalingTestDeploymentsInfo `yaml:\"skipScalingTestDeployments,omitempty\" json:\"skipScalingTestDeployments,omitempty\"`", + "\t// SkipScalingTestStatefulSetNames", + "\tSkipScalingTestStatefulSets []SkipScalingTestStatefulSetsInfo `yaml:\"skipScalingTestStatefulSets,omitempty\" json:\"skipScalingTestStatefulSets,omitempty\"`", + "\tValidProtocolNames []string `yaml:\"validProtocolNames,omitempty\" json:\"validProtocolNames,omitempty\"`", + "\tServicesIgnoreList []string `yaml:\"servicesignorelist,omitempty\" json:\"servicesignorelist,omitempty\"`", + "\tProbeDaemonSetNamespace string `yaml:\"probeDaemonSetNamespace,omitempty\" json:\"probeDaemonSetNamespace,omitempty\"`", + "\t// Collector's parameters", + "\tExecutedBy string `yaml:\"executedBy,omitempty\" json:\"executedBy,omitempty\"`", + "\tPartnerName string `yaml:\"partnerName,omitempty\" json:\"partnerName,omitempty\"`", + "\tCollectorAppPassword string `yaml:\"collectorAppPassword,omitempty\" json:\"collectorAppPassword,omitempty\"`", + "\tCollectorAppEndpoint string `yaml:\"collectorAppEndpoint,omitempty\" json:\"collectorAppEndpoint,omitempty\"`", + "\t// ConnectAPIConfig contains the configuration for the Red Hat Connect API", + "\tConnectAPIConfig ConnectAPIConfig `yaml:\"connectAPIConfig,omitempty\" json:\"connectAPIConfig,omitempty\"`", + "}" + ] + }, + { + "name": "TestParameters", + "exported": true, + "doc": "TestParameters holds configuration settings for test execution\n\nThis structure contains a collection of fields that control how tests are\nrun, including resource limits, image repositories, API connection details,\nand output options. 
It also flags whether to include non-running pods, enable\ndata collection or XML creation, and sets timeouts and log levels for the\ntest environment.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:171", + "fields": { + "AllowNonRunning": "bool", + "AllowPreflightInsecure": "bool", + "CertSuiteImageRepo": "string", + "CertSuiteProbeImage": "string", + "ConfigFile": "string", + "ConnectAPIBaseURL": "string", + "ConnectAPIKey": "string", + "ConnectAPIProxyPort": "string", + "ConnectAPIProxyURL": "string", + "ConnectProjectID": "string", + "DaemonsetCPULim": "string", + "DaemonsetCPUReq": "string", + "DaemonsetMemLim": "string", + "DaemonsetMemReq": "string", + "EnableDataCollection": "bool", + "EnableXMLCreation": "bool", + "IncludeWebFilesInOutputFolder": "bool", + "Intrusive": "bool", + "Kubeconfig": "string", + "LabelsFilter": "string", + "LogLevel": "string", + "OfflineDB": "string", + "OmitArtifactsZipFile": "bool", + "OutputDir": "string", + "PfltDockerconfig": "string", + "SanitizeClaim": "bool", + "ServerMode": "bool", + "Timeout": "time.Duration" + }, + "methodNames": null, + "source": [ + "type TestParameters struct {", + "\tKubeconfig string", + "\tConfigFile string", + "\tPfltDockerconfig string", + "\tOutputDir string", + "\tLabelsFilter string", + "\tLogLevel string", + "\tOfflineDB string", + "\tDaemonsetCPUReq string", + "\tDaemonsetCPULim string", + "\tDaemonsetMemReq string", + "\tDaemonsetMemLim string", + "\tSanitizeClaim bool", + "\tCertSuiteImageRepo string", + "\tCertSuiteProbeImage string", + "\tIntrusive bool", + "\tAllowPreflightInsecure bool", + "\tIncludeWebFilesInOutputFolder bool", + "\tOmitArtifactsZipFile bool", + "\tEnableDataCollection bool", + "\tEnableXMLCreation bool", + "\tServerMode bool", + "\tTimeout time.Duration", + "\tConnectAPIKey string", + "\tConnectProjectID string", + "\tConnectAPIBaseURL string", + "\tConnectAPIProxyURL string", + "\tConnectAPIProxyPort string", + "\t// 
AllowNonRunning determines whether autodiscovery includes non-Running pods", + "\tAllowNonRunning bool", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "GetTestParameters", + "qualifiedName": "GetTestParameters", + "exported": true, + "signature": "func()(*TestParameters)", + "doc": "GetTestParameters Retrieves the current global test configuration\n\nThis function returns a pointer to the singleton TestParameters instance that\nholds all runtime settings for the certification suite. The parameters are\ninitialized once at program start and can be modified through command‑line\nflags or environment variables before use. Subsequent calls return the same\ninstance, allowing different parts of the application to read shared\nconfiguration values.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/utils.go:76", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "initTestParamsFromFlags", + "kind": "function", + "source": [ + "func initTestParamsFromFlags(cmd *cobra.Command) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Fetch test params from flags", + "\ttestParams.OutputDir, _ = cmd.Flags().GetString(\"output-dir\")", + "\ttestParams.LabelsFilter, _ = cmd.Flags().GetString(\"label-filter\")", + "\ttestParams.ServerMode, _ = cmd.Flags().GetBool(\"server-mode\")", + "\ttestParams.ConfigFile, _ = cmd.Flags().GetString(\"config-file\")", + "\ttestParams.Kubeconfig, _ = cmd.Flags().GetString(\"kubeconfig\")", + "\ttestParams.OmitArtifactsZipFile, _ = cmd.Flags().GetBool(\"omit-artifacts-zip-file\")", + "\ttestParams.LogLevel, _ = cmd.Flags().GetString(\"log-level\")", + "\ttestParams.OfflineDB, _ = cmd.Flags().GetString(\"offline-db\")", + "\ttestParams.PfltDockerconfig, _ = cmd.Flags().GetString(\"preflight-dockerconfig\")", + "\ttestParams.Intrusive, _ = cmd.Flags().GetBool(\"intrusive\")", + 
"\ttestParams.AllowPreflightInsecure, _ = cmd.Flags().GetBool(\"allow-preflight-insecure\")", + "\ttestParams.IncludeWebFilesInOutputFolder, _ = cmd.Flags().GetBool(\"include-web-files\")", + "\ttestParams.EnableDataCollection, _ = cmd.Flags().GetBool(\"enable-data-collection\")", + "\ttestParams.EnableXMLCreation, _ = cmd.Flags().GetBool(\"create-xml-junit-file\")", + "\ttestParams.CertSuiteProbeImage, _ = cmd.Flags().GetString(\"certsuite-probe-image\")", + "\ttestParams.DaemonsetCPUReq, _ = cmd.Flags().GetString(\"daemonset-cpu-req\")", + "\ttestParams.DaemonsetCPULim, _ = cmd.Flags().GetString(\"daemonset-cpu-lim\")", + "\ttestParams.DaemonsetMemReq, _ = cmd.Flags().GetString(\"daemonset-mem-req\")", + "\ttestParams.DaemonsetMemLim, _ = cmd.Flags().GetString(\"daemonset-mem-lim\")", + "\ttestParams.SanitizeClaim, _ = cmd.Flags().GetBool(\"sanitize-claim\")", + "\ttestParams.AllowNonRunning, _ = cmd.Flags().GetBool(\"allow-non-running\")", + "\ttestParams.ConnectAPIKey, _ = cmd.Flags().GetString(\"connect-api-key\")", + "\ttestParams.ConnectProjectID, _ = cmd.Flags().GetString(\"connect-project-id\")", + "\ttestParams.ConnectAPIBaseURL, _ = cmd.Flags().GetString(\"connect-api-base-url\")", + "\ttestParams.ConnectAPIProxyURL, _ = cmd.Flags().GetString(\"connect-api-proxy-url\")", + "\ttestParams.ConnectAPIProxyPort, _ = cmd.Flags().GetString(\"connect-api-proxy-port\")", + "\ttimeoutStr, _ := cmd.Flags().GetString(\"timeout\")", + "", + "\t// Check if the output directory exists and, if not, create it", + "\tif _, err := os.Stat(testParams.OutputDir); os.IsNotExist(err) {", + "\t\tvar dirPerm fs.FileMode = 0o755 // default permissions for a directory", + "\t\terr := os.MkdirAll(testParams.OutputDir, dirPerm)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not create directory %q, err: %v\", testParams.OutputDir, err)", + "\t\t}", + "\t} else if err != nil {", + "\t\treturn fmt.Errorf(\"could not check directory %q, err: %v\", testParams.OutputDir, 
err)", + "\t}", + "", + "\t// Process the timeout flag", + "\tconst timeoutDefaultvalue = 24 * time.Hour", + "\ttimeout, err := time.ParseDuration(timeoutStr)", + "\tif err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to parse timeout flag %q, err: %v. Using default timeout value %v\", timeoutStr, err, timeoutDefaultvalue)", + "\t\ttestParams.Timeout = timeoutDefaultvalue", + "\t} else {", + "\t\ttestParams.Timeout = timeout", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindPodsByLabels", + "kind": "function", + "source": [ + "func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) {", + "\trunningPods = []corev1.Pod{}", + "\tallPods = []corev1.Pod{}", + "\tallowNonRunning := configuration.GetTestParameters().AllowNonRunning", + "\t// Iterate through namespaces", + "\tfor _, ns := range namespaces 
{", + "\t\tvar pods *corev1.PodList", + "\t\tif len(labels) \u003e 0 {", + "\t\t\tpods = findPodsMatchingAtLeastOneLabel(oc, labels, ns)", + "\t\t} else {", + "\t\t\t// If labels are not provided in the namespace under test, they are tested by the CNF suite", + "\t\t\tlog.Debug(\"Searching Pods in namespace %s without label\", ns)", + "\t\t\tvar err error", + "\t\t\tpods, err = oc.Pods(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error when listing pods in ns=%s, err: %v\", ns, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t}", + "\t\t// Filter out any pod set to be deleted", + "\t\tfor i := 0; i \u003c len(pods.Items); i++ {", + "\t\t\tif pods.Items[i].DeletionTimestamp == nil {", + "\t\t\t\tif allowNonRunning || pods.Items[i].Status.Phase == corev1.PodRunning {", + "\t\t\t\t\trunningPods = append(runningPods, pods.Items[i])", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tallPods = append(allPods, pods.Items[i])", + "\t\t}", + "\t}", + "", + "\treturn runningPods, allPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := 
autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. 
Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif 
env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", 
versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "getK8sClientsConfigFileNames", + "kind": "function", + "source": [ + "func getK8sClientsConfigFileNames() []string {", + "\tparams := configuration.GetTestParameters()", + "\tfileNames := []string{}", + "\tif params.Kubeconfig != \"\" {", + "\t\t// Add the kubeconfig path", + "\t\tfileNames = append(fileNames, params.Kubeconfig)", + "\t}", + "\thomeDir := os.Getenv(\"HOME\")", + "\tif homeDir != \"\" {", + "\t\tkubeConfigFilePath := filepath.Join(homeDir, \".kube\", \"config\")", + "\t\t// Check if the kubeconfig path exists", + "\t\tif _, err := os.Stat(kubeConfigFilePath); err == nil {", + "\t\t\tlog.Info(\"kubeconfig path %s is present\", kubeConfigFilePath)", + "\t\t\t// Only add the kubeconfig to the list of paths if it exists, since it is not added by the user", + "\t\t\tfileNames = append(fileNames, kubeConfigFilePath)", + "\t\t} else {", + "\t\t\tlog.Info(\"kubeconfig path %s is not present\", kubeConfigFilePath)", + "\t\t}", + "\t}", + "", + "\treturn fileNames", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to 
be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = 
make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + 
"\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + 
"\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "deployDaemonSet", + "kind": "function", + "source": [ + "func deployDaemonSet(namespace string) error {", + 
"\tk8sPrivilegedDs.SetDaemonSetClient(clientsholder.GetClientsHolder().K8sClient)", + "", + "\tdsImage := env.params.CertSuiteProbeImage", + "\tif k8sPrivilegedDs.IsDaemonSetReady(DaemonSetName, namespace, dsImage) {", + "\t\treturn nil", + "\t}", + "", + "\tmatchLabels := make(map[string]string)", + "\tmatchLabels[\"name\"] = DaemonSetName", + "\tmatchLabels[\"redhat-best-practices-for-k8s.com/app\"] = DaemonSetName", + "\t_, err := k8sPrivilegedDs.CreateDaemonSet(DaemonSetName, namespace, containerName, dsImage, matchLabels, probePodsTimeout,", + "\t\tconfiguration.GetTestParameters().DaemonsetCPUReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetCPULim,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemLim,", + "\t\tcorev1.PullIfNotPresent,", + "\t)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not deploy certsuite daemonset, err=%v\", err)", + "\t}", + "\terr = k8sPrivilegedDs.WaitDaemonsetReady(namespace, DaemonSetName, probePodsTimeout)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"timed out waiting for certsuite daemonset, err=%v\", err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "ShouldRun", + "kind": "function", + "source": [ + "func ShouldRun(labelsExpr string) bool {", + "\tenv = provider.GetTestEnvironment()", + "\tpreflightAllowedLabels := []string{common.PreflightTestKey, identifiers.TagPreflight}", + "", + "\tif !labelsAllowTestRun(labelsExpr, preflightAllowedLabels) {", + "\t\treturn false", + "\t}", + "", + "\t// Add safeguard against running the preflight tests if the docker config does not exist.", + "\tpreflightDockerConfigFile := configuration.GetTestParameters().PfltDockerconfig", + "\tif preflightDockerConfigFile == \"\" || preflightDockerConfigFile == \"NA\" {", + "\t\tlog.Warn(\"Skipping the preflight suite because the Docker Config file is not 
provided.\")", + "\t\tenv.SkipPreflight = true", + "\t}", + "", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "name": "LoadConfiguration", + "qualifiedName": "LoadConfiguration", + "exported": true, + "signature": "func(string)(TestConfiguration, error)", + "doc": "LoadConfiguration Loads and parses a configuration file once\n\nThe function reads the specified YAML file, unmarshals its contents into a\nTestConfiguration structure, and caches the result for subsequent calls. It\nlogs progress and warns if the probe daemonset namespace is missing,\ndefaulting it to a predefined value. Errors during reading or unmarshalling\nare returned alongside the configuration.", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/utils.go:39", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg 
string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", 
+ "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + 
"\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + 
"\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil 
{", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadConfiguration(filePath string) (TestConfiguration, error) {", + "\tif confLoaded {", + "\t\tlog.Debug(\"config file already loaded, return previous element\")", + "\t\treturn configuration, nil", + "\t}", + "", + "\tlog.Info(\"Loading config from file: %s\", filePath)", + "\tcontents, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\terr = yaml.Unmarshal(contents, \u0026configuration)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\t// Set default namespace for the probe daemonset pods, in case it was not set.", + "\tif configuration.ProbeDaemonSetNamespace == \"\" {", + "\t\tlog.Warn(\"No namespace configured for the probe daemonset. 
Defaulting to namespace %q\", defaultProbeDaemonSetNamespace)", + "\t\tconfiguration.ProbeDaemonSetNamespace = defaultProbeDaemonSetNamespace", + "\t} else {", + "\t\tlog.Info(\"Namespace for probe daemonset: %s\", configuration.ProbeDaemonSetNamespace)", + "\t}", + "", + "\tconfLoaded = true", + "\treturn configuration, nil", + "}" + ] + } + ], + "globals": [ + { + "name": "confLoaded", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/utils.go:28" + }, + { + "name": "configuration", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/utils.go:27" + }, + { + "name": "parameters", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/utils.go:29" + } + ], + "consts": [ + { + "name": "defaultProbeDaemonSetNamespace", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/configuration/configuration.go:22" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "diagnostics", + "files": 1, + "imports": [ + "context", + "encoding/json", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "k8s.io/api/core/v1", + "k8s.io/api/storage/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/serializer", + "strings" + ], + "structs": [ + { + "name": "NodeHwInfo", + "exported": true, + "doc": "NodeHwInfo Container for node hardware details\n\nThis structure stores parsed output from various system utilities, including\nCPU information, IP configuration, block device layout, and PCI devices. 
Each\nfield holds the raw or processed data returned by the diagnostics functions.\nThe struct is populated per-node and used to aggregate hardware profiles\nacross a cluster.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:52", + "fields": { + "IPconfig": "interface{}", + "Lsblk": "interface{}", + "Lscpu": "interface{}", + "Lspci": "[]string" + }, + "methodNames": null, + "source": [ + "type NodeHwInfo struct {", + "\tLscpu interface{}", + "\tIPconfig interface{}", + "\tLsblk interface{}", + "\tLspci []string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "GetCniPlugins", + "qualifiedName": "GetCniPlugins", + "exported": true, + "signature": "func()(map[string][]interface{})", + "doc": "GetCniPlugins Retrieves CNI plugin information from probe pods\n\nThis function gathers the JSON output of a command run inside each probe pod\nto collect installed CNI plugins for every node. It executes the command,\nparses the returned JSON into generic interface slices, and maps them by node\nname. 
Errors during execution or decoding are logged and that node is\nskipped.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:66", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + 
"kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "GenerateNodes", + "kind": "function", + "source": [ + "func GenerateNodes() map[string]interface{} {", + "\tconst (", + "\t\tnodeSummaryField = \"nodeSummary\"", + "\t\tcniPluginsField = \"cniPlugins\"", + "\t\tnodesHwInfo = \"nodesHwInfo\"", + "\t\tcsiDriverInfo = \"csiDriver\"", + "\t)", + "\tnodes := map[string]interface{}{}", + "\tnodes[nodeSummaryField] = diagnostics.GetNodeJSON() // add node summary", + "\tnodes[cniPluginsField] = diagnostics.GetCniPlugins() // add cni plugins", + "\tnodes[nodesHwInfo] = diagnostics.GetHwInfoAllNodes() // add nodes hardware information", + "\tnodes[csiDriverInfo] = diagnostics.GetCsiDriver() // add csi drivers info", + "\treturn nodes", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetCniPlugins() (out map[string][]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string][]interface{})", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, cniPluginsCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cniPluginsCommand, probePod.String())", + "\t\t\tcontinue", + "\t\t}", + "\t\tdecoded := []interface{}{}", + "\t\terr = json.Unmarshal([]byte(outStr), \u0026decoded)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not decode json file because of: %s\", err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = decoded", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "GetCsiDriver", + 
"qualifiedName": "GetCsiDriver", + "exported": true, + "signature": "func()(map[string]interface{})", + "doc": "GetCsiDriver Retrieves a list of CSI drivers from the Kubernetes cluster\n\nThis function accesses the Kubernetes client holder to query the StorageV1\nAPI for all CSI drivers, encodes the result into JSON, and then unmarshals it\ninto a map. Errors during listing, scheme setup, encoding, or decoding are\nlogged and cause an empty map to be returned. The resulting map contains\ndriver details suitable for inclusion in diagnostic reports.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:196", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "CSIDrivers", + "kind": "function" + }, + { + "name": "StorageV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/runtime", + "name": "NewScheme", + "kind": "function" + }, + { + "pkgPath": "k8s.io/api/storage/v1", + "name": "AddToScheme", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + 
"func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "LegacyCodec", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/runtime/serializer", + "name": "NewCodecFactory", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/runtime", + "name": "Encode", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "GenerateNodes", + "kind": "function", + "source": [ + "func GenerateNodes() map[string]interface{} {", + "\tconst (", + "\t\tnodeSummaryField = \"nodeSummary\"", + "\t\tcniPluginsField = \"cniPlugins\"", + "\t\tnodesHwInfo = \"nodesHwInfo\"", + "\t\tcsiDriverInfo = \"csiDriver\"", + "\t)", + "\tnodes := map[string]interface{}{}", + "\tnodes[nodeSummaryField] = diagnostics.GetNodeJSON() // add node summary", + "\tnodes[cniPluginsField] = diagnostics.GetCniPlugins() // add cni plugins", + "\tnodes[nodesHwInfo] = diagnostics.GetHwInfoAllNodes() // add nodes hardware information", + "\tnodes[csiDriverInfo] = diagnostics.GetCsiDriver() // add csi drivers info", + "\treturn nodes", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetCsiDriver() (out map[string]interface{}) {", + "\to := clientsholder.GetClientsHolder()", + 
"\tcsiDriver, err := o.K8sClient.StorageV1().CSIDrivers().List(context.TODO(), apimachineryv1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Fail CSIDrivers.list err:%s\", err)", + "\t\treturn out", + "\t}", + "\tscheme := runtime.NewScheme()", + "\terr = storagev1.AddToScheme(scheme)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail AddToScheme err:%s\", err)", + "\t\treturn out", + "\t}", + "\tcodec := serializer.NewCodecFactory(scheme).LegacyCodec(storagev1.SchemeGroupVersion)", + "\tdata, err := runtime.Encode(codec, csiDriver)", + "\tif err != nil {", + "\t\tlog.Error(\"Fail to encode Nodes to json, er: %s\", err)", + "\t\treturn out", + "\t}", + "", + "\terr = json.Unmarshal(data, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"failed to marshall nodes json, err: %v\", err)", + "\t\treturn out", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "GetHwInfoAllNodes", + "qualifiedName": "GetHwInfoAllNodes", + "exported": true, + "signature": "func()(map[string]NodeHwInfo)", + "doc": "GetHwInfoAllNodes Collects hardware details from all probe pods\n\nThis function iterates over each probe pod defined in the test environment,\nexecuting a series of commands to gather CPU, memory, network, block device,\nand PCI information. The results are parsed into a structured map keyed by\nnode name, with errors logged but not stopping the collection for other\nnodes. 
It returns a map where each entry contains a NodeHwInfo struct holding\nthe gathered data.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:96", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "getHWJsonOutput", + "kind": "function", + "source": [ + "func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out interface{}, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", cmd, err, errStr)", + "\t}", + "\terr = json.Unmarshal([]byte(outStr), \u0026out)", + "\tif err != nil {", + "\t\treturn out, fmt.Errorf(\"could not decode json file because of: %s\", err)", + "\t}", + "\treturn out, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + 
"\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "getHWJsonOutput", + "kind": "function", + "source": [ + "func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out interface{}, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", cmd, err, errStr)", + "\t}", + "\terr = json.Unmarshal([]byte(outStr), \u0026out)", + "\tif err != nil {", + "\t\treturn out, fmt.Errorf(\"could not decode json file because of: %s\", err)", + "\t}", + "\treturn out, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "getHWJsonOutput", + "kind": "function", + "source": [ + "func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out interface{}, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", cmd, err, errStr)", + "\t}", + "\terr = json.Unmarshal([]byte(outStr), \u0026out)", + "\tif err != nil {", + "\t\treturn out, fmt.Errorf(\"could not decode json file because of: %s\", err)", + "\t}", + 
"\treturn out, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "getHWTextOutput", + "kind": "function", + "source": [ + "func getHWTextOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out []string, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", lspciCommand, err, errStr)", + "\t}", + "", + "\treturn strings.Split(outStr, \"\\n\"), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "GenerateNodes", + "kind": "function", + "source": [ + "func GenerateNodes() map[string]interface{} {", + "\tconst (", + "\t\tnodeSummaryField = \"nodeSummary\"", + "\t\tcniPluginsField = \"cniPlugins\"", + "\t\tnodesHwInfo = \"nodesHwInfo\"", + "\t\tcsiDriverInfo = \"csiDriver\"", + "\t)", + "\tnodes := map[string]interface{}{}", + "\tnodes[nodeSummaryField] = diagnostics.GetNodeJSON() // add node summary", + "\tnodes[cniPluginsField] = diagnostics.GetCniPlugins() // add cni plugins", + "\tnodes[nodesHwInfo] = diagnostics.GetHwInfoAllNodes() // add nodes hardware information", + "\tnodes[csiDriverInfo] = diagnostics.GetCsiDriver() // add csi drivers info", + "\treturn nodes", + "}" + ] + } + ], + "usesTypes": null, 
+ "usesGlobals": null, + "source": [ + "func GetHwInfoAllNodes() (out map[string]NodeHwInfo) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string]NodeHwInfo)", + "\tfor _, probePod := range env.ProbePods {", + "\t\thw := NodeHwInfo{}", + "\t\tlscpu, err := getHWJsonOutput(probePod, o, lscpuCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lscpu for node %s\", probePod.Spec.NodeName)", + "\t\t} else {", + "\t\t\tvar ok bool", + "\t\t\ttemp, ok := lscpu.(map[string]interface{})", + "\t\t\tif !ok {", + "\t\t\t\tlog.Error(\"problem casting lscpu field for node %s, lscpu=%v\", probePod.Spec.NodeName, lscpu)", + "\t\t\t} else {", + "\t\t\t\thw.Lscpu = temp[\"lscpu\"]", + "\t\t\t}", + "\t\t}", + "\t\thw.IPconfig, err = getHWJsonOutput(probePod, o, ipCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting ip config for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lsblk, err = getHWJsonOutput(probePod, o, lsblkCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lsblk for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lspci, err = getHWTextOutput(probePod, o, lspciCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lspci for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = hw", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "GetNodeJSON", + "qualifiedName": "GetNodeJSON", + "exported": true, + "signature": "func()(map[string]interface{})", + "doc": "GetNodeJSON Retrieves a JSON representation of node information\n\nThe function obtains the test environment, marshals its Nodes field into\nJSON, then unmarshals that data back into a generic map structure for use\nelsewhere. 
It logs errors if either marshaling or unmarshaling fails and\nreturns the resulting map.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:173", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "GenerateNodes", + "kind": "function", + "source": [ + "func GenerateNodes() map[string]interface{} {", + "\tconst (", + "\t\tnodeSummaryField = \"nodeSummary\"", + "\t\tcniPluginsField = \"cniPlugins\"", + "\t\tnodesHwInfo = \"nodesHwInfo\"", + "\t\tcsiDriverInfo = \"csiDriver\"", + "\t)", + "\tnodes := map[string]interface{}{}", + "\tnodes[nodeSummaryField] = diagnostics.GetNodeJSON() // add node summary", + "\tnodes[cniPluginsField] = diagnostics.GetCniPlugins() // add cni plugins", + "\tnodes[nodesHwInfo] = diagnostics.GetHwInfoAllNodes() // add nodes hardware information", + "\tnodes[csiDriverInfo] = diagnostics.GetCsiDriver() // add csi drivers info", + "\treturn 
nodes", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNodeJSON() (out map[string]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "", + "\tnodesJSON, err := json.Marshal(env.Nodes)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not Marshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\terr = json.Unmarshal(nodesJSON, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not unMarshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\treturn out", + "}" + ] + }, + { + "name": "GetVersionK8s", + "qualifiedName": "GetVersionK8s", + "exported": true, + "signature": "func()(string)", + "doc": "GetVersionK8s Returns the Kubernetes version used in the test environment\n\nThis function obtains the current test environment configuration and extracts\nthe Kubernetes version string. It accesses the global environment state via\nprovider.and returns the K8sVersion field. The result is a plain string\nrepresenting the cluster's Kubernetes release.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:230", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, 
fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetVersionK8s() (out string) {", + "\tenv := provider.GetTestEnvironment()", + "\treturn env.K8sVersion", + "}" + ] + }, + { + "name": "GetVersionOcClient", + "qualifiedName": "GetVersionOcClient", + "exported": true, + "signature": "func()(string)", + "doc": "GetVersionOcClient Returns a placeholder indicating oc client is not used\n\nThe function simply provides the text \"n/a, \" to signal that no OpenShift\nclient version information is available in this context.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:253", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, 
fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetVersionOcClient() (out string) {", + "\treturn \"n/a, (not using oc or kubectl client)\"", + "}" + ] + }, + { + "name": "GetVersionOcp", + "qualifiedName": "GetVersionOcp", + "exported": true, + "signature": "func()(string)", + "doc": "GetVersionOcp Retrieves the OpenShift version of the current environment\n\nThis function first obtains test environment data, then checks whether the\ncluster is an OpenShift instance. 
If it is not, a placeholder string\nindicating a non‑OpenShift cluster is returned; otherwise the stored\nOpenshiftVersion value is provided as output.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:241", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "IsOCPCluster", + "kind": "function", + "source": [ + "func IsOCPCluster() bool {", + "\treturn env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "NewClaimBuilder", + "kind": "function", + "source": [ + "func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) {", + "\tif os.Getenv(\"UNIT_TEST\") == \"true\" {", + "\t\treturn \u0026ClaimBuilder{", + "\t\t\tclaimRoot: CreateClaimRoot(),", + "\t\t}, nil", + "\t}", + "", + "\tlog.Debug(\"Creating claim file builder.\")", + "\tconfigurations, err := MarshalConfigurations(env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"configuration node missing because of: %v\", err)", + "\t}", + "", + "\tclaimConfigurations := map[string]interface{}{}", + "\tUnmarshalConfigurations(configurations, claimConfigurations)", + "", + "\troot := CreateClaimRoot()", + "", + "\troot.Claim.Configurations = claimConfigurations", + "\troot.Claim.Nodes = GenerateNodes()", + "\troot.Claim.Versions = \u0026claim.Versions{", + "\t\tCertSuite: versions.GitDisplayRelease,", + "\t\tCertSuiteGitCommit: versions.GitCommit,", + "\t\tOcClient: diagnostics.GetVersionOcClient(),", + "\t\tOcp: diagnostics.GetVersionOcp(),", + "\t\tK8s: 
diagnostics.GetVersionK8s(),", + "\t\tClaimFormat: versions.ClaimFormatVersion,", + "\t}", + "", + "\treturn \u0026ClaimBuilder{", + "\t\tclaimRoot: root,", + "\t}, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetVersionOcp() (out string) {", + "\tenv := provider.GetTestEnvironment()", + "\tif !provider.IsOCPCluster() {", + "\t\treturn \"n/a, (non-OpenShift cluster)\"", + "\t}", + "\treturn env.OpenshiftVersion", + "}" + ] + }, + { + "name": "getHWJsonOutput", + "qualifiedName": "getHWJsonOutput", + "exported": false, + "signature": "func(*corev1.Pod, clientsholder.Command, string)(interface{}, error)", + "doc": "getHWJsonOutput Executes a command in a pod and decodes its JSON output\n\nThis function runs the supplied shell command inside a specified container of\na pod, captures the standard output, and unmarshals it into an interface. If\nthe command fails or returns non‑empty stderr, an error is returned.\nSuccessful execution yields the parsed JSON data.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:137", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetHwInfoAllNodes", + "kind": "function", + "source": [ + "func GetHwInfoAllNodes() (out 
map[string]NodeHwInfo) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string]NodeHwInfo)", + "\tfor _, probePod := range env.ProbePods {", + "\t\thw := NodeHwInfo{}", + "\t\tlscpu, err := getHWJsonOutput(probePod, o, lscpuCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lscpu for node %s\", probePod.Spec.NodeName)", + "\t\t} else {", + "\t\t\tvar ok bool", + "\t\t\ttemp, ok := lscpu.(map[string]interface{})", + "\t\t\tif !ok {", + "\t\t\t\tlog.Error(\"problem casting lscpu field for node %s, lscpu=%v\", probePod.Spec.NodeName, lscpu)", + "\t\t\t} else {", + "\t\t\t\thw.Lscpu = temp[\"lscpu\"]", + "\t\t\t}", + "\t\t}", + "\t\thw.IPconfig, err = getHWJsonOutput(probePod, o, ipCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting ip config for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lsblk, err = getHWJsonOutput(probePod, o, lsblkCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lsblk for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lspci, err = getHWTextOutput(probePod, o, lspciCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lspci for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = hw", + "\t}", + "\treturn out", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out interface{}, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", cmd, err, errStr)", + "\t}", + "\terr = json.Unmarshal([]byte(outStr), \u0026out)", + "\tif err != nil {", + "\t\treturn out, fmt.Errorf(\"could not decode 
json file because of: %s\", err)", + "\t}", + "\treturn out, nil", + "}" + ] + }, + { + "name": "getHWTextOutput", + "qualifiedName": "getHWTextOutput", + "exported": false, + "signature": "func(*corev1.Pod, clientsholder.Command, string)([]string, error)", + "doc": "getHWTextOutput Runs a command in a pod container and returns its output lines\n\nThe function constructs a context for the specified pod and container, then\nexecutes the given command using the client holder. If the command fails or\nproduces error output, it returns an error describing the failure. On\nsuccess, it splits the standard output by newline characters and returns the\nresulting slice of strings.", + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:157", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetHwInfoAllNodes", + "kind": "function", + "source": [ + "func GetHwInfoAllNodes() (out map[string]NodeHwInfo) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string]NodeHwInfo)", + "\tfor _, probePod := range env.ProbePods {", + "\t\thw := NodeHwInfo{}", + "\t\tlscpu, err := getHWJsonOutput(probePod, o, lscpuCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lscpu for node %s\", probePod.Spec.NodeName)", + "\t\t} else {", + 
"\t\t\tvar ok bool", + "\t\t\ttemp, ok := lscpu.(map[string]interface{})", + "\t\t\tif !ok {", + "\t\t\t\tlog.Error(\"problem casting lscpu field for node %s, lscpu=%v\", probePod.Spec.NodeName, lscpu)", + "\t\t\t} else {", + "\t\t\t\thw.Lscpu = temp[\"lscpu\"]", + "\t\t\t}", + "\t\t}", + "\t\thw.IPconfig, err = getHWJsonOutput(probePod, o, ipCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting ip config for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lsblk, err = getHWJsonOutput(probePod, o, lsblkCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lsblk for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lspci, err = getHWTextOutput(probePod, o, lspciCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lspci for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = hw", + "\t}", + "\treturn out", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getHWTextOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out []string, err error) {", + "\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn out, fmt.Errorf(\"command %s failed with error err: %v, stderr: %s\", lspciCommand, err, errStr)", + "\t}", + "", + "\treturn strings.Split(outStr, \"\\n\"), nil", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "cniPluginsCommand", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:42" + }, + { + "name": "ipCommand", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:39" + }, + { + "name": "lsblkCommand", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:40" + }, + { + "name": "lscpuCommand", + 
"exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:38" + }, + { + "name": "lspciCommand", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/diagnostics/diagnostics.go:41" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/junit", + "name": "junit", + "files": 1, + "imports": null, + "structs": null, + "interfaces": null, + "functions": null, + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels", + "name": "labels", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "go/ast", + "go/parser", + "go/token", + "strings" + ], + "structs": [ + { + "name": "labelsExprParser", + "exported": false, + "doc": "labelsExprParser Parses and evaluates label expressions against a list of labels\n\nIt walks the abstract syntax tree of an expression, checking identifiers\nagainst provided labels, handling parentheses, logical NOT, AND, OR\noperators, and reporting unexpected nodes. The result is true if the\nexpression matches the label set, otherwise false.", + "position": "/Users/deliedit/dev/certsuite/pkg/labels/labels.go:29", + "fields": { + "astRootNode": "ast.Expr" + }, + "methodNames": [ + "Eval" + ], + "source": [ + "type labelsExprParser struct {", + "\tastRootNode ast.Expr", + "}" + ] + } + ], + "interfaces": [ + { + "name": "LabelsExprEvaluator", + "exported": true, + "doc": "LabelsExprEvaluator Evaluates label sets for compliance\n\nThe evaluator takes an array of strings representing labels and returns true\nif they satisfy the underlying expression rules, otherwise false. 
It\nencapsulates the logic needed to determine whether a given set of labels\nmatches the expected pattern or condition defined by the system.", + "position": "/Users/deliedit/dev/certsuite/pkg/labels/labels.go:19", + "methods": [ + "Eval" + ], + "source": [ + "type LabelsExprEvaluator interface {", + "\tEval(labels []string) bool", + "}" + ] + } + ], + "functions": [ + { + "name": "NewLabelsExprEvaluator", + "qualifiedName": "NewLabelsExprEvaluator", + "exported": true, + "signature": "func(string)(LabelsExprEvaluator, error)", + "doc": "NewLabelsExprEvaluator Creates an evaluator that checks label expressions\n\nThe function transforms a comma-separated string of labels into a\nGo-compatible boolean expression, parses it into an abstract syntax tree, and\nreturns an evaluator object. It replaces hyphens with underscores and commas\nwith logical OR operators before parsing. If the input cannot be parsed, an\nerror is returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/labels/labels.go:40", + "calls": [ + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "go/parser", + "name": "ParseExpr", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "InitLabelsExprEvaluator", + "kind": "function", + "source": [ + "func InitLabelsExprEvaluator(labelsFilter string) error {", + "\t// Expand the abstract \"all\" label into actual existing labels", + "\tif labelsFilter == \"all\" {", + "\t\tallTags := []string{identifiers.TagCommon, identifiers.TagExtended,", + "\t\t\tidentifiers.TagFarEdge, identifiers.TagTelco}", + "\t\tlabelsFilter = strings.Join(allTags, \",\")", + "\t}", + "", + "\teval, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\tif err != nil {", + "\t\treturn 
fmt.Errorf(\"could not create a label evaluator, err: %v\", err)", + "\t}", + "", + "\tlabelsExprEvaluator = eval", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewLabelsExprEvaluator(labelsExpr string) (LabelsExprEvaluator, error) {", + "\tgoLikeExpr := strings.ReplaceAll(labelsExpr, \"-\", \"_\")", + "\tgoLikeExpr = strings.ReplaceAll(goLikeExpr, \",\", \"||\")", + "", + "\tnode, err := parser.ParseExpr(goLikeExpr)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to parse labels expression %s: %v\", labelsExpr, err)", + "\t}", + "", + "\treturn labelsExprParser{", + "\t\tastRootNode: node,", + "\t}, nil", + "}" + ] + }, + 
{ + "name": "Eval", + "qualifiedName": "labelsExprParser.Eval", + "exported": true, + "receiver": "labelsExprParser", + "signature": "func([]string)(bool)", + "doc": "labelsExprParser.Eval Evaluates a logical expression against a set of labels\n\nThis method builds a lookup map from the supplied label strings, normalizing\ndashes to underscores for matching. It then recursively traverses an abstract\nsyntax tree representing the expression, evaluating identifiers, parentheses,\nunary NOT, and binary AND/OR operators using the lookup map. The result is a\nboolean indicating whether the labels satisfy the expression.", + "position": "/Users/deliedit/dev/certsuite/pkg/labels/labels.go:61", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "name": "visit", + "kind": "function" + }, + { + "name": "visit", + "kind": "function" + }, + { + "name": "visit", + "kind": "function" + }, + { + "name": "visit", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "visit", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (exprParser labelsExprParser) Eval(labels []string) bool {", + "\t// Define a map for fast name/ident checking when visiting nodes.", + "\tlabelsMap := make(map[string]bool)", + "\tfor _, label := range labels {", + "\t\tlabelsMap[strings.ReplaceAll(label, \"-\", \"_\")] = true", + "\t}", + "", + "\t// Visit function to walk the labels expression's AST.", + "\tvar visit func(e ast.Expr) bool", + "\tvisit = func(e ast.Expr) bool {", + "\t\tswitch v := e.(type) {", + "\t\tcase *ast.Ident:", + "\t\t\t// If the expression is an identifier, check if it exists 
in the wordMap.", + "\t\t\tif _, ok := labelsMap[v.Name]; !ok {", + "\t\t\t\treturn false", + "\t\t\t}", + "\t\t\treturn true", + "\t\tcase *ast.ParenExpr:", + "\t\t\treturn visit(v.X)", + "\t\tcase *ast.UnaryExpr:", + "\t\t\tif v.Op == token.NOT {", + "\t\t\t\treturn !visit(v.X)", + "\t\t\t}", + "\t\tcase *ast.BinaryExpr:", + "\t\t\t// If the expression is a binary expression, evaluate both operands.", + "\t\t\tleft := visit(v.X)", + "\t\t\tright := visit(v.Y)", + "\t\t\tswitch v.Op {", + "\t\t\tcase token.LAND:", + "\t\t\t\treturn left \u0026\u0026 right", + "\t\t\tcase token.LOR:", + "\t\t\t\treturn left || right", + "\t\t\tdefault:", + "\t\t\t\treturn false", + "\t\t\t}", + "\t\tdefault:", + "\t\t\tlog.Error(\"Unexpected/not-implemented expr: %v\", v)", + "\t\t\treturn false", + "\t\t}", + "\t\treturn false", + "\t}", + "", + "\treturn visit(exprParser.astRootNode)", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "podhelper", + "files": 1, + "imports": [ + "context", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/client-go/dynamic" + ], + "structs": [ + { + "name": "TopOwner", + "exported": true, + "doc": "TopOwner represents the highest-level resource owning a pod\n\nThe structure holds identifying information about a pod's ultimate owner,\nincluding its API version, kind, name, and namespace. It is used by helper\nfunctions to map pods back to the root resource that created them. 
The fields\nare all strings and can be populated from Kubernetes object metadata.", + "position": "/Users/deliedit/dev/certsuite/pkg/podhelper/podhelper.go:20", + "fields": { + "APIVersion": "string", + "Kind": "string", + "Name": "string", + "Namespace": "string" + }, + "methodNames": null, + "source": [ + "type TopOwner struct {", + "\tAPIVersion string", + "\tKind string", + "\tName string", + "\tNamespace string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "GetPodTopOwner", + "qualifiedName": "GetPodTopOwner", + "exported": true, + "signature": "func(string, []metav1.OwnerReference)(map[string]TopOwner, error)", + "doc": "GetPodTopOwner Finds the highest-level owners of a pod\n\nThis function starts with the namespace and owner references of a pod, then\nwalks through each reference to resolve the actual resource objects via\ndynamic client calls. It recursively follows owner chains until it reaches\nresources without further owners, recording those as top owners in a map\nkeyed by name. 
The result is returned along with any errors encountered\nduring resolution.", + "position": "/Users/deliedit/dev/certsuite/pkg/podhelper/podhelper.go:35", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "followOwnerReferences", + "kind": "function", + "source": [ + "func followOwnerReferences(resourceList []*metav1.APIResourceList, dynamicClient dynamic.Interface, topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference) (err error) {", + "\tfor _, ownerRef := range ownerRefs {", + "\t\tapiResource, err := searchAPIResource(ownerRef.Kind, ownerRef.APIVersion, resourceList)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"error searching APIResource for owner reference %v: %v\", ownerRef, err)", + "\t\t}", + "", + "\t\tgv, err := schema.ParseGroupVersion(ownerRef.APIVersion)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"failed to parse apiVersion %q: %v\", ownerRef.APIVersion, err)", + "\t\t}", + "", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: gv.Group,", + "\t\t\tVersion: gv.Version,", + "\t\t\tResource: apiResource.Name,", + "\t\t}", + "", + "\t\t// If the owner reference is a non-namespaced resource (like Node), we need to change the namespace to empty string.", + "\t\tif !apiResource.Namespaced {", + "\t\t\tnamespace = \"\"", + "\t\t}", + "", + "\t\t// Get the owner resource, but don't care if it's not found: it might happen for ocp jobs that are constantly", + "\t\t// spawned and removed after completion.", + "\t\tresource, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), ownerRef.Name, metav1.GetOptions{})", + "\t\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\t\treturn fmt.Errorf(\"could not get object indicated by owner references %+v (gvr=%+v): %v\", ownerRef, gvr, err)", + "\t\t}", + "", + "\t\t// Get owner references of the unstructured object", + "\t\townerReferences := resource.GetOwnerReferences()", + "\t\t// if no owner 
references, we have reached the top record it", + "\t\tif len(ownerReferences) == 0 {", + "\t\t\ttopOwners[ownerRef.Name] = TopOwner{APIVersion: ownerRef.APIVersion, Kind: ownerRef.Kind, Name: ownerRef.Name, Namespace: namespace}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\terr = followOwnerReferences(resourceList, dynamicClient, topOwners, namespace, ownerReferences)", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getOperandPodsFromTestCsvs", + "kind": "function", + "source": [ + "func getOperandPodsFromTestCsvs(testCsvs []*olmv1Alpha.ClusterServiceVersion, pods []corev1.Pod) ([]*corev1.Pod, error) {", + "\t// Helper var to store all the managed crds from the operators under test", + "\t// They map key is 
\"Kind.group/version\" or \"Kind.APIversion\", which should be the same.", + "\t// e.g.: \"Subscription.operators.coreos.com/v1alpha1\"", + "\tcrds := map[string]*olmv1Alpha.ClusterServiceVersion{}", + "", + "\t// First, iterate on each testCsv to fill the helper crds map.", + "\tfor _, csv := range testCsvs {", + "\t\townedCrds := csv.Spec.CustomResourceDefinitions.Owned", + "\t\tif len(ownedCrds) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range ownedCrds {", + "\t\t\tcrd := \u0026ownedCrds[i]", + "", + "\t\t\t_, group, found := strings.Cut(crd.Name, \".\")", + "\t\t\tif !found {", + "\t\t\t\treturn nil, fmt.Errorf(\"failed to parse resources and group from crd name %q\", crd.Name)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"CSV %q owns crd %v\", csv.Name, crd.Kind+\"/\"+group+\"/\"+crd.Version)", + "", + "\t\t\tcrdPath := path.Join(crd.Kind, group, crd.Version)", + "\t\t\tcrds[crdPath] = csv", + "\t\t}", + "\t}", + "", + "\t// Now, iterate on every pod in the list to check whether they're owned by any of the CRs that", + "\t// the csvs are managing.", + "\toperandPods := []*corev1.Pod{}", + "\tfor i := range pods {", + "\t\tpod := \u0026pods[i]", + "\t\towners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to get top owners of pod %v/%v: %v\", pod.Namespace, pod.Name, err)", + "\t\t}", + "", + "\t\tfor _, owner := range owners {", + "\t\t\tversionedCrdPath := path.Join(owner.Kind, owner.APIVersion)", + "", + "\t\t\tvar csv *olmv1Alpha.ClusterServiceVersion", + "\t\t\tif csv = crds[versionedCrdPath]; csv == nil {", + "\t\t\t\t// The owner is not a CR or it's not a CR owned by any operator under test", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Pod %v/%v has owner CR %s of CRD %q (CSV %v)\", pod.Namespace, pod.Name,", + "\t\t\t\towner.Name, versionedCrdPath, csv.Name)", + "", + "\t\t\toperandPods = append(operandPods, pod)", + "\t\t\tbreak", + 
"\t\t}", + "\t}", + "", + "\treturn operandPods, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getPodsOwnedByCsv", + "kind": "function", + "source": [ + "func getPodsOwnedByCsv(csvName, operatorNamespace string, client *clientsholder.ClientsHolder) (managedPods []*corev1.Pod, err error) {", + "\t// Get all pods from the target namespace", + "\tpodsList, err := client.K8sClient.CoreV1().Pods(operatorNamespace).List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tfor index := range podsList.Items {", + "\t\t// Get the top owners of the pod", + "\t\tpod := podsList.Items[index]", + "\t\ttopOwners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"could not get top owners of Pod %s (in namespace %s), err=%v\", pod.Name, pod.Namespace, err)", + "\t\t}", + "", + "\t\t// check if owner matches with the csv", + "\t\tfor _, owner := range topOwners {", + "\t\t\t// The owner must be in the targetNamespace", + "\t\t\tif owner.Kind == olmv1Alpha.ClusterServiceVersionKind \u0026\u0026 owner.Namespace == operatorNamespace \u0026\u0026 owner.Name == csvName {", + "\t\t\t\tmanagedPods = append(managedPods, \u0026podsList.Items[index])", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn managedPods, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.GetTopOwner", + "kind": "function", + "source": [ + "func (p *Pod) GetTopOwner() (topOwners map[string]podhelper.TopOwner, err error) {", + "\treturn podhelper.GetPodTopOwner(p.Namespace, p.OwnerReferences)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "findPodsNotBelongingToOperators", + "kind": "function", + "source": [ + "func findPodsNotBelongingToOperators(namespace string) 
(podsBelongingToNoOperators []string, err error) {", + "\tallPods := getAllPodsBy(namespace, env.AllPods)", + "\tfor index := range allPods {", + "\t\tpod := allPods[index]", + "\t\ttopOwners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn podsBelongingToNoOperators, err", + "\t\t}", + "", + "\t\tvalidOwnerFound := false", + "\t\tfor _, owner := range topOwners {", + "\t\t\tif owner.Kind == v1alpha1.ClusterServiceVersionKind \u0026\u0026 owner.Namespace == namespace {", + "\t\t\t\tvalidOwnerFound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !validOwnerFound {", + "\t\t\tpodsBelongingToNoOperators = append(podsBelongingToNoOperators, pod.Name)", + "\t\t}", + "\t}", + "", + "\treturn podsBelongingToNoOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) {", + "\ttopOwners = make(map[string]TopOwner)", + "\terr = followOwnerReferences(", + "\t\tclientsholder.GetClientsHolder().GroupResources,", + "\t\tclientsholder.GetClientsHolder().DynamicClient,", + "\t\ttopOwners,", + "\t\tpodNamespace,", + "\t\tpodOwnerReferences)", + "\tif err != nil {", + "\t\treturn topOwners, fmt.Errorf(\"could not get top owners, err: %v\", err)", + "\t}", + "\treturn topOwners, nil", + "}" + ] + }, + { + "name": "followOwnerReferences", + "qualifiedName": "followOwnerReferences", + "exported": false, + "signature": "func([]*metav1.APIResourceList, dynamic.Interface, map[string]TopOwner, string, []metav1.OwnerReference)(error)", + "doc": "followOwnerReferences traverses owner references to discover top‑level resources\n\nThe routine walks the chain of OwnerReference objects for a given Kubernetes\nresource, querying each referenced object until it reaches those without\nfurther owners. 
It records these highest-level owners in a map keyed by name,\nstoring API version, kind, and namespace information. Errors during lookup or\nparsing are returned to allow callers to handle missing or malformed\nreferences.", + "position": "/Users/deliedit/dev/certsuite/pkg/podhelper/podhelper.go:57", + "calls": [ + { + "name": "searchAPIResource", + "kind": "function", + "source": [ + "func searchAPIResource(kind, apiVersion string, apis []*metav1.APIResourceList) (*metav1.APIResource, error) {", + "\tfor _, api := range apis {", + "\t\tif api.GroupVersion != apiVersion {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range api.APIResources {", + "\t\t\tapiResource := \u0026api.APIResources[i]", + "", + "\t\t\tif kind == apiResource.Kind {", + "\t\t\t\treturn apiResource, nil", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil, fmt.Errorf(\"apiResource not found for kind=%v and APIVersion=%v\", kind, apiVersion)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/runtime/schema", + "name": "ParseGroupVersion", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "Namespace", + "kind": "function" + }, + { + "name": "Resource", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetOwnerReferences", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "followOwnerReferences", + "kind": "function", + "source": [ + "func followOwnerReferences(resourceList []*metav1.APIResourceList, dynamicClient dynamic.Interface, topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference) (err error) {", 
+ "\tfor _, ownerRef := range ownerRefs {", + "\t\tapiResource, err := searchAPIResource(ownerRef.Kind, ownerRef.APIVersion, resourceList)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"error searching APIResource for owner reference %v: %v\", ownerRef, err)", + "\t\t}", + "", + "\t\tgv, err := schema.ParseGroupVersion(ownerRef.APIVersion)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"failed to parse apiVersion %q: %v\", ownerRef.APIVersion, err)", + "\t\t}", + "", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: gv.Group,", + "\t\t\tVersion: gv.Version,", + "\t\t\tResource: apiResource.Name,", + "\t\t}", + "", + "\t\t// If the owner reference is a non-namespaced resource (like Node), we need to change the namespace to empty string.", + "\t\tif !apiResource.Namespaced {", + "\t\t\tnamespace = \"\"", + "\t\t}", + "", + "\t\t// Get the owner resource, but don't care if it's not found: it might happen for ocp jobs that are constantly", + "\t\t// spawned and removed after completion.", + "\t\tresource, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), ownerRef.Name, metav1.GetOptions{})", + "\t\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\t\treturn fmt.Errorf(\"could not get object indicated by owner references %+v (gvr=%+v): %v\", ownerRef, gvr, err)", + "\t\t}", + "", + "\t\t// Get owner references of the unstructured object", + "\t\townerReferences := resource.GetOwnerReferences()", + "\t\t// if no owner references, we have reached the top record it", + "\t\tif len(ownerReferences) == 0 {", + "\t\t\ttopOwners[ownerRef.Name] = TopOwner{APIVersion: ownerRef.APIVersion, Kind: ownerRef.Kind, Name: ownerRef.Name, Namespace: namespace}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\terr = followOwnerReferences(resourceList, dynamicClient, topOwners, namespace, ownerReferences)", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + 
"calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "GetPodTopOwner", + "kind": "function", + "source": [ + "func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) {", + "\ttopOwners = make(map[string]TopOwner)", + "\terr = followOwnerReferences(", + "\t\tclientsholder.GetClientsHolder().GroupResources,", + "\t\tclientsholder.GetClientsHolder().DynamicClient,", + "\t\ttopOwners,", + "\t\tpodNamespace,", + "\t\tpodOwnerReferences)", + "\tif err != nil {", + "\t\treturn topOwners, fmt.Errorf(\"could not get top owners, err: %v\", err)", + "\t}", + "\treturn topOwners, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "followOwnerReferences", + "kind": "function", + "source": [ + "func followOwnerReferences(resourceList []*metav1.APIResourceList, dynamicClient dynamic.Interface, topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference) (err error) {", + "\tfor _, ownerRef := range ownerRefs {", + "\t\tapiResource, err := searchAPIResource(ownerRef.Kind, ownerRef.APIVersion, resourceList)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"error searching APIResource for owner reference %v: %v\", ownerRef, err)", + "\t\t}", + "", + "\t\tgv, err := schema.ParseGroupVersion(ownerRef.APIVersion)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"failed to parse apiVersion %q: %v\", ownerRef.APIVersion, err)", + "\t\t}", + "", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: gv.Group,", + "\t\t\tVersion: gv.Version,", + "\t\t\tResource: apiResource.Name,", + "\t\t}", + "", + "\t\t// If the owner reference is a non-namespaced resource (like Node), we need to change the namespace to empty string.", + "\t\tif !apiResource.Namespaced {", + "\t\t\tnamespace = \"\"", + "\t\t}", + "", + "\t\t// Get the owner resource, but don't care if it's 
not found: it might happen for ocp jobs that are constantly", + "\t\t// spawned and removed after completion.", + "\t\tresource, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), ownerRef.Name, metav1.GetOptions{})", + "\t\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\t\treturn fmt.Errorf(\"could not get object indicated by owner references %+v (gvr=%+v): %v\", ownerRef, gvr, err)", + "\t\t}", + "", + "\t\t// Get owner references of the unstructured object", + "\t\townerReferences := resource.GetOwnerReferences()", + "\t\t// if no owner references, we have reached the top record it", + "\t\tif len(ownerReferences) == 0 {", + "\t\t\ttopOwners[ownerRef.Name] = TopOwner{APIVersion: ownerRef.APIVersion, Kind: ownerRef.Kind, Name: ownerRef.Name, Namespace: namespace}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\terr = followOwnerReferences(resourceList, dynamicClient, topOwners, namespace, ownerReferences)", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func followOwnerReferences(resourceList []*metav1.APIResourceList, dynamicClient dynamic.Interface, topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference) (err error) {", + "\tfor _, ownerRef := range ownerRefs {", + "\t\tapiResource, err := searchAPIResource(ownerRef.Kind, ownerRef.APIVersion, resourceList)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"error searching APIResource for owner reference %v: %v\", ownerRef, err)", + "\t\t}", + "", + "\t\tgv, err := schema.ParseGroupVersion(ownerRef.APIVersion)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"failed to parse apiVersion %q: %v\", ownerRef.APIVersion, err)", + "\t\t}", + "", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: gv.Group,", + "\t\t\tVersion: gv.Version,", + "\t\t\tResource: apiResource.Name,", + "\t\t}", + "", + "\t\t// If the 
owner reference is a non-namespaced resource (like Node), we need to change the namespace to empty string.", + "\t\tif !apiResource.Namespaced {", + "\t\t\tnamespace = \"\"", + "\t\t}", + "", + "\t\t// Get the owner resource, but don't care if it's not found: it might happen for ocp jobs that are constantly", + "\t\t// spawned and removed after completion.", + "\t\tresource, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), ownerRef.Name, metav1.GetOptions{})", + "\t\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\t\treturn fmt.Errorf(\"could not get object indicated by owner references %+v (gvr=%+v): %v\", ownerRef, gvr, err)", + "\t\t}", + "", + "\t\t// Get owner references of the unstructured object", + "\t\townerReferences := resource.GetOwnerReferences()", + "\t\t// if no owner references, we have reached the top record it", + "\t\tif len(ownerReferences) == 0 {", + "\t\t\ttopOwners[ownerRef.Name] = TopOwner{APIVersion: ownerRef.APIVersion, Kind: ownerRef.Kind, Name: ownerRef.Name, Namespace: namespace}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\terr = followOwnerReferences(resourceList, dynamicClient, topOwners, namespace, ownerReferences)", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "searchAPIResource", + "qualifiedName": "searchAPIResource", + "exported": false, + "signature": "func(string, string, []*metav1.APIResourceList)(*metav1.APIResource, error)", + "doc": "searchAPIResource Finds an API resource by kind and version\n\nThe function iterates through a list of APIResourceList objects, matching the\nsupplied group-version string to each list's GroupVersion field. Within each\nmatching list it scans the contained resources for one whose Kind equals the\nprovided kind value. 
If found, it returns a pointer to that resource;\notherwise it reports an error indicating no match was located.", + "position": "/Users/deliedit/dev/certsuite/pkg/podhelper/podhelper.go:111", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "followOwnerReferences", + "kind": "function", + "source": [ + "func followOwnerReferences(resourceList []*metav1.APIResourceList, dynamicClient dynamic.Interface, topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference) (err error) {", + "\tfor _, ownerRef := range ownerRefs {", + "\t\tapiResource, err := searchAPIResource(ownerRef.Kind, ownerRef.APIVersion, resourceList)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"error searching APIResource for owner reference %v: %v\", ownerRef, err)", + "\t\t}", + "", + "\t\tgv, err := schema.ParseGroupVersion(ownerRef.APIVersion)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"failed to parse apiVersion %q: %v\", ownerRef.APIVersion, err)", + "\t\t}", + "", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: gv.Group,", + "\t\t\tVersion: gv.Version,", + "\t\t\tResource: apiResource.Name,", + "\t\t}", + "", + "\t\t// If the owner reference is a non-namespaced resource (like Node), we need to change the namespace to empty string.", + "\t\tif !apiResource.Namespaced {", + "\t\t\tnamespace = \"\"", + "\t\t}", + "", + "\t\t// Get the owner resource, but don't care if it's not found: it might happen for ocp jobs that are constantly", + "\t\t// spawned and removed after completion.", + "\t\tresource, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), ownerRef.Name, metav1.GetOptions{})", + "\t\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\t\treturn fmt.Errorf(\"could not get object indicated by owner references %+v (gvr=%+v): %v\", ownerRef, gvr, 
err)", + "\t\t}", + "", + "\t\t// Get owner references of the unstructured object", + "\t\townerReferences := resource.GetOwnerReferences()", + "\t\t// if no owner references, we have reached the top record it", + "\t\tif len(ownerReferences) == 0 {", + "\t\t\ttopOwners[ownerRef.Name] = TopOwner{APIVersion: ownerRef.APIVersion, Kind: ownerRef.Kind, Name: ownerRef.Name, Namespace: namespace}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\terr = followOwnerReferences(resourceList, dynamicClient, topOwners, namespace, ownerReferences)", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func searchAPIResource(kind, apiVersion string, apis []*metav1.APIResourceList) (*metav1.APIResource, error) {", + "\tfor _, api := range apis {", + "\t\tif api.GroupVersion != apiVersion {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tfor i := range api.APIResources {", + "\t\t\tapiResource := \u0026api.APIResources[i]", + "", + "\t\t\tif kind == apiResource.Kind {", + "\t\t\t\treturn apiResource, nil", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil, fmt.Errorf(\"apiResource not found for kind=%v and APIVersion=%v\", kind, apiVersion)", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/postmortem", + "name": "postmortem", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "k8s.io/api/core/v1" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "Log", + "qualifiedName": "Log", + "exported": true, + "signature": "func()(string)", + "doc": "Log Provides a diagnostic snapshot of the test environment\n\nThe function retrieves the current test environment, refreshes its state, and\nthen builds a multiline string summarizing node taints, pending pods that are\nnot running or succeeded, and any abnormal 
events. It loops over nodes to\nlist their names and taint configurations, iterates through all pods\nfiltering by status, and appends each relevant pod's string representation.\nFinally, it gathers abnormal events from the environment and returns the\ncombined output.", + "position": "/Users/deliedit/dev/certsuite/pkg/postmortem/postmortem.go:35", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "name": "SetNeedsRefresh", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + 
"\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Log() (out string) {", + "\t// Get current environment", + "\tenv := provider.GetTestEnvironment()", + "", + "\t// Set refresh", + "\tenv.SetNeedsRefresh()", + "", + "\t// Get up-to-date environment", + "\tenv = provider.GetTestEnvironment()", + "", + "\tout += \"\\nNode Status:\\n\"", + "\tfor _, n := range env.Nodes {", + "\t\tout += fmt.Sprintf(\"node name=%s taints=%+v\", n.Data.Name, n.Data.Spec.Taints) + \"\\n\"", + "\t}", + "\tout += \"\\nPending Pods:\\n\"", + "\tfor _, p := range env.AllPods {", + "\t\tif p.Status.Phase != corev1.PodSucceeded \u0026\u0026 p.Status.Phase != corev1.PodRunning {", + "\t\t\tout += p.String() + \"\\n\"", + "\t\t}", + "\t}", + "\tout += \"\\nAbnormal events:\\n\"", + "\tfor _, e := range env.AbnormalEvents {", + "\t\tout += e.String() + \"\\n\"", + "\t}", + "\treturn out", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + 
"name": "provider", + "files": 13, + "imports": [ + "bytes", + "context", + "encoding/json", + "errors", + "fmt", + "github.com/Masterminds/semver/v3", + "github.com/go-logr/logr", + "github.com/go-logr/stdr", + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1", + "github.com/openshift/api/config/v1", + "github.com/openshift/api/machineconfiguration/v1", + "github.com/operator-framework/api/pkg/operators/v1", + "github.com/operator-framework/api/pkg/operators/v1alpha1", + "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/apis/operators/v1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/operatingsystem", + "github.com/redhat-best-practices-for-k8s/privileged-daemonset", + "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "github.com/redhat-openshift-ecosystem/openshift-preflight/certification", + "github.com/redhat-openshift-ecosystem/openshift-preflight/container", + "github.com/redhat-openshift-ecosystem/openshift-preflight/operator", + "helm.sh/helm/v3/pkg/release", + "k8s.io/api/apps/v1", + "k8s.io/api/autoscaling/v1", + "k8s.io/api/core/v1", + "k8s.io/api/networking/v1", + "k8s.io/api/policy/v1", + "k8s.io/api/rbac/v1", + "k8s.io/api/storage/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "k8s.io/apimachinery/pkg/runtime/schema", + 
"k8s.io/apimachinery/pkg/types", + "k8s.io/client-go/kubernetes/typed/apps/v1", + "k8s.io/client-go/scale", + "log", + "os", + "regexp", + "sort", + "strconv", + "strings", + "time" + ], + "structs": [ + { + "name": "CniNetworkInterface", + "exported": true, + "doc": "CniNetworkInterface Represents a network interface configured by CNI\n\nThis struct holds details about a pod’s network attachment, including the\ninterface name, assigned IP addresses, whether it is the default route, DNS\nsettings, and additional device metadata. The fields are populated from the\nKubernetes annotation that lists all attached networks for a pod.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:183", + "fields": { + "DNS": "map[string]interface{}", + "Default": "bool", + "DeviceInfo": "deviceInfo", + "IPs": "[]string", + "Interface": "string", + "Name": "string" + }, + "methodNames": null, + "source": [ + "type CniNetworkInterface struct {", + "\tName string `json:\"name\"`", + "\tInterface string `json:\"interface\"`", + "\tIPs []string `json:\"ips\"`", + "\tDefault bool `json:\"default\"`", + "\tDNS map[string]interface{} `json:\"dns\"`", + "\tDeviceInfo deviceInfo `json:\"device-info\"`", + "}" + ] + }, + { + "name": "Container", + "exported": true, + "doc": "Container Represents a Kubernetes container with its status and metadata\n\nThis structure holds information about a container running in a pod,\nincluding the core container spec, runtime details, node assignment, and\nnamespace. It tracks the container’s current state through the status field\nand stores a unique identifier for the container instance. 
The struct also\nkeeps an image identifier and any preflight test results that have been run\nagainst the container.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:73", + "fields": { + "ContainerImageIdentifier": "ContainerImageIdentifier", + "Namespace": "string", + "NodeName": "string", + "Podname": "string", + "PreflightResults": "PreflightResultsDB", + "Runtime": "string", + "Status": "corev1.ContainerStatus", + "UID": "string", + "embedded:*corev1.Container": "*corev1.Container" + }, + "methodNames": [ + "GetUID", + "HasExecProbes", + "HasIgnoredContainerName", + "IsContainerRunAsNonRoot", + "IsContainerRunAsNonRootUserID", + "IsIstioProxy", + "IsReadOnlyRootFilesystem", + "IsTagEmpty", + "SetPreflightResults", + "String", + "StringLong" + ], + "source": [ + "type Container struct {", + "\t*corev1.Container", + "\tStatus corev1.ContainerStatus", + "\tNamespace string", + "\tPodname string", + "\tNodeName string", + "\tRuntime string", + "\tUID string", + "\tContainerImageIdentifier ContainerImageIdentifier", + "\tPreflightResults PreflightResultsDB", + "}" + ] + }, + { + "name": "ContainerImageIdentifier", + "exported": true, + "doc": "ContainerImageIdentifier Represents a container image reference with optional tag or digest\n\nThis structure holds the components of a container image: registry,\nrepository name, an optional tag, and an optional digest. When both tag and\ndigest are provided, the digest is used to uniquely identify the image,\noverriding the tag. 
The fields map directly to YAML and JSON keys for easy\nserialization.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:50", + "fields": { + "Digest": "string", + "Registry": "string", + "Repository": "string", + "Tag": "string" + }, + "methodNames": null, + "source": [ + "type ContainerImageIdentifier struct {", + "\t// Repository is the name of the image that you want to check if exists in the RedHat catalog", + "\tRepository string `yaml:\"repository\" json:\"repository\"`", + "", + "\t// Registry is the name of the registry `docker.io` of the container", + "\t// This is valid for container only and required field", + "\tRegistry string `yaml:\"registry\" json:\"registry\"`", + "", + "\t// Tag is the optional image tag. \"latest\" is implied if not specified", + "\tTag string `yaml:\"tag\" json:\"tag\"`", + "", + "\t// Digest is the image digest following the \"@\" in a URL, e.g. image@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2", + "\tDigest string `yaml:\"digest\" json:\"digest\"`", + "}" + ] + }, + { + "name": "CrScale", + "exported": true, + "doc": "CrScale Wraps a scale object with status tracking\n\nThis type extends the base scaling API object by embedding its fields and\nproviding helper methods to inspect readiness and generate a concise string\nrepresentation. 
The embedded struct contains both specification and current\nstatus, allowing direct access to replica counts and other properties.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/scale_object.go:21", + "fields": { + "embedded:*scalingv1.Scale": "*scalingv1.Scale" + }, + "methodNames": [ + "IsScaleObjectReady", + "ToString" + ], + "source": [ + "type CrScale struct {", + "\t*scalingv1.Scale", + "}" + ] + }, + { + "name": "CsvInstallPlan", + "exported": true, + "doc": "CsvInstallPlan Describes an operator's install plan details\n\nThis structure holds the name of the install plan along with URLs for both\nthe bundle image and the index image used in the installation process. It is\nprimarily utilized to convey necessary information when creating or managing\noperator deployments, ensuring that the correct images are referenced during\ninstallation.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:78", + "fields": { + "BundleImage": "string", + "IndexImage": "string", + "Name": "string" + }, + "methodNames": null, + "source": [ + "type CsvInstallPlan struct {", + "\t// Operator's installPlan name", + "\tName string `yaml:\"name\" json:\"name\"`", + "\t// BundleImage is the URL referencing the bundle image", + "\tBundleImage string `yaml:\"bundleImage\" json:\"bundleImage\"`", + "\t// IndexImage is the URL referencing the index image", + "\tIndexImage string `yaml:\"indexImage\" json:\"indexImage\"`", + "}" + ] + }, + { + "name": "Deployment", + "exported": true, + "doc": "Deployment Represents a Kubernetes deployment with helper methods\n\nThis type wraps the standard appsv1.Deployment object to provide convenient\noperations such as checking readiness and generating a string representation.\nIt exposes the embedded Deployment fields directly while adding methods that\nevaluate status conditions and replica counts for quick health checks.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/deployments.go:33", + "fields": { 
+ "embedded:*appsv1.Deployment": "*appsv1.Deployment" + }, + "methodNames": [ + "IsDeploymentReady", + "ToString" + ], + "source": [ + "type Deployment struct {", + "\t*appsv1.Deployment", + "}" + ] + }, + { + "name": "Event", + "exported": true, + "doc": "Event Represents a Kubernetes event with access to all core event data\n\nThe type embeds the standard Kubernetes Event structure, giving it direct\naccess to fields such as CreationTimestamp, InvolvedObject, Reason, and\nMessage. It provides a convenient String method that formats these key\nproperties into a single readable string for logging or debugging purposes.\nThis struct is used throughout the provider package to encapsulate event\ninformation while keeping the original corev1.Event behavior intact.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/events.go:33", + "fields": { + "embedded:*corev1.Event": "*corev1.Event" + }, + "methodNames": [ + "String" + ], + "source": [ + "type Event struct {", + "\t*corev1.Event", + "}" + ] + }, + { + "name": "MachineConfig", + "exported": true, + "doc": "MachineConfig Encapsulates a machine configuration including systemd unit definitions\n\nThe structure embeds the core machine configuration type from the Kubernetes\nAPI, adding a Config field that contains systemd unit information. It holds\nan array of unit descriptors, each specifying a name and contents for a\nsystemd service file. 
This representation is used to unmarshal the raw JSON\nof a MachineConfig resource into usable Go objects.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:165", + "fields": { + "Config": "struct{Systemd struct{Units []struct{Contents string; Name string}}}", + "embedded:*mcv1.MachineConfig": "*mcv1.MachineConfig" + }, + "methodNames": null, + "source": [ + "type MachineConfig struct {", + "\t*mcv1.MachineConfig", + "\tConfig struct {", + "\t\tSystemd struct {", + "\t\t\tUnits []struct {", + "\t\t\t\tContents string `json:\"contents\"`", + "\t\t\t\tName string `json:\"name\"`", + "\t\t\t} `json:\"units\"`", + "\t\t} `json:\"systemd\"`", + "\t} `json:\"config\"`", + "}" + ] + }, + { + "name": "Node", + "exported": true, + "doc": "Node Encapsulates a Kubernetes node with optional machine configuration\n\nThis structure holds a reference to the underlying corev1.Node object,\nproviding convenient access to node metadata and status information. It\noptionally includes a MachineConfig for nodes managed by OpenShift, enabling\nretrieval of configuration details such as kernel settings or custom\nannotations. 
The struct’s methods offer helpers for OS detection, role\nidentification, workload presence, and JSON serialization.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:40", + "fields": { + "Data": "*corev1.Node", + "Mc": "MachineConfig" + }, + "methodNames": [ + "GetCSCOSVersion", + "GetRHCOSVersion", + "GetRHELVersion", + "HasWorkloadDeployed", + "IsCSCOS", + "IsControlPlaneNode", + "IsHyperThreadNode", + "IsRHCOS", + "IsRHEL", + "IsRTKernel", + "IsWorkerNode", + "MarshalJSON" + ], + "source": [ + "type Node struct {", + "\tData *corev1.Node", + "\tMc MachineConfig `json:\"-\"`", + "}" + ] + }, + { + "name": "Operator", + "exported": true, + "doc": "Operator represents an installed operator within a cluster\n\nThis data structure holds metadata about an operator, including its name,\nnamespace, deployment phase, subscription details, package information, and\nany associated install plans. It also tracks whether the operator is\ncluster‑wide or scoped to specific namespaces and stores preflight test\nresults for validation. 
The fields provide a comprehensive view of an\noperator’s state and configuration used by the certification framework.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:52", + "fields": { + "Channel": "string", + "Csv": "*olmv1Alpha.ClusterServiceVersion", + "InstallPlans": "[]CsvInstallPlan", + "IsClusterWide": "bool", + "Name": "string", + "Namespace": "string", + "OperandPods": "map[string]*Pod", + "Org": "string", + "Package": "string", + "PackageFromCsvName": "string", + "Phase": "olmv1Alpha.ClusterServiceVersionPhase", + "PreflightResults": "PreflightResultsDB", + "SubscriptionName": "string", + "SubscriptionNamespace": "string", + "TargetNamespaces": "[]string", + "Version": "string" + }, + "methodNames": [ + "SetPreflightResults", + "String" + ], + "source": [ + "type Operator struct {", + "\tName string `yaml:\"name\" json:\"name\"`", + "\tNamespace string `yaml:\"namespace\" json:\"namespace\"`", + "\tTargetNamespaces []string `yaml:\"targetNamespaces\" json:\"targetNamespaces,omitempty\"`", + "\tIsClusterWide bool `yaml:\"isClusterWide\" json:\"isClusterWide\"`", + "\tCsv *olmv1Alpha.ClusterServiceVersion `yaml:\"csv,omitempty\" json:\"csv,omitempty\"`", + "\tPhase olmv1Alpha.ClusterServiceVersionPhase `yaml:\"csvphase\" json:\"csvphase\"`", + "\tSubscriptionName string `yaml:\"subscriptionName\" json:\"subscriptionName\"`", + "\tSubscriptionNamespace string `yaml:\"subscriptionNamespace\" json:\"subscriptionNamespace\"`", + "\tInstallPlans []CsvInstallPlan `yaml:\"installPlans,omitempty\" json:\"installPlans,omitempty\"`", + "\tPackage string `yaml:\"package\" json:\"package\"`", + "\tOrg string `yaml:\"org\" json:\"org\"`", + "\tVersion string `yaml:\"version\" json:\"version\"`", + "\tChannel string `yaml:\"channel\" json:\"channel\"`", + "\tPackageFromCsvName string `yaml:\"packagefromcsvname\" json:\"packagefromcsvname\"`", + "\tPreflightResults PreflightResultsDB `yaml:\"operandPods\" json:\"operandPods\"`", + "\tOperandPods 
map[string]*Pod", + "}" + ] + }, + { + "name": "Pod", + "exported": true, + "doc": "Pod Represents a Kubernetes pod with extended metadata and helper methods\n\nThis structure embeds the corev1.Pod type and adds fields that track\nadditional information such as service account mappings, container lists,\nnetwork interface data, PCI device references, and flags indicating whether\nthe pod is an operator or operand. It also provides boolean indicators for\nskipping certain tests. The struct’s methods offer utilities for examining\nresource guarantees, CPU isolation compliance, affinity requirements,\nSR‑IOV usage, and other security and configuration checks.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:54", + "fields": { + "AllServiceAccountsMap": "*map[string]*corev1.ServiceAccount", + "Containers": "[]*Container", + "IsOperand": "bool", + "IsOperator": "bool", + "MultusNetworkInterfaces": "map[string]CniNetworkInterface", + "MultusPCIs": "[]string", + "SkipMultusNetTests": "bool", + "SkipNetTests": "bool", + "embedded:*corev1.Pod": "*corev1.Pod" + }, + "methodNames": [ + "AffinityRequired", + "CheckResourceHugePagesSize", + "ContainsIstioProxy", + "CreatedByDeploymentConfig", + "GetRunAsNonRootFalseContainers", + "GetTopOwner", + "HasHugepages", + "HasNodeSelector", + "IsAffinityCompliant", + "IsAutomountServiceAccountSetOnSA", + "IsCPUIsolationCompliant", + "IsPodGuaranteed", + "IsPodGuaranteedWithExclusiveCPUs", + "IsRunAsUserID", + "IsRuntimeClassNameSpecified", + "IsShareProcessNamespace", + "IsUsingClusterRoleBinding", + "IsUsingSRIOV", + "IsUsingSRIOVWithMTU", + "String" + ], + "source": [ + "type Pod struct {", + "\t*corev1.Pod", + "\tAllServiceAccountsMap *map[string]*corev1.ServiceAccount", + "\tContainers []*Container", + "\tMultusNetworkInterfaces map[string]CniNetworkInterface", + "\tMultusPCIs []string", + "\tSkipNetTests bool", + "\tSkipMultusNetTests bool", + "\tIsOperator bool", + "\tIsOperand bool", + "}" + ] + }, + { + 
"name": "PreflightResultsDB", + "exported": true, + "doc": "PreflightResultsDB Stores the outcomes of preflight checks for a container image\n\nThis structure holds lists of tests that passed, failed, or encountered\nerrors during a preflight run. Each entry contains the test name,\ndescription, remediation guidance, and any error message if applicable. The\ndata is used to report results back to callers and can be cached for reuse.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:250", + "fields": { + "Errors": "[]PreflightTest", + "Failed": "[]PreflightTest", + "Passed": "[]PreflightTest" + }, + "methodNames": null, + "source": [ + "type PreflightResultsDB struct {", + "\tPassed []PreflightTest", + "\tFailed []PreflightTest", + "\tErrors []PreflightTest", + "}" + ] + }, + { + "name": "PreflightTest", + "exported": true, + "doc": "PreflightTest Represents the outcome of a pre‑flight check\n\nThis structure holds information about a single test performed before\ndeployment, including its name, a description of what it verifies, an\noptional error if the test failed, and suggested remediation steps. When the\nError field is nil, the test succeeded; otherwise the value explains why it\ndid not pass. The struct can be used to report results in logs or user\ninterfaces.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:237", + "fields": { + "Description": "string", + "Error": "error", + "Name": "string", + "Remediation": "string" + }, + "methodNames": null, + "source": [ + "type PreflightTest struct {", + "\tName string", + "\tDescription string", + "\tRemediation string", + "\tError error", + "}" + ] + }, + { + "name": "ScaleObject", + "exported": true, + "doc": "ScaleObject Represents a Kubernetes custom resource scaling configuration\n\nThis struct holds the desired scale for a custom resource along with its\ngroup and resource identifiers. 
The Scale field contains the target number of\nreplicas, while GroupResourceSchema specifies which API group and kind it\napplies to. It is used by provider functions to adjust or query resource\nscaling settings.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:199", + "fields": { + "GroupResourceSchema": "schema.GroupResource", + "Scale": "CrScale" + }, + "methodNames": null, + "source": [ + "type ScaleObject struct {", + "\tScale CrScale", + "\tGroupResourceSchema schema.GroupResource", + "}" + ] + }, + { + "name": "StatefulSet", + "exported": true, + "doc": "StatefulSet Encapsulates a Kubernetes StatefulSet for simplified management\n\nThe structure embeds the official StatefulSet type, allowing direct access to\nits fields while providing helper methods. It offers functionality to\ndetermine if the set is fully ready and to produce a concise string\nrepresentation of its identity.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/statefulsets.go:33", + "fields": { + "embedded:*appsv1.StatefulSet": "*appsv1.StatefulSet" + }, + "methodNames": [ + "IsStatefulSetReady", + "ToString" + ], + "source": [ + "type StatefulSet struct {", + "\t*appsv1.StatefulSet", + "}" + ] + }, + { + "name": "TestEnvironment", + "exported": true, + "doc": "TestEnvironment Provides runtime information for test execution\n\nThis struct holds configuration, cluster state, and collected resources\nneeded during tests. It tracks pods, nodes, operators, catalogs, and various\nKubernetes objects while exposing helper methods to filter them by\ncharacteristics such as CPU isolation or affinity requirements. 
The data is\npopulated from the test harness and can be refreshed when the underlying\nenvironment changes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:83", + "fields": { + "AbnormalEvents": "[]*Event", + "AllCatalogSources": "[]*olmv1Alpha.CatalogSource", + "AllCrds": "[]*apiextv1.CustomResourceDefinition", + "AllCsvs": "[]*olmv1Alpha.ClusterServiceVersion", + "AllInstallPlans": "[]*olmv1Alpha.InstallPlan", + "AllOperators": "[]*Operator", + "AllOperatorsSummary": "[]string", + "AllPackageManifests": "[]*olmpkgv1.PackageManifest", + "AllPods": "[]*Pod", + "AllServiceAccounts": "[]*corev1.ServiceAccount", + "AllServiceAccountsMap": "map[string]*corev1.ServiceAccount", + "AllServices": "[]*corev1.Service", + "AllSriovNetworkNodePolicies": "[]unstructured.Unstructured", + "AllSriovNetworks": "[]unstructured.Unstructured", + "AllSubscriptions": "[]olmv1Alpha.Subscription", + "CSVToPodListMap": "map[string][]*Pod", + "ClusterOperators": "[]configv1.ClusterOperator", + "ClusterRoleBindings": "[]rbacv1.ClusterRoleBinding", + "CollectorAppEndpoint": "string", + "CollectorAppPassword": "string", + "Config": "configuration.TestConfiguration", + "ConnectAPIBaseURL": "string", + "ConnectAPIKey": "string", + "ConnectAPIProxyPort": "string", + "ConnectAPIProxyURL": "string", + "ConnectProjectID": "string", + "Containers": "[]*Container", + "Crds": "[]*apiextv1.CustomResourceDefinition", + "DaemonsetFailedToSpawn": "bool", + "Deployments": "[]*Deployment", + "ExecutedBy": "string", + "HelmChartReleases": "[]*release.Release", + "HorizontalScaler": "[]*scalingv1.HorizontalPodAutoscaler", + "IstioServiceMeshFound": "bool", + "K8sVersion": "string", + "Namespaces": "[]string", + "NetworkAttachmentDefinitions": "[]nadClient.NetworkAttachmentDefinition", + "NetworkPolicies": "[]networkingv1.NetworkPolicy", + "Nodes": "map[string]Node", + "OCPStatus": "string", + "OpenshiftVersion": "string", + "OperatorGroups": "[]*olmv1.OperatorGroup", + "Operators": 
"[]*Operator", + "PartnerName": "string", + "PersistentVolumeClaims": "[]corev1.PersistentVolumeClaim", + "PersistentVolumes": "[]corev1.PersistentVolume", + "PodDisruptionBudgets": "[]policyv1.PodDisruptionBudget", + "PodStates": "autodiscover.PodStates", + "Pods": "[]*Pod", + "ProbePods": "map[string]*corev1.Pod", + "ResourceQuotas": "[]corev1.ResourceQuota", + "RoleBindings": "[]rbacv1.RoleBinding", + "Roles": "[]rbacv1.Role", + "ScaleCrUnderTest": "[]ScaleObject", + "ServiceAccounts": "[]*corev1.ServiceAccount", + "Services": "[]*corev1.Service", + "SkipPreflight": "bool", + "SriovNetworkNodePolicies": "[]unstructured.Unstructured", + "SriovNetworks": "[]unstructured.Unstructured", + "StatefulSets": "[]*StatefulSet", + "StorageClassList": "[]storagev1.StorageClass", + "ValidProtocolNames": "[]string", + "params": "configuration.TestParameters" + }, + "methodNames": [ + "GetAffinityRequiredPods", + "GetBaremetalNodes", + "GetCPUPinningPodsWithDpdk", + "GetDockerConfigFile", + "GetGuaranteedPodContainersWithExclusiveCPUs", + "GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "GetGuaranteedPods", + "GetGuaranteedPodsWithExclusiveCPUs", + "GetGuaranteedPodsWithIsolatedCPUs", + "GetHugepagesPods", + "GetMasterCount", + "GetNonGuaranteedPodContainersWithoutHostPID", + "GetNonGuaranteedPods", + "GetOfflineDBPath", + "GetPodsUsingSRIOV", + "GetPodsWithoutAffinityRequiredLabel", + "GetShareProcessNamespacePods", + "GetWorkerCount", + "IsIntrusive", + "IsPreflightInsecureAllowed", + "IsSNO", + "SetNeedsRefresh" + ], + "source": [ + "type TestEnvironment struct { // rename this with testTarget", + "\tNamespaces []string `json:\"testNamespaces\"`", + "\tAbnormalEvents []*Event", + "", + "\t// Pod Groupings", + "\tPods []*Pod `json:\"testPods\"`", + "\tProbePods map[string]*corev1.Pod // map from nodename to probePod", + "\tAllPods []*Pod `json:\"AllPods\"`", + "\tCSVToPodListMap map[string][]*Pod 
`json:\"CSVToPodListMap\"`", + "\tPodStates autodiscover.PodStates `json:\"podStates\"`", + "", + "\t// Deployment Groupings", + "\tDeployments []*Deployment `json:\"testDeployments\"`", + "\t// StatefulSet Groupings", + "\tStatefulSets []*StatefulSet `json:\"testStatefulSets\"`", + "", + "\t// Note: Containers is a filtered list of objects based on a block list of disallowed container names.", + "\tContainers []*Container `json:\"testContainers\"`", + "\tOperators []*Operator `json:\"testOperators\"`", + "\tAllOperators []*Operator `json:\"AllOperators\"`", + "\tAllOperatorsSummary []string `json:\"AllOperatorsSummary\"`", + "\tAllCsvs []*olmv1Alpha.ClusterServiceVersion", + "\tPersistentVolumes []corev1.PersistentVolume", + "\tPersistentVolumeClaims []corev1.PersistentVolumeClaim", + "\tClusterRoleBindings []rbacv1.ClusterRoleBinding", + "\tRoleBindings []rbacv1.RoleBinding", + "\tRoles []rbacv1.Role", + "", + "\tConfig configuration.TestConfiguration", + "\tparams configuration.TestParameters", + "\tCrds []*apiextv1.CustomResourceDefinition `json:\"testCrds\"`", + "\tAllCrds []*apiextv1.CustomResourceDefinition", + "", + "\tHorizontalScaler []*scalingv1.HorizontalPodAutoscaler `json:\"testHorizontalScaler\"`", + "\tServices []*corev1.Service `json:\"testServices\"`", + "\tAllServices []*corev1.Service `json:\"testAllServices\"`", + "\tServiceAccounts []*corev1.ServiceAccount `json:\"testServiceAccounts\"`", + "\tAllServiceAccounts []*corev1.ServiceAccount `json:\"AllServiceAccounts\"`", + "\tAllServiceAccountsMap map[string]*corev1.ServiceAccount", + "\tNodes map[string]Node `json:\"-\"`", + "\tK8sVersion string `json:\"-\"`", + "\tOpenshiftVersion string `json:\"-\"`", + "\tOCPStatus string `json:\"-\"`", + "\tHelmChartReleases []*release.Release `json:\"testHelmChartReleases\"`", + "\tResourceQuotas []corev1.ResourceQuota", + "\tPodDisruptionBudgets []policyv1.PodDisruptionBudget", + "\tNetworkPolicies []networkingv1.NetworkPolicy", + "\tAllInstallPlans 
[]*olmv1Alpha.InstallPlan `json:\"AllInstallPlans\"`", + "\tAllSubscriptions []olmv1Alpha.Subscription `json:\"AllSubscriptions\"`", + "\tAllCatalogSources []*olmv1Alpha.CatalogSource `json:\"AllCatalogSources\"`", + "\tAllPackageManifests []*olmpkgv1.PackageManifest `json:\"AllPackageManifests\"`", + "\tOperatorGroups []*olmv1.OperatorGroup `json:\"OperatorGroups\"`", + "\tSriovNetworks []unstructured.Unstructured", + "\tAllSriovNetworks []unstructured.Unstructured", + "\tSriovNetworkNodePolicies []unstructured.Unstructured", + "\tAllSriovNetworkNodePolicies []unstructured.Unstructured", + "\tNetworkAttachmentDefinitions []nadClient.NetworkAttachmentDefinition", + "\tClusterOperators []configv1.ClusterOperator", + "\tIstioServiceMeshFound bool", + "\tValidProtocolNames []string", + "\tDaemonsetFailedToSpawn bool", + "\tScaleCrUnderTest []ScaleObject", + "\tStorageClassList []storagev1.StorageClass", + "\tExecutedBy string", + "\tPartnerName string", + "\tCollectorAppPassword string", + "\tCollectorAppEndpoint string", + "\tConnectAPIKey string", + "\tConnectProjectID string", + "\tConnectAPIBaseURL string", + "\tConnectAPIProxyURL string", + "\tConnectAPIProxyPort string", + "\tSkipPreflight bool", + "}" + ] + }, + { + "name": "deviceInfo", + "exported": false, + "doc": "deviceInfo Holds low-level device details\n\nThis struct stores information about a device, including its type and version\nstrings as well as a PCI configuration structure. The PCI field contains the\nspecific bus, device, and function identifiers that enable precise hardware\nidentification. 
Together, these fields provide a compact representation of\nthe device’s identity for use in diagnostics or policy enforcement.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:211", + "fields": { + "PCI": "pci", + "Type": "string", + "Version": "string" + }, + "methodNames": null, + "source": [ + "type deviceInfo struct {", + "\tType string `json:\"type\"`", + "\tVersion string `json:\"version\"`", + "\tPCI pci `json:\"pci\"`", + "}" + ] + }, + { + "name": "pci", + "exported": false, + "doc": "pci Represents a PCI device address\n\nThis type holds the string representation of a PCI bus, device, and function\nidentifier used by the provider to locate hardware resources. The single\nfield contains the address formatted as \"domain:bus:device.function\" or a\nsimplified form compatible with the system's PCI enumeration. It is utilized\ninternally when mapping certificates or configurations to specific hardware\ncomponents.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:225", + "fields": { + "PciAddress": "string" + }, + "methodNames": null, + "source": [ + "type pci struct {", + "\tPciAddress string `json:\"pci-address\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "AreCPUResourcesWholeUnits", + "qualifiedName": "AreCPUResourcesWholeUnits", + "exported": true, + "signature": "func(*Pod)(bool)", + "doc": "AreCPUResourcesWholeUnits Verifies that all CPU requests and limits are whole units\n\nThe function iterates over each container in a pod, ensuring both CPU\nrequests and limits are defined and expressed as multiples of one . If any\ncontainer lacks these specifications or has non‑whole‑unit values, it\nlogs the issue and returns false. 
When all containers meet the criteria, it\nreturns true.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/isolation.go:67", + "calls": [ + { + "name": "MilliValue", + "kind": "function" + }, + { + "name": "Cpu", + "kind": "function" + }, + { + "name": "MilliValue", + "kind": "function" + }, + { + "name": "Cpu", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "isInteger", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "isInteger", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + 
"name": "Pod.IsPodGuaranteedWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool {", + "\treturn AreCPUResourcesWholeUnits(p) \u0026\u0026 AreResourcesIdentical(p)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func AreCPUResourcesWholeUnits(p *Pod) bool {", + "\tisInteger := func(val int64) bool {", + "\t\treturn val%1000 == 0", + "\t}", + "", + "\t// Pods may contain more than one container. All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// Resources must be specified", + "\t\tcpuRequestsMillis := cut.Resources.Requests.Cpu().MilliValue()", + "\t\tcpuLimitsMillis := cut.Resources.Limits.Cpu().MilliValue()", + "", + "\t\tif cpuRequestsMillis == 0 || cpuLimitsMillis == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined requests or limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !isInteger(cpuRequestsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU requests %d (milli) that has to be a whole unit.\", cut.String(), cpuRequestsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t\tif !isInteger(cpuLimitsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU limits %d (milli) that has to be a whole unit.\", cut.String(), cpuLimitsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "name": "AreResourcesIdentical", + "qualifiedName": "AreResourcesIdentical", + "exported": true, + "signature": "func(*Pod)(bool)", + "doc": "AreResourcesIdentical Verifies that CPU and memory requests match limits for every container in a pod\n\nThe function iterates over all containers in the supplied pod, ensuring each\nhas defined resource limits. It compares the request values to their\ncorresponding limits for both CPU and memory; if any mismatch is found, it\nlogs a debug message and returns false. 
When all containers satisfy these\nconditions, the function returns true.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/isolation.go:30", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "Cpu", + "kind": "function" + }, + { + "name": "Cpu", + "kind": "function" + }, + { + "name": "Memory", + "kind": "function" + }, + { + "name": "Memory", + "kind": "function" + }, + { + "name": "Equal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "AsApproximateFloat64", + "kind": "function" + }, + { + "name": "AsApproximateFloat64", + "kind": "function" + }, + { + "name": "Equal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", 
+ "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "AsApproximateFloat64", + "kind": "function" + }, + { + "name": "AsApproximateFloat64", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsPodGuaranteed", + "kind": "function", + "source": [ + "func (p *Pod) IsPodGuaranteed() bool {", + "\treturn AreResourcesIdentical(p)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsPodGuaranteedWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool {", + "\treturn AreCPUResourcesWholeUnits(p) \u0026\u0026 AreResourcesIdentical(p)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func AreResourcesIdentical(p *Pod) bool {", + "\t// Pods may contain more than one container. All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// At least limits must be specified (requests default to limits if not specified)", + "\t\tif len(cut.Resources.Limits) == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Gather the values", + "\t\tcpuRequests := cut.Resources.Requests.Cpu()", + "\t\tcpuLimits := cut.Resources.Limits.Cpu()", + "\t\tmemoryRequests := cut.Resources.Requests.Memory()", + "\t\tmemoryLimits := cut.Resources.Limits.Memory()", + "", + "\t\t// Check for mismatches", + "\t\tif !cpuRequests.Equal(*cpuLimits) {", + "\t\t\tlog.Debug(\"%s has CPU requests %f and limits %f that do not match.\", cut.String(), cpuRequests.AsApproximateFloat64(), cpuLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !memoryRequests.Equal(*memoryLimits) {", + "\t\t\tlog.Debug(\"%s has memory requests %f and limits %f that do not match.\", cut.String(), 
memoryRequests.AsApproximateFloat64(), memoryLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "name": "GetUID", + "qualifiedName": "Container.GetUID", + "exported": true, + "receiver": "Container", + "signature": "func()(string, error)", + "doc": "Container.GetUID Retrieves the unique identifier of a container\n\nThe method splits the container’s ID string on \"://\" and uses the last\nsegment as the UID, handling empty results with an error. It logs debug\nmessages indicating success or failure and returns the UID along with any\nerror encountered.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:103", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) GetUID() (string, error) {", + "\tsplit := strings.Split(c.Status.ContainerID, \"://\")", + "\tuid := \"\"", + "\tif len(split) \u003e 0 {", + "\t\tuid = split[len(split)-1]", + "\t}", + "\tif uid == \"\" {", + "\t\tlog.Debug(\"could not find uid of %s/%s/%s\\n\", c.Namespace, c.Podname, c.Name)", + "\t\treturn \"\", errors.New(\"cannot determine container UID\")", + "\t}", + "\tlog.Debug(\"uid of 
%s/%s/%s=%s\\n\", c.Namespace, c.Podname, c.Name, uid)", + "\treturn uid, nil", + "}" + ] + }, + { + "name": "HasExecProbes", + "qualifiedName": "Container.HasExecProbes", + "exported": true, + "receiver": "Container", + "signature": "func()(bool)", + "doc": "Container.HasExecProbes Checks if any probe uses an exec command\n\nThe method inspects the container's liveness, readiness, and startup probes\nfor non-nil Exec fields. It returns true if at least one of these probes has\nan Exec configuration defined; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:245", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) HasExecProbes() bool {", + "\treturn c.LivenessProbe != nil \u0026\u0026 c.LivenessProbe.Exec != nil ||", + "\t\tc.ReadinessProbe != nil \u0026\u0026 c.ReadinessProbe.Exec != nil ||", + "\t\tc.StartupProbe != nil \u0026\u0026 c.StartupProbe.Exec != nil", + "}" + ] + }, + { + "name": "HasIgnoredContainerName", + "qualifiedName": "Container.HasIgnoredContainerName", + "exported": true, + "receiver": "Container", + "signature": "func()(bool)", + "doc": "Container.HasIgnoredContainerName Determines if the container should be excluded from processing\n\nThis method checks each name in a predefined ignore list against the\ncontainer’s name, also treating any Istio proxy container as ignored. If a\nmatch is found it returns true; otherwise false. 
The result guides callers to\nskip containers that are not relevant for certain operations.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:221", + "calls": [ + { + "name": "Container.IsIstioProxy", + "kind": "function", + "source": [ + "func (c *Container) IsIstioProxy() bool {", + "\treturn c.Name == IstioProxyContainerName", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getPodContainers", + "kind": "function", + "source": [ + "func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) {", + "\tfor j := 0; j \u003c len(aPod.Spec.Containers); j++ {", + "\t\tcut := \u0026(aPod.Spec.Containers[j])", + "", + "\t\tvar cutStatus corev1.ContainerStatus", + "\t\t// get Status for current container", + "\t\tfor index := range aPod.Status.ContainerStatuses {", + "\t\t\tif aPod.Status.ContainerStatuses[index].Name == cut.Name {", + "\t\t\t\tcutStatus = aPod.Status.ContainerStatuses[index]", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\taRuntime, uid := GetRuntimeUID(\u0026cutStatus)", + "\t\tcontainer := Container{Podname: aPod.Name, Namespace: aPod.Namespace,", + "\t\t\tNodeName: aPod.Spec.NodeName, Container: cut, Status: cutStatus, Runtime: aRuntime, UID: uid,", + "\t\t\tContainerImageIdentifier: buildContainerImageSource(aPod.Spec.Containers[j].Image, cutStatus.ImageID)}", + "", + "\t\t// Warn if readiness probe did not succeeded yet.", + "\t\tif !cutStatus.Ready {", + "\t\t\tlog.Warn(\"Container %q is not ready yet.\", \u0026container)", + "\t\t}", + "", + "\t\t// Warn if container state is not running.", + "\t\tif state := \u0026cutStatus.State; state.Running == nil {", + "\t\t\treason := \"\"", + "\t\t\tswitch {", + "\t\t\tcase state.Waiting != nil:", + "\t\t\t\treason = \"waiting - \" + state.Waiting.Reason", + "\t\t\tcase state.Terminated != nil:", 
+ "\t\t\t\treason = \"terminated - \" + state.Terminated.Reason", + "\t\t\tdefault:", + "\t\t\t\t// When no state was explicitly set, it's assumed to be in \"waiting state\".", + "\t\t\t\treason = \"waiting state reason unknown\"", + "\t\t\t}", + "", + "\t\t\tlog.Warn(\"Container %q is not running (reason: %s, restarts %d): some test cases might fail.\",", + "\t\t\t\t\u0026container, reason, cutStatus.RestartCount)", + "\t\t}", + "", + "\t\t// Build slices of containers based on whether or not we are \"ignoring\" them or not.", + "\t\tif useIgnoreList \u0026\u0026 container.HasIgnoredContainerName() {", + "\t\t\tcontinue", + "\t\t}", + "\t\tcontainerList = append(containerList, \u0026container)", + "\t}", + "\treturn containerList", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) HasIgnoredContainerName() bool {", + "\tfor _, ign := range ignoredContainerNames {", + "\t\tif c.IsIstioProxy() || strings.Contains(c.Name, ign) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "IsContainerRunAsNonRoot", + "qualifiedName": "Container.IsContainerRunAsNonRoot", + "exported": true, + "receiver": "Container", + "signature": "func(*bool)(bool, string)", + "doc": "Container.IsContainerRunAsNonRoot Determines if a container should run as non-root\n\nThe method checks the container’s security context for a RunAsNonRoot\nsetting, falling back to an optional pod-level value if the container does\nnot specify one. It returns a boolean indicating whether the container will\nrun as non‑root and a descriptive reason explaining which level provided\nthe decision. 
If neither level supplies a value, it reports that both are\nunset.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:283", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "PointerToString", + "kind": "function", + "source": [ + "func PointerToString[T any](p *T) string {", + "\tif p == nil {", + "\t\treturn \"nil\"", + "\t} else {", + "\t\treturn fmt.Sprint(*p)", + "\t}", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.GetRunAsNonRootFalseContainers", + "kind": "function", + "source": [ + "func (p *Pod) GetRunAsNonRootFalseContainers(knownContainersToSkip map[string]bool) (nonCompliantContainers []*Container, nonComplianceReasons []string) {", + "\t// Check pod-level security context this will be set by default for containers", + "\t// If not already configured at the container level", + "\tvar podRunAsNonRoot *bool", + "\tif p.Spec.SecurityContext != nil \u0026\u0026 p.Spec.SecurityContext.RunAsNonRoot != nil {", + "\t\tpodRunAsNonRoot = p.Spec.SecurityContext.RunAsNonRoot", + "\t}", + "", + "\tvar podRunAsUserID *int64", + "\tif p.Spec.SecurityContext != nil \u0026\u0026 p.Spec.SecurityContext.RunAsUser != nil {", + "\t\tpodRunAsUserID = p.Spec.SecurityContext.RunAsUser", + "\t}", + "", + "\t// Check each container for the RunAsNonRoot parameter.", + "\t// If it is not present, the pod value applies", + "\tfor _, cut := range p.Containers {", + "\t\tif knownContainersToSkip[cut.Name] {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tisRunAsNonRoot, isRunAsNonRootReason := cut.IsContainerRunAsNonRoot(podRunAsNonRoot)", + "\t\tisRunAsNonRootUserID, isRunAsNonRootUserIDReason := cut.IsContainerRunAsNonRootUserID(podRunAsUserID)", + "", + "\t\tif 
isRunAsNonRoot || isRunAsNonRootUserID {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnonCompliantContainers = append(nonCompliantContainers, cut)", + "\t\tnonComplianceReasons = append(nonComplianceReasons, isRunAsNonRootReason+\", \"+isRunAsNonRootUserIDReason)", + "\t}", + "", + "\treturn nonCompliantContainers, nonComplianceReasons", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) IsContainerRunAsNonRoot(podRunAsNonRoot *bool) (isContainerRunAsNonRoot bool, reason string) {", + "\tif c.SecurityContext != nil \u0026\u0026 c.SecurityContext.RunAsNonRoot != nil {", + "\t\treturn *c.SecurityContext.RunAsNonRoot, fmt.Sprintf(\"RunAsNonRoot is set to %t at the container level, overriding a %v value defined at pod level\",", + "\t\t\t*c.SecurityContext.RunAsNonRoot, stringhelper.PointerToString(podRunAsNonRoot))", + "\t}", + "", + "\tif podRunAsNonRoot != nil {", + "\t\treturn *podRunAsNonRoot, fmt.Sprintf(\"RunAsNonRoot is set to nil at container level and inheriting a %t value from the pod level RunAsNonRoot setting\", *podRunAsNonRoot)", + "\t}", + "", + "\treturn false, \"RunAsNonRoot is set to nil at pod and container level\"", + "}" + ] + }, + { + "name": "IsContainerRunAsNonRootUserID", + "qualifiedName": "Container.IsContainerRunAsNonRootUserID", + "exported": true, + "receiver": "Container", + "signature": "func(*int64)(bool, string)", + "doc": "Container.IsContainerRunAsNonRootUserID checks whether the container is running as a non-root user\n\nThe function evaluates the container’s security context to determine if it\nhas a RunAsUser value different from zero, indicating a non‑root user ID.\nIt also considers any pod-level RunAsUser setting that may be inherited when\nthe container does not specify its own. 
The result is a boolean flag and a\ndescriptive reason explaining which level provided the decision.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:303", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "PointerToString", + "kind": "function", + "source": [ + "func PointerToString[T any](p *T) string {", + "\tif p == nil {", + "\t\treturn \"nil\"", + "\t} else {", + "\t\treturn fmt.Sprint(*p)", + "\t}", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.GetRunAsNonRootFalseContainers", + "kind": "function", + "source": [ + "func (p *Pod) GetRunAsNonRootFalseContainers(knownContainersToSkip map[string]bool) (nonCompliantContainers []*Container, nonComplianceReasons []string) {", + "\t// Check pod-level security context this will be set by default for containers", + "\t// If not already configured at the container level", + "\tvar podRunAsNonRoot *bool", + "\tif p.Spec.SecurityContext != nil \u0026\u0026 p.Spec.SecurityContext.RunAsNonRoot != nil {", + "\t\tpodRunAsNonRoot = p.Spec.SecurityContext.RunAsNonRoot", + "\t}", + "", + "\tvar podRunAsUserID *int64", + "\tif p.Spec.SecurityContext != nil \u0026\u0026 p.Spec.SecurityContext.RunAsUser != nil {", + "\t\tpodRunAsUserID = p.Spec.SecurityContext.RunAsUser", + "\t}", + "", + "\t// Check each container for the RunAsNonRoot parameter.", + "\t// If it is not present, the pod value applies", + "\tfor _, cut := range p.Containers {", + "\t\tif knownContainersToSkip[cut.Name] {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tisRunAsNonRoot, isRunAsNonRootReason := cut.IsContainerRunAsNonRoot(podRunAsNonRoot)", + "\t\tisRunAsNonRootUserID, isRunAsNonRootUserIDReason := 
cut.IsContainerRunAsNonRootUserID(podRunAsUserID)", + "", + "\t\tif isRunAsNonRoot || isRunAsNonRootUserID {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnonCompliantContainers = append(nonCompliantContainers, cut)", + "\t\tnonComplianceReasons = append(nonComplianceReasons, isRunAsNonRootReason+\", \"+isRunAsNonRootUserIDReason)", + "\t}", + "", + "\treturn nonCompliantContainers, nonComplianceReasons", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) IsContainerRunAsNonRootUserID(podRunAsNonRootUserID *int64) (isContainerRunAsNonRootUserID bool, reason string) {", + "\tif c.SecurityContext != nil \u0026\u0026 c.SecurityContext.RunAsUser != nil {", + "\t\treturn *c.SecurityContext.RunAsUser != 0, fmt.Sprintf(\"RunAsUser is set to %v at the container level, overriding a %s value defined at pod level\",", + "\t\t\t*c.SecurityContext.RunAsUser, stringhelper.PointerToString(podRunAsNonRootUserID))", + "\t}", + "", + "\tif podRunAsNonRootUserID != nil {", + "\t\treturn *podRunAsNonRootUserID != 0, fmt.Sprintf(\"RunAsUser is set to nil at container level and inheriting a %v value from the pod level RunAsUser setting\", *podRunAsNonRootUserID)", + "\t}", + "", + "\treturn false, \"RunAsUser is set to nil at pod and container level\"", + "}" + ] + }, + { + "name": "IsIstioProxy", + "qualifiedName": "Container.IsIstioProxy", + "exported": true, + "receiver": "Container", + "signature": "func()(bool)", + "doc": "Container.IsIstioProxy Determines if the container is an Istio proxy\n\nIt checks whether the container’s name matches the predefined Istio proxy\nname. 
If it does, the function returns true; otherwise, it returns false.\nThis simple check is used to identify and potentially ignore Istio-related\ncontainers in other logic.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:236", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.HasIgnoredContainerName", + "kind": "function", + "source": [ + "func (c *Container) HasIgnoredContainerName() bool {", + "\tfor _, ign := range ignoredContainerNames {", + "\t\tif c.IsIstioProxy() || strings.Contains(c.Name, ign) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) IsIstioProxy() bool {", + "\treturn c.Name == IstioProxyContainerName", + "}" + ] + }, + { + "name": "IsReadOnlyRootFilesystem", + "qualifiedName": "Container.IsReadOnlyRootFilesystem", + "exported": true, + "receiver": "Container", + "signature": "func(*log.Logger)(bool)", + "doc": "Container.IsReadOnlyRootFilesystem Determines if the container’s root filesystem is read‑only\n\nIt logs a message indicating the container being tested, then checks whether\nthe security context and its ReadOnlyRootFilesystem field are defined. 
If\neither is missing it returns false; otherwise it returns the value of that\nfield.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:267", + "calls": [ + { + "name": "Info", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) IsReadOnlyRootFilesystem(logger *log.Logger) bool {", + "\tlogger.Info(\"Testing Container %q\", c)", + "\tif c.SecurityContext == nil || c.SecurityContext.ReadOnlyRootFilesystem == nil {", + "\t\treturn false", + "\t}", + "\treturn *c.SecurityContext.ReadOnlyRootFilesystem", + "}" + ] + }, + { + "name": "IsTagEmpty", + "qualifiedName": "Container.IsTagEmpty", + "exported": true, + "receiver": "Container", + "signature": "func()(bool)", + "doc": "Container.IsTagEmpty Checks whether the container image tag is unset\n\nThis method inspects the container's image identifier and compares its Tag\nfield to an empty string. It returns true when no tag has been specified,\nindicating a default or unspecified tag. The result helps callers determine\nif they need to supply a tag value.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:257", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) IsTagEmpty() bool {", + "\treturn c.ContainerImageIdentifier.Tag == \"\"", + "}" + ] + }, + { + "name": "SetPreflightResults", + "qualifiedName": "Container.SetPreflightResults", + "exported": true, + "receiver": "Container", + "signature": "func(map[string]PreflightResultsDB, *TestEnvironment)(error)", + "doc": "Container.SetPreflightResults Stores preflight test results for a container image\n\nThis method runs the OpenShift Preflight container checks on the image\nassociated with the receiver, capturing logs and test outcomes. 
If the image\nhas been processed before, it reuses cached results; otherwise it configures\nDocker credentials and optional insecure connections, executes the check,\nconverts raw results into a structured database format, and caches them for\nfuture use. The function returns an error if any part of the execution fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:125", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/container", + "name": "WithDockerConfigJSONFromFile", + "kind": "function" + }, + { + "name": "TestEnvironment.GetDockerConfigFile", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetDockerConfigFile() string {", + "\treturn env.params.PfltDockerconfig", + "}" + ] + }, + { + "name": "TestEnvironment.IsPreflightInsecureAllowed", + "kind": "function", + "source": [ + "func (env *TestEnvironment) IsPreflightInsecureAllowed() bool {", + "\treturn env.params.AllowPreflightInsecure", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/container", + "name": 
"WithInsecureConnection", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "name": "NewMapWriter", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "name": "ContextWithWriter", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "bytes", + "name": "NewBuffer", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Default", + "kind": "function" + }, + { + "name": "SetOutput", + "kind": "function" + }, + { + "pkgPath": "github.com/go-logr/stdr", + "name": "New", + "kind": "function" + }, + { + "pkgPath": "github.com/go-logr/logr", + "name": "NewContext", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/container", + "name": "NewCheck", + "kind": "function" + }, + { + "name": "Run", + "kind": "function" + }, + { + "name": "List", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "WithError", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "GetPreflightResultsDB", + "kind": "function", + "source": [ + "func GetPreflightResultsDB(results *plibRuntime.Results) PreflightResultsDB {", + "\tresultsDB := PreflightResultsDB{}", + "\tfor _, res := range results.Passed {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, 
Remediation: res.Help().Suggestion}", + "\t\tresultsDB.Passed = append(resultsDB.Passed, test)", + "\t}", + "\tfor _, res := range results.Failed {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, Remediation: res.Help().Suggestion}", + "\t\tresultsDB.Failed = append(resultsDB.Failed, test)", + "\t}", + "\tfor _, res := range results.Errors {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, Remediation: res.Help().Suggestion, Error: res.Error()}", + "\t\tresultsDB.Errors = append(resultsDB.Errors, test)", + "\t}", + "", + "\treturn resultsDB", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error {", + "\tlog.Info(\"Running Preflight container test for container %q with image %q\", c, c.Image)", + "", + "\t// Short circuit if the image already exists in the cache", + "\tif _, exists := preflightImageCache[c.Image]; exists {", + "\t\tlog.Info(\"Container image %q exists in the cache. 
Skipping this run.\", c.Image)", + "\t\tc.PreflightResults = preflightImageCache[c.Image]", + "\t\treturn nil", + "\t}", + "", + "\topts := []plibContainer.Option{}", + "\topts = append(opts, plibContainer.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibContainer.WithInsecureConnection())", + "\t}", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibContainer.NewCheck(c.Image, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "", + "\t\tresults.TestedImage = c.Image", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the Preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\t// Store the Preflight test results into the container's PreflightResults var and into the cache.", + "\tresultsDB := GetPreflightResultsDB(\u0026results)", + "\tc.PreflightResults = resultsDB", + "\tpreflightImageCache[c.Image] = resultsDB", + "\treturn nil", + "}" + ] + }, + { + "name": "String", + "qualifiedName": 
"Container.String", + "exported": true, + "receiver": "Container", + "signature": "func()(string)", + "doc": "Container.String Formats container details into a readable string\n\nThis method returns a string that describes the container by combining its\nname, pod name, and namespace in a single line. It uses standard formatting\nto create a clear human-readable representation of the container's identity\nwithin the cluster.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:207", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) String() string {", + "\treturn fmt.Sprintf(\"container: %s pod: %s ns: %s\",", + "\t\tc.Name,", + "\t\tc.Podname,", + "\t\tc.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "StringLong", + "qualifiedName": "Container.StringLong", + "exported": true, + "receiver": "Container", + "signature": "func()(string)", + "doc": "Container.StringLong Formats container details into a readable string\n\nThis method assembles key fields from the container such as node name,\nnamespace, pod name, container name, UID, and runtime into a single formatted\nline. It uses standard string formatting to produce a concise representation\nof the container’s identity. 
The resulting text is returned for logging or\ndisplay purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:190", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (c *Container) StringLong() string {", + "\treturn fmt.Sprintf(\"node: %s ns: %s podName: %s containerName: %s containerUID: %s containerRuntime: %s\",", + "\t\tc.NodeName,", + "\t\tc.Namespace,", + "\t\tc.Podname,", + "\t\tc.Name,", + "\t\tc.Status.ContainerID,", + "\t\tc.Runtime,", + "\t)", + "}" + ] + }, + { + "name": "ConvertArrayPods", + "qualifiedName": "ConvertArrayPods", + "exported": true, + "signature": "func([]*corev1.Pod)([]*Pod)", + "doc": "ConvertArrayPods Transforms a slice of core Kubernetes pods into provider-specific pod wrappers\n\nThe function iterates over each input pod, creates a new wrapper object with\nthe helper constructor, and collects pointers to these wrappers in a result\nslice. Each wrapper contains additional fields such as network interfaces,\nPCI devices, and test skip flags based on pod annotations and labels. 
The\nreturned slice provides an enriched representation suitable for downstream\nconnectivity testing.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:118", + "calls": [ + { + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ConvertArrayPods(pods []*corev1.Pod) (out []*Pod) {", + "\tfor i := range pods 
{", + "\t\taPodWrapper := NewPod(pods[i])", + "\t\tout = append(out, \u0026aPodWrapper)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "IsScaleObjectReady", + "qualifiedName": "CrScale.IsScaleObjectReady", + "exported": true, + "receiver": "CrScale", + "signature": "func()(bool)", + "doc": "CrScale.IsScaleObjectReady Checks whether the scale object has reached the desired replica count\n\nThe function compares the desired number of replicas defined in the\nspecification with the current replica count reported in the status. It logs\nboth values for debugging purposes. The result is a boolean indicating if the\nactual count matches the requested count.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/scale_object.go:31", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (crScale CrScale) IsScaleObjectReady() bool {", + "\treplicas := (crScale.Spec.Replicas)", + "\tlog.Info(\"replicas is %d status replica is %d\", replicas, crScale.Status.Replicas)", + "\treturn crScale.Status.Replicas == replicas", + "}" + ] + }, + { + "name": "ToString", + "qualifiedName": "CrScale.ToString", + "exported": true, + "receiver": "CrScale", + "signature": "func()(string)", + "doc": "CrScale.ToString Formats the CrScale object into a readable string\n\nThis method returns a single string that contains both the name and namespace\nof the CrScale instance. It uses formatting to combine the two fields with\nclear labels, producing output like \"cr: \u003cname\u003e ns: \u003cnamespace\u003e\". 
The\nfunction requires no arguments and yields a straightforward textual\nrepresentation for logging or display purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/scale_object.go:44", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (crScale CrScale) ToString() string {", + "\treturn fmt.Sprintf(\"cr: %s ns: %s\",", + "\t\tcrScale.Name,", + "\t\tcrScale.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "CsvToString", + "qualifiedName": "CsvToString", + "exported": true, + "signature": "func(*olmv1Alpha.ClusterServiceVersion)(string)", + "doc": "CsvToString Formats a CSV name and namespace into a readable string\n\nThe function receives a pointer to a ClusterServiceVersion object and returns\na string that includes the object's name followed by its namespace. It uses\nformatting to produce a concise representation suitable for logging or\ndebugging purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:371", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "WaitOperatorReady", + "kind": "function", + "source": [ + "func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to take into account that its pods", + "\t\t// could have been 
deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string {", + "\treturn fmt.Sprintf(\"operator csv: %s ns: %s\",", + "\t\tcsv.Name,", + "\t\tcsv.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "IsDeploymentReady", + "qualifiedName": "Deployment.IsDeploymentReady", + "exported": true, + "receiver": "Deployment", + "signature": "func()(bool)", + "doc": "Deployment.IsDeploymentReady Determines whether a deployment has reached the desired state\n\nIt inspects the deployment’s status conditions to see if an available\ncondition is present, then compares replica counts from the spec with various\nstatus fields such as unavailable, ready, available, and updated replicas. 
If\nany of these checks fail, it returns false; otherwise true.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/deployments.go:43", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (d *Deployment) IsDeploymentReady() bool {", + "\tnotReady := true", + "", + "\t// Check the deployment's conditions for deploymentAvailable.", + "\tfor _, condition := range d.Status.Conditions {", + "\t\tif condition.Type == appsv1.DeploymentAvailable {", + "\t\t\tnotReady = false // Deployment is ready", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\t// Find the number of expected replicas", + "\tvar replicas int32", + "\tif d.Spec.Replicas != nil {", + "\t\treplicas = *(d.Spec.Replicas)", + "\t} else {", + "\t\treplicas = 1", + "\t}", + "", + "\t// If condition says that the deployment is not ready or replicas do not match totals specified in spec.replicas.", + "\tif notReady ||", + "\t\td.Status.UnavailableReplicas != 0 || //", + "\t\td.Status.ReadyReplicas != replicas || // eg. 10 ready replicas == 10 total replicas", + "\t\td.Status.AvailableReplicas != replicas ||", + "\t\td.Status.UpdatedReplicas != replicas {", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "ToString", + "qualifiedName": "Deployment.ToString", + "exported": true, + "receiver": "Deployment", + "signature": "func()(string)", + "doc": "Deployment.ToString Formats deployment details into a human‑readable string\n\nThis method creates a concise representation of a Deployment by combining its\nname and namespace. 
It uses standard formatting to return the result as a\nsingle string, which can be printed or logged for debugging purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/deployments.go:78", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (d *Deployment) ToString() string {", + "\treturn fmt.Sprintf(\"deployment: %s ns: %s\",", + "\t\td.Name,", + "\t\td.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "Event.String", + "exported": true, + "receiver": "Event", + "signature": "func()(string)", + "doc": "Event.String Formats event data into a readable string\n\nThis method constructs a formatted text representation of an event, including\nits timestamp, involved object, reason, and message. It uses standard\nformatting utilities to combine these fields into a single line. The\nresulting string is returned for display or logging purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/events.go:55", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (e *Event) String() string {", + "\treturn fmt.Sprintf(\"timestamp=%s involved object=%s reason=%s message=%s\", e.CreationTimestamp.Time, e.InvolvedObject, e.Reason, e.Message)", + "}" + ] + }, + { + "name": "GetAllOperatorGroups", + "qualifiedName": "GetAllOperatorGroups", + "exported": true, + "signature": "func()([]*olmv1.OperatorGroup, error)", + "doc": "GetAllOperatorGroups Retrieves all OperatorGroup resources from the cluster\n\nThis function queries the OpenShift Operator Lifecycle Manager for\nOperatorGroup objects across all namespaces. It returns a slice of pointers\nto each group found, or nil if none exist, while logging warnings when the\nAPI resource is missing or empty. 
Errors unrelated to a missing resource are\npropagated back to the caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:452", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "OperatorGroups", + "kind": "function" + }, + { + "name": "OperatorsV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/api/errors", + "name": "IsNotFound", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = 
TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + 
"\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + 
"\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = 
append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test 
environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\tlist, err := client.OlmClient.OperatorsV1().OperatorGroups(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\treturn nil, err", + "\t}", + "", + "\tif k8serrors.IsNotFound(err) {", + "\t\tlog.Warn(\"No OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\tif len(list.Items) == 0 {", + "\t\tlog.Warn(\"OperatorGroup API resource found but no OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\t// Collect all OperatorGroup pointers", + "\tvar operatorGroups []*olmv1.OperatorGroup", + "\tfor i := range list.Items {", + "\t\toperatorGroups = append(operatorGroups, \u0026list.Items[i])", + "\t}", + "", + "\treturn operatorGroups, nil", + "}" + ] + }, + { + "name": "GetCatalogSourceBundleCount", + "qualifiedName": "GetCatalogSourceBundleCount", + "exported": true, + "signature": "func(*TestEnvironment, *olmv1Alpha.CatalogSource)(int)", + "doc": "GetCatalogSourceBundleCount Returns the number of bundles for a catalog source\n\nThe function determines how many bundles are associated with a given catalog\nsource by examining either probe container data or package manifests,\ndepending on the OpenShift version. It first checks if the cluster is running\nan OCP version less than or equal to 4.12; if so, it retrieves the count via\na probe container. Otherwise, it falls back to counting bundles listed in the\npackage manifests. 
The result is returned as an integer, with -1 indicating\nfailure to determine the count.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/catalogsources.go:22", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Major", + "kind": "function" + }, + { + "name": "Major", + "kind": "function" + }, + { + "name": "Minor", + "kind": "function" + }, + { + "name": "getCatalogSourceBundleCountFromProbeContainer", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\t\tcmd := 
\"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + }, + { + "name": "getCatalogSourceBundleCountFromPackageManifests", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromPackageManifests(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\ttotalRelatedBundles := 0", + "\tfor _, pm := range env.AllPackageManifests {", + "\t\t// Skip if the package manifest is not associated with the catalog source", + "\t\tif pm.Status.CatalogSource != cs.Name || pm.Status.CatalogSourceNamespace != cs.Namespace {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Count up the number of related bundles", + "\t\tfor c := range pm.Status.Channels {", + "\t\t\ttotalRelatedBundles += len(pm.Status.Channels[c].Entries)", + "\t\t}", + "\t}", + "", + "\treturn totalRelatedBundles", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// 
Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. 
Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", 
+ "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// Now that we know the catalog source, we are going to count up all of the relatedImages", + "\t// that are associated with the catalog source. This will give us the number of bundles that", + "\t// are available in the catalog source.", + "", + "\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count", + "\tconst (", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn 0", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\treturn getCatalogSourceBundleCountFromProbeContainer(env, cs)", + "\t\t}", + "", + "\t\t// If we didn't find the bundle count via the probe container, we can attempt to use the package manifests", + "\t}", + "", + "\t// If we didn't find the bundle count via the probe container, we can use the package manifests", + "\t// to get the bundle count", + "\treturn getCatalogSourceBundleCountFromPackageManifests(env, cs)", + "}" + ] + }, + { + "name": "GetPciPerPod", + "qualifiedName": "GetPciPerPod", + "exported": true, + "signature": "func(string)([]string, error)", + "doc": "GetPciPerPod Retrieves PCI addresses associated with a pod's network interfaces\n\nThe function accepts the CNI networks status annotation string, checks for\nemptiness, and parses it as JSON into a slice of network 
interface objects.\nIt iterates over each interface, extracting any non-empty PCI address from\nthe device information and appends it to the result slice. If parsing fails,\nan error is returned; otherwise the collected PCI addresses are returned.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:706", + "calls": [ + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. 
Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetPciPerPod(annotation string) (pciAddr []string, err error) {", + "\t// Sanity check: if the annotation is missing or empty, return empty result without error", + "\tif strings.TrimSpace(annotation) == \"\" {", + "\t\treturn []string{}, nil", + "\t}", + "", + "\tvar cniInfo []CniNetworkInterface", + "\terr = json.Unmarshal([]byte(annotation), \u0026cniInfo)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not unmarshal network-status annotation, err: %v\", err)", + "\t}", + "\tfor _, cniInterface := range cniInfo {", + "\t\tif cniInterface.DeviceInfo.PCI.PciAddress != \"\" {", + "\t\t\tpciAddr = append(pciAddr, cniInterface.DeviceInfo.PCI.PciAddress)", + "\t\t}", + "\t}", + "\treturn pciAddr, nil", + "}" + ] + }, + { + "name": "GetPodIPsPerNet", + "qualifiedName": "GetPodIPsPerNet", + "exported": true, + "signature": "func(string)(map[string]CniNetworkInterface, error)", + "doc": "GetPodIPsPerNet Retrieves pod IP addresses from a CNI annotation\n\nThis function takes the JSON string stored in 
the\n\"k8s.v1.cni.cncf.io/networks-status\" annotation and parses it into a slice of\nnetwork interface structures. It then builds a map keyed by each\nnon‑default network name, associating each key with its corresponding\ninterface information that includes IP addresses. If the annotation is empty\nor missing, an empty map is returned without error; if parsing fails, an\nerror is reported.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:673", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. 
Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetPodIPsPerNet(annotation string) (ips map[string]CniNetworkInterface, err error) {", + "\t// This is a map indexed with the network name (network attachment) and", + "\t// listing all the IPs created in this subnet and belonging to the pod namespace", + "\t// The list of ips pr net is parsed from the content of the \"k8s.v1.cni.cncf.io/networks-status\" annotation.", + "\tips = make(map[string]CniNetworkInterface)", + "", + "\t// Sanity check: if the annotation is missing or empty, return empty result without error", + "\tif strings.TrimSpace(annotation) == \"\" {", + "\t\treturn ips, nil", + "\t}", + "", + "\tvar cniInfo []CniNetworkInterface", + "\terr = json.Unmarshal([]byte(annotation), \u0026cniInfo)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not unmarshal network-status annotation, err: %v\", err)", + "\t}", + "\t// If this is the default interface, skip it as it is tested separately", + "\t// Otherwise add all non default interfaces", + "\tfor _, cniInterface := range cniInfo {", + "\t\tif 
!cniInterface.Default {", + "\t\t\tips[cniInterface.Name] = cniInterface", + "\t\t}", + "\t}", + "\treturn ips, nil", + "}" + ] + }, + { + "name": "GetPreflightResultsDB", + "qualifiedName": "GetPreflightResultsDB", + "exported": true, + "signature": "func(*plibRuntime.Results)(PreflightResultsDB)", + "doc": "GetPreflightResultsDB Transforms runtime preflight test outcomes into a structured result set\n\nThe function receives a pointer to the runtime results of preflight checks.\nIt iterates over each passed, failed, and errored check, extracting the name,\ndescription, remediation suggestion, and error message when applicable. For\nevery check it constructs a PreflightTest entry and appends it to the\ncorresponding slice in a PreflightResultsDB structure. Finally, it returns\nthis populated database for use by the container or operator result handling.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:917", + "calls": [ + { + "name": "Name", + "kind": "function" + }, + { + "name": "Metadata", + "kind": "function" + }, + { + "name": "Help", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Name", + "kind": "function" + }, + { + "name": "Metadata", + "kind": "function" + }, + { + "name": "Help", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Name", + "kind": "function" + }, + { + "name": "Metadata", + "kind": "function" + }, + { + "name": "Help", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.SetPreflightResults", + "kind": "function", + "source": [ + "func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error {", + "\tlog.Info(\"Running Preflight container test for container %q with image %q\", c, 
c.Image)", + "", + "\t// Short circuit if the image already exists in the cache", + "\tif _, exists := preflightImageCache[c.Image]; exists {", + "\t\tlog.Info(\"Container image %q exists in the cache. Skipping this run.\", c.Image)", + "\t\tc.PreflightResults = preflightImageCache[c.Image]", + "\t\treturn nil", + "\t}", + "", + "\topts := []plibContainer.Option{}", + "\topts = append(opts, plibContainer.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibContainer.WithInsecureConnection())", + "\t}", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibContainer.NewCheck(c.Image, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "", + "\t\tresults.TestedImage = c.Image", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the Preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\t// Store the Preflight test results into the container's PreflightResults var and into the cache.", + "\tresultsDB := 
GetPreflightResultsDB(\u0026results)", + "\tc.PreflightResults = resultsDB", + "\tpreflightImageCache[c.Image] = resultsDB", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 
0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetPreflightResultsDB(results *plibRuntime.Results) PreflightResultsDB {", + "\tresultsDB := PreflightResultsDB{}", + "\tfor _, res := range results.Passed {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, Remediation: res.Help().Suggestion}", + "\t\tresultsDB.Passed = append(resultsDB.Passed, test)", + "\t}", + "\tfor _, res := range results.Failed {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, Remediation: res.Help().Suggestion}", + "\t\tresultsDB.Failed = append(resultsDB.Failed, test)", + "\t}", + "\tfor _, res := range results.Errors {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, Remediation: res.Help().Suggestion, Error: res.Error()}", + "\t\tresultsDB.Errors = append(resultsDB.Errors, test)", + "\t}", + "", + "\treturn resultsDB", + "}" + ] + }, + { + "name": "GetRuntimeUID", + "qualifiedName": "GetRuntimeUID", + "exported": true, + "signature": "func(*corev1.ContainerStatus)(string)", + "doc": "GetRuntimeUID Extracts runtime type and unique identifier from a container status\n\nThe function splits the ContainerID string at \"://\" to separate the runtime\nprefix from the unique ID. If a split occurs, it assigns the first part as\nthe runtime name and the last part as the UID. 
It returns these two values\nfor use in higher‑level logic.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:655", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getPodContainers", + "kind": "function", + "source": [ + "func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) {", + "\tfor j := 0; j \u003c len(aPod.Spec.Containers); j++ {", + "\t\tcut := \u0026(aPod.Spec.Containers[j])", + "", + "\t\tvar cutStatus corev1.ContainerStatus", + "\t\t// get Status for current container", + "\t\tfor index := range aPod.Status.ContainerStatuses {", + "\t\t\tif aPod.Status.ContainerStatuses[index].Name == cut.Name {", + "\t\t\t\tcutStatus = aPod.Status.ContainerStatuses[index]", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\taRuntime, uid := GetRuntimeUID(\u0026cutStatus)", + "\t\tcontainer := Container{Podname: aPod.Name, Namespace: aPod.Namespace,", + "\t\t\tNodeName: aPod.Spec.NodeName, Container: cut, Status: cutStatus, Runtime: aRuntime, UID: uid,", + "\t\t\tContainerImageIdentifier: buildContainerImageSource(aPod.Spec.Containers[j].Image, cutStatus.ImageID)}", + "", + "\t\t// Warn if readiness probe did not succeeded yet.", + "\t\tif !cutStatus.Ready {", + "\t\t\tlog.Warn(\"Container %q is not ready yet.\", \u0026container)", + "\t\t}", + "", + "\t\t// Warn if container state is not running.", + "\t\tif state := \u0026cutStatus.State; state.Running == nil {", + "\t\t\treason := \"\"", + "\t\t\tswitch {", + "\t\t\tcase state.Waiting != nil:", + "\t\t\t\treason = \"waiting - \" + state.Waiting.Reason", + "\t\t\tcase state.Terminated != nil:", + "\t\t\t\treason = \"terminated - \" + state.Terminated.Reason", + "\t\t\tdefault:", + "\t\t\t\t// When no state was explicitly 
set, it's assumed to be in \"waiting state\".", + "\t\t\t\treason = \"waiting state reason unknown\"", + "\t\t\t}", + "", + "\t\t\tlog.Warn(\"Container %q is not running (reason: %s, restarts %d): some test cases might fail.\",", + "\t\t\t\t\u0026container, reason, cutStatus.RestartCount)", + "\t\t}", + "", + "\t\t// Build slices of containers based on whether or not we are \"ignoring\" them or not.", + "\t\tif useIgnoreList \u0026\u0026 container.HasIgnoredContainerName() {", + "\t\t\tcontinue", + "\t\t}", + "\t\tcontainerList = append(containerList, \u0026container)", + "\t}", + "\treturn containerList", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetRuntimeUID(cs *corev1.ContainerStatus) (runtime, uid string) {", + "\tsplit := strings.Split(cs.ContainerID, \"://\")", + "\tif len(split) \u003e 0 {", + "\t\tuid = split[len(split)-1]", + "\t\truntime = split[0]", + "\t}", + "\treturn runtime, uid", + "}" + ] + }, + { + "name": "GetTestEnvironment", + "qualifiedName": "GetTestEnvironment", + "exported": true, + "signature": "func()(TestEnvironment)", + "doc": "GetTestEnvironment Retrieves the test environment configuration\n\nThis function returns a TestEnvironment instance used throughout the suite.\nIt lazily builds the environment on first call by invoking\nbuildTestEnvironment and caches it for future invocations. 
Subsequent calls\nsimply return the cached environment without re‑initialising resources.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:588", + "calls": [ + { + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + 
"\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + 
"\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + 
"\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil 
{", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "ExecCommandContainerNSEnter", + "kind": "function", + "source": [ + "func ExecCommandContainerNSEnter(command string,", + "\taContainer *provider.Container) (outStr, errStr string, err error) {", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(aContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", aContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\t// Get the container PID to build the nsenter command", + "\tcontainerPid, err := GetPidFromContainer(aContainer, ctx)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot get PID from: %s, err: %v\", aContainer, err)", + "\t}", + "", + "\t// Add the container PID and the specific command to run with nsenter", + "\tnsenterCommand := \"nsenter -t \" + strconv.Itoa(containerPid) + \" -n \" + command", + "", + "\t// Run the nsenter command on the probe pod with retry logic", + "\tfor attempt := 1; attempt \u003c= RetryAttempts; attempt++ {", + "\t\toutStr, errStr, err = ch.ExecCommandContainer(ctx, nsenterCommand)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\tif attempt \u003c RetryAttempts {", + "\t\t\ttime.Sleep(RetrySleepSeconds * time.Second)", + "\t\t}", + "\t}", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot execute command: \\\" %s 
\\\" on %s err:%s\", command, aContainer, err)", + "\t}", + "", + "\treturn outStr, errStr, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidsFromPidNamespace", + "kind": "function", + "source": [ + "func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) {", + "\tconst command = \"trap \\\"\\\" SIGURG ; ps -e -o pidns,pid,ppid,args\"", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(container.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", container, err)", + "\t}", + "", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"command %q failed to run in probe pod=%s (node=%s): %v\", command, ctx.GetPodName(), container.NodeName, err)", + "\t}", + "", + "\tre := regexp.MustCompile(PsRegex)", + "\tmatches := re.FindAllStringSubmatch(stdout, -1)", + "\t// If we do not find a successful log, we fail", + "\tfor _, v := range matches {", + "\t\t// Matching only the right PidNs", + "\t\tif pidNamespace != v[1] {", + "\t\t\tcontinue", + "\t\t}", + "\t\taPidNs, err := strconv.Atoi(v[1])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[1], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPid, err := strconv.Atoi(v[2])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[2], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPPid, err := strconv.Atoi(v[3])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[3], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tp = append(p, \u0026Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid})", + "\t}", + "\treturn p, nil", + "}" + ] + 
}, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. 
See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + "\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, 
filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "MarshalConfigurations", + "kind": "function", + "source": [ + "func MarshalConfigurations(env *provider.TestEnvironment) (configurations []byte, err error) {", + "\tconfig := env", + "\tif config == nil {", + "\t\t*config = provider.GetTestEnvironment()", + "\t}", + "\tconfigurations, err = j.Marshal(\u0026config)", + "\tif err != nil {", + "\t\tlog.Error(\"Error converting configurations to JSON: %v\", err)", + "\t\treturn configurations, err", + "\t}", + "\treturn configurations, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetCniPlugins", + "kind": "function", + "source": [ + "func GetCniPlugins() (out map[string][]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string][]interface{})", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, cniPluginsCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cniPluginsCommand, probePod.String())", + "\t\t\tcontinue", + "\t\t}", + "\t\tdecoded := []interface{}{}", + "\t\terr = json.Unmarshal([]byte(outStr), \u0026decoded)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not decode json file because of: %s\", err)", + "\t\t\tcontinue", + "\t\t}", + 
"\t\tout[probePod.Spec.NodeName] = decoded", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetHwInfoAllNodes", + "kind": "function", + "source": [ + "func GetHwInfoAllNodes() (out map[string]NodeHwInfo) {", + "\tenv := provider.GetTestEnvironment()", + "\to := clientsholder.GetClientsHolder()", + "\tout = make(map[string]NodeHwInfo)", + "\tfor _, probePod := range env.ProbePods {", + "\t\thw := NodeHwInfo{}", + "\t\tlscpu, err := getHWJsonOutput(probePod, o, lscpuCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lscpu for node %s\", probePod.Spec.NodeName)", + "\t\t} else {", + "\t\t\tvar ok bool", + "\t\t\ttemp, ok := lscpu.(map[string]interface{})", + "\t\t\tif !ok {", + "\t\t\t\tlog.Error(\"problem casting lscpu field for node %s, lscpu=%v\", probePod.Spec.NodeName, lscpu)", + "\t\t\t} else {", + "\t\t\t\thw.Lscpu = temp[\"lscpu\"]", + "\t\t\t}", + "\t\t}", + "\t\thw.IPconfig, err = getHWJsonOutput(probePod, o, ipCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting ip config for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lsblk, err = getHWJsonOutput(probePod, o, lsblkCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lsblk for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\thw.Lspci, err = getHWTextOutput(probePod, o, lspciCommand)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"problem getting lspci for node %s\", probePod.Spec.NodeName)", + "\t\t}", + "\t\tout[probePod.Spec.NodeName] = hw", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetNodeJSON", + "kind": "function", + "source": [ + "func GetNodeJSON() (out map[string]interface{}) {", + "\tenv := provider.GetTestEnvironment()", + "", + "\tnodesJSON, err := json.Marshal(env.Nodes)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not 
Marshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\terr = json.Unmarshal(nodesJSON, \u0026out)", + "\tif err != nil {", + "\t\tlog.Error(\"Could not unMarshall env.Nodes, err=%v\", err)", + "\t}", + "", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetVersionK8s", + "kind": "function", + "source": [ + "func GetVersionK8s() (out string) {", + "\tenv := provider.GetTestEnvironment()", + "\treturn env.K8sVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetVersionOcp", + "kind": "function", + "source": [ + "func GetVersionOcp() (out string) {", + "\tenv := provider.GetTestEnvironment()", + "\tif !provider.IsOCPCluster() {", + "\t\treturn \"n/a, (non-OpenShift cluster)\"", + "\t}", + "\treturn env.OpenshiftVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/postmortem", + "name": "Log", + "kind": "function", + "source": [ + "func Log() (out string) {", + "\t// Get current environment", + "\tenv := provider.GetTestEnvironment()", + "", + "\t// Set refresh", + "\tenv.SetNeedsRefresh()", + "", + "\t// Get up-to-date environment", + "\tenv = provider.GetTestEnvironment()", + "", + "\tout += \"\\nNode Status:\\n\"", + "\tfor _, n := range env.Nodes {", + "\t\tout += fmt.Sprintf(\"node name=%s taints=%+v\", n.Data.Name, n.Data.Spec.Taints) + \"\\n\"", + "\t}", + "\tout += \"\\nPending Pods:\\n\"", + "\tfor _, p := range env.AllPods {", + "\t\tif p.Status.Phase != corev1.PodSucceeded \u0026\u0026 p.Status.Phase != corev1.PodRunning {", + "\t\t\tout += p.String() + \"\\n\"", + "\t\t}", + "\t}", + "\tout += \"\\nAbnormal events:\\n\"", + "\tfor _, e := range env.AbnormalEvents {", + "\t\tout += e.String() + \"\\n\"", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": 
"GetProcessCPUScheduling", + "kind": "function", + "source": [ + "func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {", + "\tlog.Info(\"Checking the scheduling policy/priority in %v for pid=%d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"chrt -p %d\", pid)", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := crclient.GetNodeProbePodContext(testContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", 0, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\tstdout, stderr, err := ch.ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"command %q failed to run in probe pod %s (node %s): %v (stderr: %v)\",", + "\t\t\tcommand, ctx.GetPodName(), testContainer.NodeName, err, stderr)", + "\t}", + "", + "\tschedulePolicy, schedulePriority, err = parseSchedulingPolicyAndPriority(stdout)", + "\tif err != nil {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"error getting the scheduling policy and priority for %v : %v\", testContainer, err)", + "\t}", + "\tlog.Info(\"pid %d in %v has the cpu scheduling policy %s, scheduling priority %d\", pid, testContainer, schedulePolicy, schedulePriority)", + "", + "\treturn schedulePolicy, schedulePriority, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + 
"\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "ShouldRun", + "kind": "function", + "source": [ + "func ShouldRun(labelsExpr string) bool {", + "\tenv = provider.GetTestEnvironment()", + "\tpreflightAllowedLabels := []string{common.PreflightTestKey, identifiers.TagPreflight}", + "", + "\tif !labelsAllowTestRun(labelsExpr, preflightAllowedLabels) {", + "\t\treturn false", + "\t}", + "", + "\t// Add safeguard against running the preflight tests if the docker config does not exist.", + "\tpreflightDockerConfigFile := configuration.GetTestParameters().PfltDockerconfig", + "\tif preflightDockerConfigFile == \"\" || preflightDockerConfigFile == \"NA\" {", + "\t\tlog.Warn(\"Skipping the preflight suite because the Docker Config file is not provided.\")", + "\t\tenv.SkipPreflight = true", + "\t}", + "", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "name": "GetUpdatedCrObject", + "qualifiedName": "GetUpdatedCrObject", + "exported": true, + "signature": "func(scale.ScalesGetter, string, string, schema.GroupResource)(*CrScale, error)", + "doc": "GetUpdatedCrObject Retrieves a scaled custom resource and wraps it for further use\n\nThis function calls the discovery helper to fetch a custom resource by name\nwithin a namespace, using the provided scale getter and group-resource\nschema. 
It then packages the returned scaling object into a CrScale\nstructure, returning that along with any error encountered during retrieval.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/scale_object.go:57", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindCrObjectByNameByNamespace", + "kind": "function", + "source": [ + "func FindCrObjectByNameByNamespace(scalesGetter scale.ScalesGetter, ns, name string, groupResourceSchema schema.GroupResource) (*scalingv1.Scale, error) {", + "\tcrScale, err := scalesGetter.Scales(ns).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", ns, name)", + "\t\treturn nil, err", + "\t}", + "\treturn crScale, nil", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetUpdatedCrObject(sg scale.ScalesGetter, namespace, name string, groupResourceSchema schema.GroupResource) (*CrScale, error) {", + "\tresult, err := autodiscover.FindCrObjectByNameByNamespace(sg, namespace, name, groupResourceSchema)", + "\treturn \u0026CrScale{", + "\t\tresult,", + "\t}, err", + "}" + ] + }, + { + "name": "GetUpdatedDeployment", + "qualifiedName": "GetUpdatedDeployment", + "exported": true, + "signature": "func(appv1client.AppsV1Interface, string, string)(*Deployment, error)", + "doc": "GetUpdatedDeployment Retrieves the latest state of a Kubernetes deployment\n\nThe function queries the cluster for a specific deployment in a given\nnamespace, then wraps the result in a custom Deployment type that exposes\nhelper methods. 
It returns a pointer to this wrapper and an error if the\nlookup fails or the API call encounters an issue.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/deployments.go:91", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindDeploymentByNameByNamespace", + "kind": "function", + "source": [ + "func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.Deployment, error) {", + "\tdp, err := appClient.Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", namespace, name)", + "\t\treturn nil, err", + "\t}", + "\treturn dp, nil", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "isDeploymentReady", + "kind": "function", + "source": [ + "func isDeploymentReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tdep, err := provider.GetUpdatedDeployment(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn dep.IsDeploymentReady(), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetUpdatedDeployment(ac appv1client.AppsV1Interface, namespace, name string) (*Deployment, error) {", + "\tresult, err := autodiscover.FindDeploymentByNameByNamespace(ac, namespace, name)", + "\treturn \u0026Deployment{", + "\t\tresult,", + "\t}, err", + "}" + ] + }, + { + "name": "GetUpdatedStatefulset", + "qualifiedName": "GetUpdatedStatefulset", + "exported": true, + "signature": "func(appv1client.AppsV1Interface, string, string)(*StatefulSet, error)", + "doc": "GetUpdatedStatefulset Retrieves the current StatefulSet object for a given namespace and name\n\nThis function calls an internal discovery helper to 
fetch the latest\nstatefulset from the Kubernetes API. It wraps the result in a custom\nStatefulSet type that provides additional methods, such as readiness checks.\nThe returned pointer is nil if an error occurs, with the error propagated to\nthe caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/statefulsets.go:77", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "FindStatefulsetByNameByNamespace", + "kind": "function", + "source": [ + "func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, error) {", + "\tss, err := appClient.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot retrieve deployment in ns=%s name=%s\", namespace, name)", + "\t\treturn nil, err", + "\t}", + "\treturn ss, nil", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForStatefulSetReady", + "kind": "function", + "source": [ + "func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log.Logger) bool {", + "\tlogger.Debug(\"Check if statefulset %s:%s is ready\", ns, name)", + "\tclients := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tss, err := provider.GetUpdatedStatefulset(clients.K8sClient.AppsV1(), ns, name)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Error while getting the %s, err: %v\", ss.ToString(), err)", + "\t\t} else if ss.IsStatefulSetReady() {", + "\t\t\tlogger.Info(\"%s is ready\", ss.ToString())", + "\t\t\treturn true", + "\t\t}", + "\t\ttime.Sleep(time.Second)", + "\t}", + "\tlogger.Error(\"Statefulset %s:%s is not ready\", ns, name)", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": 
"isStatefulSetReady", + "kind": "function", + "source": [ + "func isStatefulSetReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tsts, err := provider.GetUpdatedStatefulset(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn sts.IsStatefulSetReady(), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetUpdatedStatefulset(ac appv1client.AppsV1Interface, namespace, name string) (*StatefulSet, error) {", + "\tresult, err := autodiscover.FindStatefulsetByNameByNamespace(ac, namespace, name)", + "\treturn \u0026StatefulSet{", + "\t\tresult,", + "\t}, err", + "}" + ] + }, + { + "name": "IsOCPCluster", + "qualifiedName": "IsOCPCluster", + "exported": true, + "signature": "func()(bool)", + "doc": "IsOCPCluster Determines if the current cluster is an OpenShift installation\n\nThe function checks whether the test environment’s OpenshiftVersion field\ndiffers from a predefined constant that represents non‑OpenShift clusters.\nIt returns true when the cluster is recognized as OpenShift, and false\notherwise.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:602", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics", + "name": "GetVersionOcp", + "kind": "function", + "source": [ + "func GetVersionOcp() (out string) {", + "\tenv := provider.GetTestEnvironment()", + "\tif !provider.IsOCPCluster() {", + "\t\treturn \"n/a, (non-OpenShift cluster)\"", + "\t}", + "\treturn env.OpenshiftVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createNodes", + "kind": "function", + "source": [ + "func createNodes(nodes []corev1.Node) map[string]Node {", + "\twrapperNodes := map[string]Node{}", + "", + "\t// machineConfigs is a helper map to avoid download \u0026 
process the same mc twice.", + "\tmachineConfigs := map[string]MachineConfig{}", + "\tfor i := range nodes {", + "\t\tnode := \u0026nodes[i]", + "", + "\t\tif !IsOCPCluster() {", + "\t\t\t// Avoid getting Mc info for non ocp clusters.", + "\t\t\twrapperNodes[node.Name] = Node{Data: node}", + "\t\t\tlog.Warn(\"Non-OCP cluster detected. MachineConfig retrieval for node %q skipped.\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Get Node's machineConfig name", + "\t\tmcName, exists := node.Annotations[\"machineconfiguration.openshift.io/currentConfig\"]", + "\t\tif !exists {", + "\t\t\tlog.Error(\"Failed to get machineConfig name for node %q\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Node %q - mc name %q\", node.Name, mcName)", + "\t\tmc, err := getMachineConfig(mcName, machineConfigs)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get machineConfig %q, err: %v\", mcName, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\twrapperNodes[node.Name] = Node{", + "\t\t\tData: node,", + "\t\t\tMc: mc,", + "\t\t}", + "\t}", + "", + "\treturn wrapperNodes", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testAllOperatorCertified", + "kind": "function", + "source": [ + "func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\toperatorsUnderTest := env.Operators", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", 
+ "\tocpMinorVersion := \"\"", + "\tif provider.IsOCPCluster() {", + "\t\t// Converts\tmajor.minor.patch version format to major.minor", + "\t\tconst majorMinorPatchCount = 3", + "\t\tsplitVersion := strings.SplitN(env.OpenshiftVersion, \".\", majorMinorPatchCount)", + "\t\tocpMinorVersion = splitVersion[0] + \".\" + splitVersion[1]", + "\t}", + "\tfor _, operator := range operatorsUnderTest {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "\t\tisCertified := validator.IsOperatorCertified(operator.Name, ocpMinorVersion)", + "\t\tif !isCertified {", + "\t\t\tcheck.LogError(\"Operator %q (channel %q) failed to be certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator failed to be certified for OpenShift\", false).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator %q (channel %q) is certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator certified OK\", true).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testAPICompatibilityWithNextOCPRelease", + "kind": "function", + "source": [ + "func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tisOCP := provider.IsOCPCluster()", + "\tcheck.LogInfo(\"Is OCP: %v\", isOCP)", + "", + "\tif !isOCP {", + "\t\tcheck.LogInfo(\"The 
Kubernetes distribution is not OpenShift. Skipping API compatibility test.\")", + "\t\treturn", + "\t}", + "", + "\t// Retrieve APIRequestCount using clientsholder", + "\toc := clientsholder.GetClientsHolder()", + "\tapiRequestCounts, err := oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error retrieving APIRequestCount objects: %s\", err)", + "\t\treturn", + "\t}", + "", + "\t// Extract unique service account names from env.ServiceAccounts", + "\tworkloadServiceAccountNames := extractUniqueServiceAccountNames(env)", + "\tcheck.LogInfo(\"Detected %d unique service account names for the workload: %v\", len(workloadServiceAccountNames), workloadServiceAccountNames)", + "", + "\t// Build a map from service accounts to deprecated APIs", + "\tserviceAccountToDeprecatedAPIs := buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)", + "", + "\t// Evaluate API compliance with the next Kubernetes version", + "\tcompliantObjects, nonCompliantObjects := evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)", + "", + "\t// Add test results", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + 
"\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsOCPCluster() bool {", + "\treturn env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion", + "}" + ] + }, + { + "name": "LoadBalancingDisabled", + "qualifiedName": "LoadBalancingDisabled", + "exported": true, + "signature": "func(*Pod)(bool)", + "doc": "LoadBalancingDisabled Determines if both CPU and IRQ load balancing are disabled via annotations\n\nThe function checks a pod’s annotations for \"cpu-load-balancing.crio.io\"\nand \"irq-load-balancing.crio.io\", verifying each is set to the value\n\"disable\". If either annotation is missing or has an invalid value, it logs a\ndebug message. It returns true only when both annotations are present with\nthe correct value; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/isolation.go:103", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + 
"kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsCPUIsolationCompliant", + "kind": "function", + "source": [ + "func (p *Pod) IsCPUIsolationCompliant() bool {", + "\tisCPUIsolated := true", + "", + "\tif !LoadBalancingDisabled(p) {", + "\t\tlog.Debug(\"Pod %q has been found to not have annotations set correctly for CPU isolation.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\tif !p.IsRuntimeClassNameSpecified() {", + "\t\tlog.Debug(\"Pod %q has been found to not have runtimeClassName specified.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\treturn isCPUIsolated", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadBalancingDisabled(p *Pod) bool {", + "\tconst (", + "\t\tdisableVar = \"disable\"", + "\t)", + "", + "\tcpuLoadBalancingDisabled := false", + "\tirqLoadBalancingDisabled := false", + "", + "\tif v, ok := p.Annotations[\"cpu-load-balancing.crio.io\"]; ok {", + "\t\tif v == disableVar {", + "\t\t\tcpuLoadBalancingDisabled = true", + "\t\t} else {", + "\t\t\tlog.Debug(\"Annotation cpu-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.\")", + "\t\t}", + "\t} else {", + "\t\tlog.Debug(\"Annotation cpu-load-balancing.crio.io is missing.\")", + "\t}", + "", + "\tif v, ok := p.Annotations[\"irq-load-balancing.crio.io\"]; ok {", + "\t\tif v == disableVar {", + "\t\t\tirqLoadBalancingDisabled = true", + "\t\t} else {", + "\t\t\tlog.Debug(\"Annotation irq-load-balancing.crio.io has an invalid value for CPU isolation. 
Must be 'disable'.\")", + "\t\t}", + "\t} else {", + "\t\tlog.Debug(\"Annotation irq-load-balancing.crio.io is missing.\")", + "\t}", + "", + "\t// Both conditions have to be set to 'disable'", + "\tif cpuLoadBalancingDisabled \u0026\u0026 irqLoadBalancingDisabled {", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "NewContainer", + "qualifiedName": "NewContainer", + "exported": true, + "signature": "func()(*Container)", + "doc": "NewContainer Creates an empty Container instance\n\nThe function returns a pointer to a new Container struct with its embedded\ncorev1.Container field initialized to an empty object. No parameters are\nrequired, and the returned value can be used as a starting point for building\na container configuration.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:91", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewContainer() *Container {", + "\treturn \u0026Container{", + "\t\tContainer: \u0026corev1.Container{}, // initialize the corev1.Container object", + "\t}", + "}" + ] + }, + { + "name": "NewEvent", + "qualifiedName": "NewEvent", + "exported": true, + "signature": "func(*corev1.Event)(Event)", + "doc": "NewEvent Wraps a Kubernetes event object\n\nThe function receives a pointer to a corev1.Event and returns an Event\ninstance that encapsulates the original event. It assigns the passed event to\nthe internal field of the returned struct, enabling further processing within\nthe provider package. 
No additional transformation or validation is\nperformed.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/events.go:44", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators 
= data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", 
+ "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, 
helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := 
pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewEvent(aEvent *corev1.Event) (out Event) {", + "\tout.Event = aEvent", + "\treturn out", + "}" + ] + }, + { + "name": "NewPod", + "qualifiedName": "NewPod", + "exported": true, + "signature": "func(*corev1.Pod)(Pod)", + "doc": "NewPod Creates a Pod wrapper with network and container details\n\nThe function takes a Kubernetes pod object, extracts its annotations to\ndetermine Multus network interfaces and PCI addresses, logs missing or empty\nannotations, and handles errors gracefully. It also inspects labels to decide\nwhether to skip connectivity tests and populates the list of containers from\nthe pod specification. 
The resulting Pod structure includes the original pod\npointer, network interface maps, PCI information, container slice, and flags\ncontrolling test behavior.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:75", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "GetAnnotations", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "GetPodIPsPerNet", + "kind": "function", + "source": [ + "func GetPodIPsPerNet(annotation string) (ips map[string]CniNetworkInterface, err error) {", + "\t// This is a map indexed with the network name (network attachment) and", + "\t// listing all the IPs created in this subnet and belonging to the pod namespace", + "\t// The list of ips pr net is parsed from the content of the \"k8s.v1.cni.cncf.io/networks-status\" annotation.", + "\tips = make(map[string]CniNetworkInterface)", + "", + "\t// Sanity check: if the annotation is missing or empty, return empty result without error", + "\tif strings.TrimSpace(annotation) == \"\" {", + "\t\treturn ips, nil", + "\t}", + "", + "\tvar cniInfo []CniNetworkInterface", + "\terr = json.Unmarshal([]byte(annotation), \u0026cniInfo)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not unmarshal network-status annotation, err: %v\", err)", + "\t}", + "\t// If this is the default interface, skip it as it is tested separately", + "\t// Otherwise add all non default interfaces", + "\tfor _, cniInterface := range cniInfo {", + "\t\tif !cniInterface.Default {", + 
"\t\t\tips[cniInterface.Name] = cniInterface", + "\t\t}", + "\t}", + "\treturn ips, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "GetPciPerPod", + "kind": "function", + "source": [ + "func GetPciPerPod(annotation string) (pciAddr []string, err error) {", + "\t// Sanity check: if the annotation is missing or empty, return empty result without error", + "\tif strings.TrimSpace(annotation) == \"\" {", + "\t\treturn []string{}, nil", + "\t}", + "", + "\tvar cniInfo []CniNetworkInterface", + "\terr = json.Unmarshal([]byte(annotation), \u0026cniInfo)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not unmarshal network-status annotation, err: %v\", err)", + "\t}", + "\tfor _, cniInterface := range cniInfo {", + "\t\tif cniInterface.DeviceInfo.PCI.PciAddress != \"\" {", + "\t\t\tpciAddr = append(pciAddr, cniInterface.DeviceInfo.PCI.PciAddress)", + "\t\t}", + "\t}", + "\treturn pciAddr, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "GetLabels", + "kind": "function" + }, + { + "name": "GetLabels", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "getPodContainers", + "kind": "function", + "source": [ + "func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) {", + "\tfor j := 0; j \u003c len(aPod.Spec.Containers); j++ {", + "\t\tcut := \u0026(aPod.Spec.Containers[j])", + "", + "\t\tvar cutStatus corev1.ContainerStatus", + "\t\t// get Status for current container", + "\t\tfor index := 
range aPod.Status.ContainerStatuses {", + "\t\t\tif aPod.Status.ContainerStatuses[index].Name == cut.Name {", + "\t\t\t\tcutStatus = aPod.Status.ContainerStatuses[index]", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\taRuntime, uid := GetRuntimeUID(\u0026cutStatus)", + "\t\tcontainer := Container{Podname: aPod.Name, Namespace: aPod.Namespace,", + "\t\t\tNodeName: aPod.Spec.NodeName, Container: cut, Status: cutStatus, Runtime: aRuntime, UID: uid,", + "\t\t\tContainerImageIdentifier: buildContainerImageSource(aPod.Spec.Containers[j].Image, cutStatus.ImageID)}", + "", + "\t\t// Warn if readiness probe did not succeeded yet.", + "\t\tif !cutStatus.Ready {", + "\t\t\tlog.Warn(\"Container %q is not ready yet.\", \u0026container)", + "\t\t}", + "", + "\t\t// Warn if container state is not running.", + "\t\tif state := \u0026cutStatus.State; state.Running == nil {", + "\t\t\treason := \"\"", + "\t\t\tswitch {", + "\t\t\tcase state.Waiting != nil:", + "\t\t\t\treason = \"waiting - \" + state.Waiting.Reason", + "\t\t\tcase state.Terminated != nil:", + "\t\t\t\treason = \"terminated - \" + state.Terminated.Reason", + "\t\t\tdefault:", + "\t\t\t\t// When no state was explicitly set, it's assumed to be in \"waiting state\".", + "\t\t\t\treason = \"waiting state reason unknown\"", + "\t\t\t}", + "", + "\t\t\tlog.Warn(\"Container %q is not running (reason: %s, restarts %d): some test cases might fail.\",", + "\t\t\t\t\u0026container, reason, cutStatus.RestartCount)", + "\t\t}", + "", + "\t\t// Build slices of containers based on whether or not we are \"ignoring\" them or not.", + "\t\tif useIgnoreList \u0026\u0026 container.HasIgnoredContainerName() {", + "\t\t\tcontinue", + "\t\t}", + "\t\tcontainerList = append(containerList, \u0026container)", + "\t}", + "\treturn containerList", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "ConvertArrayPods", + "kind": "function", + "source": [ + "func 
ConvertArrayPods(pods []*corev1.Pod) (out []*Pod) {", + "\tfor i := range pods {", + "\t\taPodWrapper := NewPod(pods[i])", + "\t\tout = append(out, \u0026aPodWrapper)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, 
data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, 
podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, 
nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, 
pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. 
Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "name": "GetCSCOSVersion", + "qualifiedName": "Node.GetCSCOSVersion", + "exported": true, + "receiver": "Node", + "signature": "func()(string, error)", + "doc": "Node.GetCSCOSVersion Retrieves the CoreOS version string from a node's OS image\n\nThe function first verifies that the node is running CoreOS by checking its\nstatus. If not, it returns an error indicating an unsupported OS type. 
When\nvalid, it parses the OSImage field to extract and return the CoreOS release\nidentifier as a string.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:161", + "calls": [ + { + "name": "Node.IsCSCOS", + "kind": "function", + "source": [ + "func (node *Node) IsCSCOS() bool {", + "\treturn strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), cscosName)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) GetCSCOSVersion() (string, error) {", + "\t// Check if the node is running CoreOS or not", + "\tif !node.IsCSCOS() {", + "\t\treturn \"\", fmt.Errorf(\"invalid OS type: %s\", node.Data.Status.NodeInfo.OSImage)", + "\t}", + "", + "\t// CentOS Stream CoreOS 413.92.202303061740-0 (Plow) --\u003e 413.92.202303061740-0", + "\tsplitStr := strings.Split(node.Data.Status.NodeInfo.OSImage, cscosName)", + "\tlongVersionSplit := strings.Split(strings.TrimSpace(splitStr[1]), \" \")", + "", + "\treturn longVersionSplit[0], nil", + "}" + ] + }, + { + "name": "GetRHCOSVersion", + "qualifiedName": "Node.GetRHCOSVersion", + "exported": true, + "receiver": "Node", + "signature": "func()(string, error)", + "doc": "Node.GetRHCOSVersion Retrieves the short RHCOS version string from a node's OS image\n\nThe function first verifies that the node is running Red Hat Enterprise Linux\nCoreOS, returning an error if not. It then parses the OSImage field to\nextract the long version identifier and converts it into the corresponding\nshort version using a helper routine. 
The resulting short version string is\nreturned alongside any potential errors.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:136", + "calls": [ + { + "name": "Node.IsRHCOS", + "kind": "function", + "source": [ + "func (node *Node) IsRHCOS() bool {", + "\treturn strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), rhcosName)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/operatingsystem", + "name": "GetShortVersionFromLong", + "kind": "function", + "source": [ + "func GetShortVersionFromLong(longVersion string) (string, error) {", + "\tcapturedVersions, err := GetRHCOSMappedVersions(rhcosVersionMap)", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\t// search through all available rhcos versions for a match", + "\tfor s, l := range capturedVersions {", + "\t\tif l == longVersion {", + "\t\t\treturn s, nil", + "\t\t}", + "\t}", + "", + "\t// return \"version-not-found\" if the short version cannot be found", + "\treturn NotFoundStr, nil", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) GetRHCOSVersion() (string, error) {", + "\t// Check if the node is running CoreOS or not", + "\tif !node.IsRHCOS() {", + "\t\treturn \"\", fmt.Errorf(\"invalid OS type: %s\", node.Data.Status.NodeInfo.OSImage)", + "\t}", + "", + "\t// Red Hat Enterprise Linux CoreOS 410.84.202205031645-0 (Ootpa) --\u003e 410.84.202205031645-0", + "\tsplitStr := strings.Split(node.Data.Status.NodeInfo.OSImage, rhcosName)", + "\tlongVersionSplit := strings.Split(strings.TrimSpace(splitStr[1]), \" \")", + "", + "\t// Get the short 
version string from the long version string", + "\tshortVersion, err := operatingsystem.GetShortVersionFromLong(longVersionSplit[0])", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\treturn shortVersion, nil", + "}" + ] + }, + { + "name": "GetRHELVersion", + "qualifiedName": "Node.GetRHELVersion", + "exported": true, + "receiver": "Node", + "signature": "func()(string, error)", + "doc": "Node.GetRHELVersion Retrieves the major and minor RHEL version from a node\n\nThe method first verifies that the node reports an OS image containing\n\"RHEL\"; if not, it returns an error indicating the OS type is invalid. It\nthen splits the OS image string on the RHEL identifier, trims any surrounding\nwhitespace, and extracts the leading numeric part of the remaining string as\nthe version. The extracted version string is returned along with a nil error\nwhen successful.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:182", + "calls": [ + { + "name": "Node.IsRHEL", + "kind": "function", + "source": [ + "func (node *Node) IsRHEL() bool {", + "\treturn strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), rhelName)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) GetRHELVersion() (string, error) {", + "\t// Check if the node is running RHEL or not", + "\tif !node.IsRHEL() {", + "\t\treturn \"\", fmt.Errorf(\"invalid OS type: %s\", node.Data.Status.NodeInfo.OSImage)", + "\t}", + "", + "\t// Red Hat Enterprise Linux 8.5 (Ootpa) --\u003e 8.5", + "\tsplitStr := strings.Split(node.Data.Status.NodeInfo.OSImage, rhelName)", + "\tlongVersionSplit := 
strings.Split(strings.TrimSpace(splitStr[1]), \" \")", + "", + "\treturn longVersionSplit[0], nil", + "}" + ] + }, + { + "name": "HasWorkloadDeployed", + "qualifiedName": "Node.HasWorkloadDeployed", + "exported": true, + "receiver": "Node", + "signature": "func([]*Pod)(bool)", + "doc": "Node.HasWorkloadDeployed Determines whether any of a set of pods are running on this node\n\nThe method walks through each pod in the provided slice and inspects its spec\nto see if the node name matches the current node’s name. If it finds a\nmatch, it immediately returns true; otherwise, after checking all pods it\nreturns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:230", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) HasWorkloadDeployed(podsUnderTest []*Pod) bool {", + "\tfor _, pod := range podsUnderTest {", + "\t\tif pod.Spec.NodeName == node.Data.Name {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "IsCSCOS", + "qualifiedName": "Node.IsCSCOS", + "exported": true, + "receiver": "Node", + "signature": "func()(bool)", + "doc": "Node.IsCSCOS Determines whether the node runs CoreOS\n\nThis method inspects the operating system image string from the node’s\nstatus information, trims surrounding whitespace, and checks if it contains\nthe CoreOS identifier. 
It returns true when the identifier is present,\nindicating a CoreOS or CentOS Stream CoreOS environment; otherwise it returns\nfalse.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:104", + "calls": [ + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.GetCSCOSVersion", + "kind": "function", + "source": [ + "func (node *Node) GetCSCOSVersion() (string, error) {", + "\t// Check if the node is running CoreOS or not", + "\tif !node.IsCSCOS() {", + "\t\treturn \"\", fmt.Errorf(\"invalid OS type: %s\", node.Data.Status.NodeInfo.OSImage)", + "\t}", + "", + "\t// CentOS Stream CoreOS 413.92.202303061740-0 (Plow) --\u003e 413.92.202303061740-0", + "\tsplitStr := strings.Split(node.Data.Status.NodeInfo.OSImage, cscosName)", + "\tlongVersionSplit := strings.Split(strings.TrimSpace(splitStr[1]), \" \")", + "", + "\treturn longVersionSplit[0], nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) IsCSCOS() bool {", + "\treturn strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), cscosName)", + "}" + ] + }, + { + "name": "IsControlPlaneNode", + "qualifiedName": "Node.IsControlPlaneNode", + "exported": true, + "receiver": "Node", + "signature": "func()(bool)", + "doc": "Node.IsControlPlaneNode Determines whether the node is a control‑plane instance\n\nThe method inspects each label on the node’s data and checks if any match\nknown master labels using a string containment helper. 
If a matching label is\nfound, it returns true; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:76", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetMasterCount", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetMasterCount() int {", + "\tmasterCount := 0", + "\tfor _, e := range env.Nodes {", + "\t\tif e.IsControlPlaneNode() {", + "\t\t\tmasterCount++", + "\t\t}", + "\t}", + "\treturn masterCount", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) IsControlPlaneNode() bool {", + "\tfor nodeLabel := range node.Data.Labels {", + "\t\tif stringhelper.StringInSlice(MasterLabels, nodeLabel, true) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "IsHyperThreadNode", + "qualifiedName": "Node.IsHyperThreadNode", + "exported": true, + "receiver": "Node", + "signature": "func(*TestEnvironment)(bool, error)", + "doc": "Node.IsHyperThreadNode Determines if the node supports hyper‑threading\n\nThe method runs a predefined command inside a probe pod on the node to query\nCPU core information. It parses the output for the number of threads per core\nand returns true when more than one thread is reported, indicating\nhyper‑threading support. 
Errors from execution or parsing are returned\nalongside the boolean result.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:207", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) IsHyperThreadNode(env *TestEnvironment) (bool, error) {", + "\to := clientsholder.GetClientsHolder()", + "\tnodeName := node.Data.Name", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, isHyperThreadCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn false, fmt.Errorf(\"cannot 
execute %s on probe pod %s, err=%s, stderr=%s\", isHyperThreadCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "\tre := regexp.MustCompile(`Thread\\(s\\) per core:\\s+(\\d+)`)", + "\tmatch := re.FindStringSubmatch(cmdValue)", + "\tnum := 0", + "\tif len(match) == expectedValue {", + "\t\tnum, _ = strconv.Atoi(match[1])", + "\t}", + "\treturn num \u003e 1, nil", + "}" + ] + }, + { + "name": "IsRHCOS", + "qualifiedName": "Node.IsRHCOS", + "exported": true, + "receiver": "Node", + "signature": "func()(bool)", + "doc": "Node.IsRHCOS Determines whether a node runs Red Hat CoreOS\n\nThe method examines the operating system image field of the node's status\ninformation, removing any surrounding whitespace before searching for the\npredefined CoreOS identifier string. If that identifier is present, it\nreturns true; otherwise, it returns false. This check is used by other\nfunctions to confirm OS compatibility before proceeding with further\noperations.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:93", + "calls": [ + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.GetRHCOSVersion", + "kind": "function", + "source": [ + "func (node *Node) GetRHCOSVersion() (string, error) {", + "\t// Check if the node is running CoreOS or not", + "\tif !node.IsRHCOS() {", + "\t\treturn \"\", fmt.Errorf(\"invalid OS type: %s\", node.Data.Status.NodeInfo.OSImage)", + "\t}", + "", + "\t// Red Hat Enterprise Linux CoreOS 410.84.202205031645-0 (Ootpa) --\u003e 410.84.202205031645-0", + "\tsplitStr := strings.Split(node.Data.Status.NodeInfo.OSImage, rhcosName)", + "\tlongVersionSplit := strings.Split(strings.TrimSpace(splitStr[1]), \" \")", + "", + "\t// Get the short version string from the long version string", + "\tshortVersion, err := 
operatingsystem.GetShortVersionFromLong(longVersionSplit[0])", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\treturn shortVersion, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) IsRHCOS() bool {", + "\treturn strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), rhcosName)", + "}" + ] + }, + { + "name": "IsRHEL", + "qualifiedName": "Node.IsRHEL", + "exported": true, + "receiver": "Node", + "signature": "func()(bool)", + "doc": "Node.IsRHEL checks whether the node’s OS image is a Red Hat Enterprise Linux release\n\nThe method trims any surrounding whitespace from the node’s OS image string\nand then looks for the RHEL identifier within it. If the identifier is\npresent, it returns true; otherwise it returns false. This boolean result is\nused by other functions to decide whether RHEL‑specific logic should be\napplied.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:115", + "calls": [ + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.GetRHELVersion", + "kind": "function", + "source": [ + "func (node *Node) GetRHELVersion() (string, error) {", + "\t// Check if the node is running RHEL or not", + "\tif !node.IsRHEL() {", + "\t\treturn \"\", fmt.Errorf(\"invalid OS type: %s\", node.Data.Status.NodeInfo.OSImage)", + "\t}", + "", + "\t// Red Hat Enterprise Linux 8.5 (Ootpa) --\u003e 8.5", + "\tsplitStr := strings.Split(node.Data.Status.NodeInfo.OSImage, rhelName)", + "\tlongVersionSplit := strings.Split(strings.TrimSpace(splitStr[1]), \" \")", + "", + "\treturn longVersionSplit[0], nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) IsRHEL() bool {", + "\treturn 
strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), rhelName)", + "}" + ] + }, + { + "name": "IsRTKernel", + "qualifiedName": "Node.IsRTKernel", + "exported": true, + "receiver": "Node", + "signature": "func()(bool)", + "doc": "Node.IsRTKernel Indicates if the node uses a real‑time kernel\n\nThis method examines the node's kernel version string, trims whitespace, and\nchecks for the presence of \"rt\" to determine whether a real‑time kernel is\ninstalled. It returns true when the substring is found, otherwise false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:124", + "calls": [ + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) IsRTKernel() bool {", + "\t// More information: https://www.redhat.com/sysadmin/real-time-kernel", + "\treturn strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.KernelVersion), \"rt\")", + "}" + ] + }, + { + "name": "IsWorkerNode", + "qualifiedName": "Node.IsWorkerNode", + "exported": true, + "receiver": "Node", + "signature": "func()(bool)", + "doc": "Node.IsWorkerNode Determines if a node is considered a worker by inspecting its labels\n\nThis method iterates over all labels attached to the node and checks each one\nagainst a predefined list of worker-identifying label patterns. It uses a\nhelper that performs a substring match, allowing flexible recognition of\ncommon worker label conventions. 
The function returns true if any matching\nlabel is found; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:62", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetWorkerCount", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetWorkerCount() int {", + "\tworkerCount := 0", + "\tfor _, e := range env.Nodes {", + "\t\tif e.IsWorkerNode() {", + "\t\t\tworkerCount++", + "\t\t}", + "\t}", + "\treturn workerCount", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node *Node) IsWorkerNode() bool {", + "\tfor nodeLabel := range node.Data.Labels {", + "\t\tif stringhelper.StringInSlice(WorkerLabels, nodeLabel, true) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "MarshalJSON", + "qualifiedName": "Node.MarshalJSON", + "exported": true, + "receiver": "Node", + "signature": "func()([]byte, error)", + "doc": "Node.MarshalJSON Serializes the node's internal data to JSON\n\nThe method calls the standard library’s Marshal function with a pointer to\nthe node’s Data field. 
It produces a byte slice containing the JSON\nrepresentation of that data and returns any error encountered during\nmarshaling.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:51", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (node Node) MarshalJSON() ([]byte, error) {", + "\treturn json.Marshal(\u0026node.Data)", + "}" + ] + }, + { + "name": "SetPreflightResults", + "qualifiedName": "Operator.SetPreflightResults", + "exported": true, + "receiver": "Operator", + "signature": "func(*TestEnvironment)(error)", + "doc": "Operator.SetPreflightResults Collects and stores Preflight test outcomes for an operator\n\nThe function runs the OpenShift Preflight checks against the operator's\nbundle image, capturing passed, failed, and error results. It writes all\ncheck logs to a buffer and attaches them to the global log output. After\nprocessing, it removes temporary artifacts and assigns the collected results\nto the operator’s PreflightResults field.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:104", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + 
"}" + ] + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "name": "NewMapWriter", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts", + "name": "ContextWithWriter", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/operator", + "name": "WithDockerConfigJSONFromFile", + "kind": "function" + }, + { + "name": "TestEnvironment.GetDockerConfigFile", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetDockerConfigFile() string {", + "\treturn env.params.PfltDockerconfig", + "}" + ] + }, + { + "name": "TestEnvironment.IsPreflightInsecureAllowed", + "kind": "function", + "source": [ + "func (env *TestEnvironment) IsPreflightInsecureAllowed() bool {", + "\treturn env.params.AllowPreflightInsecure", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/operator", + "name": "WithInsecureConnection", + "kind": "function" + }, + { + "pkgPath": "bytes", + "name": "NewBuffer", + "kind": "function" + }, + { + "pkgPath": "log", + "name": "Default", + "kind": "function" + }, + { + "name": "SetOutput", + "kind": "function" + }, + { + "pkgPath": "github.com/go-logr/stdr", + "name": "New", + "kind": "function" + }, + { + "pkgPath": "github.com/go-logr/logr", + "name": "NewContext", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-openshift-ecosystem/openshift-preflight/operator", + "name": "NewCheck", + "kind": "function" + }, + { + 
"name": "Run", + "kind": "function" + }, + { + "name": "List", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "WithError", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "RemoveAll", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "GetPreflightResultsDB", + "kind": "function", + "source": [ + "func GetPreflightResultsDB(results *plibRuntime.Results) PreflightResultsDB {", + "\tresultsDB := PreflightResultsDB{}", + "\tfor _, res := range results.Passed {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, Remediation: res.Help().Suggestion}", + "\t\tresultsDB.Passed = append(resultsDB.Passed, test)", + "\t}", + "\tfor _, res := range results.Failed {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, 
Remediation: res.Help().Suggestion}", + "\t\tresultsDB.Failed = append(resultsDB.Failed, test)", + "\t}", + "\tfor _, res := range results.Errors {", + "\t\ttest := PreflightTest{Name: res.Name(), Description: res.Metadata().Description, Remediation: res.Help().Suggestion, Error: res.Error()}", + "\t\tresultsDB.Errors = append(resultsDB.Errors, test)", + "\t}", + "", + "\treturn resultsDB", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight 
container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "Operator.String", + "exported": true, + "receiver": "Operator", + "signature": "func()(string)", + "doc": "Operator.String Provides a human-readable representation of the operator\n\nThis method formats key fields such as the operator name, namespace,\nsubscription name, and target namespaces into a single string. It uses a\nstandard formatting function to create the output and returns it for display\nor logging purposes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:93", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (op *Operator) String() string {", + "\treturn fmt.Sprintf(\"csv: %s ns:%s subscription:%s targetNamespaces=%v\", op.Name, op.Namespace, op.SubscriptionName, op.TargetNamespaces)", + "}" + ] + }, + { + "name": "AffinityRequired", + "qualifiedName": "Pod.AffinityRequired", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.AffinityRequired Determines if a pod requires affinity based on its labels\n\nThe method looks for the key that indicates whether affinity is required in\nthe pod's label set. 
If present, it attempts to interpret the value as a\nboolean string; on parsing failure it logs a warning and returns false. When\nthe key is absent or parsing succeeds, it returns the parsed boolean result.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:185", + "calls": [ + { + "pkgPath": "strconv", + "name": "ParseBool", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetAffinityRequiredPods", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetAffinityRequiredPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.AffinityRequired() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetPodsWithoutAffinityRequiredLabel", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetPodsWithoutAffinityRequiredLabel() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif !p.AffinityRequired() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) AffinityRequired() bool {", + "\tif val, ok := p.Labels[AffinityRequiredKey]; ok {", + "\t\tresult, err := strconv.ParseBool(val)", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"failure to parse bool %v\", val)", + "\t\t\treturn false", + "\t\t}", + "\t\treturn result", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": 
"CheckResourceHugePagesSize", + "qualifiedName": "Pod.CheckResourceHugePagesSize", + "exported": true, + "receiver": "Pod", + "signature": "func(string)(bool)", + "doc": "Pod.CheckResourceHugePagesSize Verifies that all huge page resources match the specified size\n\nThe method iterates over each container in a pod, checking both requested and\nlimited resources for any huge page entries. If a huge page resource is found\nbut its name differs from the supplied size, the function returns false\nimmediately. When no mismatches are detected, it returns true.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:225", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) CheckResourceHugePagesSize(size string) bool {", + "\tfor _, cut := range p.Containers {", + "\t\t// Resources must be specified", + "\t\tif 
len(cut.Resources.Requests) == 0 || len(cut.Resources.Limits) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor name := range cut.Resources.Requests {", + "\t\t\tif strings.Contains(name.String(), hugePages) \u0026\u0026 name.String() != size {", + "\t\t\t\treturn false", + "\t\t\t}", + "\t\t}", + "\t\tfor name := range cut.Resources.Limits {", + "\t\t\tif strings.Contains(name.String(), hugePages) \u0026\u0026 name.String() != size {", + "\t\t\t\treturn false", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "ContainsIstioProxy", + "qualifiedName": "Pod.ContainsIstioProxy", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.ContainsIstioProxy Detects the presence of an Istio side‑car container in a pod\n\nThe method scans each container defined in the pod, comparing its name\nagainst the predefined Istio proxy container identifier. If it finds a match,\nit immediately returns true; otherwise, after examining all containers, it\nreturns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:280", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) ContainsIstioProxy() bool {", + "\tfor _, container := range p.Containers {", + "\t\tif container.Name == IstioProxyContainerName {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "CreatedByDeploymentConfig", + "qualifiedName": "Pod.CreatedByDeploymentConfig", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool, error)", + "doc": "Pod.CreatedByDeploymentConfig Determines if a pod originates from an OpenShift DeploymentConfig\n\nThis method examines each owner reference of the pod, looking for a\nReplicationController that itself references a DeploymentConfig. It retrieves\nreplication controller objects via the Kubernetes client and checks their\nowners to find a matching deployment config name. 
The function returns true\nif such a relationship exists, otherwise false, along with any error\nencountered during API calls.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:297", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "GetOwnerReferences", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "ReplicationControllers", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "GetOwnerReferences", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that 
do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + 
"\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := 
range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + 
"\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) CreatedByDeploymentConfig() (bool, error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, podOwner := range p.GetOwnerReferences() {", + "\t\tif podOwner.Kind == replicationController {", + "\t\t\treplicationControllers, err := oc.K8sClient.CoreV1().ReplicationControllers(p.Namespace).Get(context.TODO(), podOwner.Name, metav1.GetOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\treturn false, err", + "\t\t\t}", + "\t\t\tfor _, rcOwner := range 
replicationControllers.GetOwnerReferences() {", + "\t\t\t\tif rcOwner.Name == podOwner.Name \u0026\u0026 rcOwner.Kind == deploymentConfig {", + "\t\t\t\t\treturn true, err", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false, nil", + "}" + ] + }, + { + "name": "GetRunAsNonRootFalseContainers", + "qualifiedName": "Pod.GetRunAsNonRootFalseContainers", + "exported": true, + "receiver": "Pod", + "signature": "func(map[string]bool)([]*Container, []string)", + "doc": "Pod.GetRunAsNonRootFalseContainers identifies containers violating non-root security policies\n\nThis method examines each container in a pod to determine if it inherits or\nsets runAsNonRoot to false or runs as user ID zero, indicating a root\ncontext. It skips any containers listed in the provided map and aggregates\nthose that fail the checks along with explanatory reasons. The function\nreturns two slices: one of non-compliant containers and another containing\nthe corresponding justification strings.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:690", + "calls": [ + { + "name": "Container.IsContainerRunAsNonRoot", + "kind": "function", + "source": [ + "func (c *Container) IsContainerRunAsNonRoot(podRunAsNonRoot *bool) (isContainerRunAsNonRoot bool, reason string) {", + "\tif c.SecurityContext != nil \u0026\u0026 c.SecurityContext.RunAsNonRoot != nil {", + "\t\treturn *c.SecurityContext.RunAsNonRoot, fmt.Sprintf(\"RunAsNonRoot is set to %t at the container level, overriding a %v value defined at pod level\",", + "\t\t\t*c.SecurityContext.RunAsNonRoot, stringhelper.PointerToString(podRunAsNonRoot))", + "\t}", + "", + "\tif podRunAsNonRoot != nil {", + "\t\treturn *podRunAsNonRoot, fmt.Sprintf(\"RunAsNonRoot is set to nil at container level and inheriting a %t value from the pod level RunAsNonRoot setting\", *podRunAsNonRoot)", + "\t}", + "", + "\treturn false, \"RunAsNonRoot is set to nil at pod and container level\"", + "}" + ] + }, + { + "name": 
"Container.IsContainerRunAsNonRootUserID", + "kind": "function", + "source": [ + "func (c *Container) IsContainerRunAsNonRootUserID(podRunAsNonRootUserID *int64) (isContainerRunAsNonRootUserID bool, reason string) {", + "\tif c.SecurityContext != nil \u0026\u0026 c.SecurityContext.RunAsUser != nil {", + "\t\treturn *c.SecurityContext.RunAsUser != 0, fmt.Sprintf(\"RunAsUser is set to %v at the container level, overriding a %s value defined at pod level\",", + "\t\t\t*c.SecurityContext.RunAsUser, stringhelper.PointerToString(podRunAsNonRootUserID))", + "\t}", + "", + "\tif podRunAsNonRootUserID != nil {", + "\t\treturn *podRunAsNonRootUserID != 0, fmt.Sprintf(\"RunAsUser is set to nil at container level and inheriting a %v value from the pod level RunAsUser setting\", *podRunAsNonRootUserID)", + "\t}", + "", + "\treturn false, \"RunAsUser is set to nil at pod and container level\"", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) GetRunAsNonRootFalseContainers(knownContainersToSkip map[string]bool) (nonCompliantContainers []*Container, nonComplianceReasons []string) {", + "\t// Check pod-level security context this will be set by default for containers", + "\t// If not already configured at the container level", + "\tvar podRunAsNonRoot *bool", + "\tif p.Spec.SecurityContext != nil \u0026\u0026 p.Spec.SecurityContext.RunAsNonRoot != nil {", + "\t\tpodRunAsNonRoot = p.Spec.SecurityContext.RunAsNonRoot", + "\t}", + "", + "\tvar podRunAsUserID *int64", + "\tif p.Spec.SecurityContext != nil \u0026\u0026 p.Spec.SecurityContext.RunAsUser != nil {", + "\t\tpodRunAsUserID = p.Spec.SecurityContext.RunAsUser", + "\t}", + "", + "\t// Check each container for the RunAsNonRoot parameter.", + "\t// If it is not present, the pod value applies", + "\tfor _, cut := range p.Containers {", + "\t\tif 
knownContainersToSkip[cut.Name] {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tisRunAsNonRoot, isRunAsNonRootReason := cut.IsContainerRunAsNonRoot(podRunAsNonRoot)", + "\t\tisRunAsNonRootUserID, isRunAsNonRootUserIDReason := cut.IsContainerRunAsNonRootUserID(podRunAsUserID)", + "", + "\t\tif isRunAsNonRoot || isRunAsNonRootUserID {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnonCompliantContainers = append(nonCompliantContainers, cut)", + "\t\tnonComplianceReasons = append(nonComplianceReasons, isRunAsNonRootReason+\", \"+isRunAsNonRootUserIDReason)", + "\t}", + "", + "\treturn nonCompliantContainers, nonComplianceReasons", + "}" + ] + }, + { + "name": "GetTopOwner", + "qualifiedName": "Pod.GetTopOwner", + "exported": true, + "receiver": "Pod", + "signature": "func()(map[string]podhelper.TopOwner, error)", + "doc": "Pod.GetTopOwner Retrieves the top-level owners of a pod\n\nThe method returns a map keyed by owner kind, containing information about\neach top-level resource that owns the pod. It calls an internal helper to\nresolve all owner references, following chains up to the root. 
The result is\nreturned along with any error encountered during resolution.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:730", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "GetPodTopOwner", + "kind": "function", + "source": [ + "func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) {", + "\ttopOwners = make(map[string]TopOwner)", + "\terr = followOwnerReferences(", + "\t\tclientsholder.GetClientsHolder().GroupResources,", + "\t\tclientsholder.GetClientsHolder().DynamicClient,", + "\t\ttopOwners,", + "\t\tpodNamespace,", + "\t\tpodOwnerReferences)", + "\tif err != nil {", + "\t\treturn topOwners, fmt.Errorf(\"could not get top owners, err: %v\", err)", + "\t}", + "\treturn topOwners, nil", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) GetTopOwner() (topOwners map[string]podhelper.TopOwner, err error) {", + "\treturn podhelper.GetPodTopOwner(p.Namespace, p.OwnerReferences)", + "}" + ] + }, + { + "name": "HasHugepages", + "qualifiedName": "Pod.HasHugepages", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.HasHugepages determines if any container requests or limits hugepage resources\n\nThe method scans each container’s resource requests and limits for a name\ncontaining the substring \"hugepage\". 
If such a resource is found, it\nimmediately returns true; otherwise, after all containers are checked, it\nreturns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:203", + "calls": [ + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetHugepagesPods", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetHugepagesPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.HasHugepages() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) HasHugepages() bool {", + "\tfor _, cut := range p.Containers {", + "\t\tfor name := range cut.Resources.Requests {", + "\t\t\tif strings.Contains(name.String(), hugePages) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t\tfor _, name := range cut.Resources.Limits {", + "\t\t\tif strings.Contains(name.String(), hugePages) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "HasNodeSelector", + "qualifiedName": "Pod.HasNodeSelector", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.HasNodeSelector Indicates if the pod specifies a node selector\n\nThe method examines the 
pod's specification for a non‑empty nodeSelector\nmap. It returns true when at least one key/value pair is present, meaning the\npod has constraints on which nodes it can run. If the map is empty or nil,\nthe function returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:321", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) HasNodeSelector() bool {", + "\t// Checks whether or not the pod has a nodeSelector or a NodeName supplied", + "\treturn len(p.Spec.NodeSelector) != 0", + "}" + ] + }, + { + "name": "IsAffinityCompliant", + "qualifiedName": "Pod.IsAffinityCompliant", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool, error)", + "doc": "Pod.IsAffinityCompliant checks whether a pod has required affinity rules\n\nThe method examines the pod's specification to determine if it contains any\naffinity configuration. If no affinity is present, or if anti‑affinity\nrules exist, or if neither pod nor node affinity are defined, it returns\nfalse along with an explanatory error. 
Otherwise it reports success by\nreturning true and a nil error.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:252", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsAffinityCompliant() (bool, error) {", + "\tif p.Spec.Affinity == nil {", + "\t\treturn false, fmt.Errorf(\"%s has been found with an AffinityRequired flag but is missing corresponding affinity rules\", p.String())", + "\t}", + "\tif p.Spec.Affinity.PodAntiAffinity != nil {", + "\t\treturn false, fmt.Errorf(\"%s has been found with an AffinityRequired flag but has anti-affinity rules\", p.String())", + "\t}", + "\tif p.Spec.Affinity.PodAffinity == nil \u0026\u0026 p.Spec.Affinity.NodeAffinity == nil {", + "\t\treturn false, fmt.Errorf(\"%s has been found with an AffinityRequired flag but is missing corresponding pod/node affinity rules\", p.String())", + "\t}", + "\treturn true, nil", + "}" + ] + }, + { + "name": "IsAutomountServiceAccountSetOnSA", + "qualifiedName": "Pod.IsAutomountServiceAccountSetOnSA", + "exported": true, + "receiver": "Pod", + "signature": "func()(*bool, error)", + "doc": 
"Pod.IsAutomountServiceAccountSetOnSA Determines if a pod’s service account has automount enabled\n\nThe method inspects the pod’s associated service account to see whether its\nAutomountServiceAccountToken field is set. It first validates that the\nservice account map exists and contains an entry for the pod’s namespace\nand name, returning errors otherwise. If found, it returns a pointer to the\nboolean value indicating automount status along with nil error.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:741", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsAutomountServiceAccountSetOnSA() (isSet *bool, err error) {", + "\tif p.AllServiceAccountsMap == nil {", + "\t\treturn isSet, fmt.Errorf(\"AllServiceAccountsMap is not initialized for pod with ns: %s and name %s\", p.Namespace, p.Name)", + "\t}", + "\tif _, ok := (*p.AllServiceAccountsMap)[p.Namespace+p.Spec.ServiceAccountName]; !ok {", + "\t\treturn isSet, fmt.Errorf(\"could not find a service account with ns: %s and name %s\", p.Namespace, p.Spec.ServiceAccountName)", + "\t}", + "\treturn (*p.AllServiceAccountsMap)[p.Namespace+p.Spec.ServiceAccountName].AutomountServiceAccountToken, nil", + "}" + ] + }, + { + "name": "IsCPUIsolationCompliant", + "qualifiedName": "Pod.IsCPUIsolationCompliant", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.IsCPUIsolationCompliant Determines whether a pod meets CPU isolation requirements\n\nThe method checks that the pod has annotations disabling both CPU and IRQ\nload balancing, and verifies a runtime class name is set. 
If either condition\nfails it logs a debug message and returns false; otherwise true.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:150", + "calls": [ + { + "name": "LoadBalancingDisabled", + "kind": "function", + "source": [ + "func LoadBalancingDisabled(p *Pod) bool {", + "\tconst (", + "\t\tdisableVar = \"disable\"", + "\t)", + "", + "\tcpuLoadBalancingDisabled := false", + "\tirqLoadBalancingDisabled := false", + "", + "\tif v, ok := p.Annotations[\"cpu-load-balancing.crio.io\"]; ok {", + "\t\tif v == disableVar {", + "\t\t\tcpuLoadBalancingDisabled = true", + "\t\t} else {", + "\t\t\tlog.Debug(\"Annotation cpu-load-balancing.crio.io has an invalid value for CPU isolation. Must be 'disable'.\")", + "\t\t}", + "\t} else {", + "\t\tlog.Debug(\"Annotation cpu-load-balancing.crio.io is missing.\")", + "\t}", + "", + "\tif v, ok := p.Annotations[\"irq-load-balancing.crio.io\"]; ok {", + "\t\tif v == disableVar {", + "\t\t\tirqLoadBalancingDisabled = true", + "\t\t} else {", + "\t\t\tlog.Debug(\"Annotation irq-load-balancing.crio.io has an invalid value for CPU isolation. 
Must be 'disable'.\")", + "\t\t}", + "\t} else {", + "\t\tlog.Debug(\"Annotation irq-load-balancing.crio.io is missing.\")", + "\t}", + "", + "\t// Both conditions have to be set to 'disable'", + "\tif cpuLoadBalancingDisabled \u0026\u0026 irqLoadBalancingDisabled {", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.IsRuntimeClassNameSpecified", + "kind": "function", + "source": [ + "func (p *Pod) IsRuntimeClassNameSpecified() bool {", + "\treturn p.Spec.RuntimeClassName != nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() \u0026\u0026 p.IsCPUIsolationCompliant() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsCPUIsolationCompliant() bool {", + "\tisCPUIsolated := true", + "", + "\tif !LoadBalancingDisabled(p) {", + "\t\tlog.Debug(\"Pod %q has been found to not have annotations set correctly for CPU isolation.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\tif 
!p.IsRuntimeClassNameSpecified() {", + "\t\tlog.Debug(\"Pod %q has been found to not have runtimeClassName specified.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\treturn isCPUIsolated", + "}" + ] + }, + { + "name": "IsPodGuaranteed", + "qualifiedName": "Pod.IsPodGuaranteed", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.IsPodGuaranteed Determines if the pod meets guaranteed resource conditions\n\nThe method checks whether every container in the pod has defined CPU and\nmemory limits that match their requests, indicating a guaranteed QoS class.\nIt delegates this logic to AreResourcesIdentical, which verifies consistency\nacross all containers. The result is returned as a boolean.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:132", + "calls": [ + { + "name": "AreResourcesIdentical", + "kind": "function", + "source": [ + "func AreResourcesIdentical(p *Pod) bool {", + "\t// Pods may contain more than one container. 
All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// At least limits must be specified (requests default to limits if not specified)", + "\t\tif len(cut.Resources.Limits) == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Gather the values", + "\t\tcpuRequests := cut.Resources.Requests.Cpu()", + "\t\tcpuLimits := cut.Resources.Limits.Cpu()", + "\t\tmemoryRequests := cut.Resources.Requests.Memory()", + "\t\tmemoryLimits := cut.Resources.Limits.Memory()", + "", + "\t\t// Check for mismatches", + "\t\tif !cpuRequests.Equal(*cpuLimits) {", + "\t\t\tlog.Debug(\"%s has CPU requests %f and limits %f that do not match.\", cut.String(), cpuRequests.AsApproximateFloat64(), cpuLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !memoryRequests.Equal(*memoryLimits) {", + "\t\t\tlog.Debug(\"%s has memory requests %f and limits %f that do not match.\", cut.String(), memoryRequests.AsApproximateFloat64(), memoryLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPods", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteed() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetNonGuaranteedPods", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetNonGuaranteedPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif 
!p.IsPodGuaranteed() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsPodGuaranteed() bool {", + "\treturn AreResourcesIdentical(p)", + "}" + ] + }, + { + "name": "IsPodGuaranteedWithExclusiveCPUs", + "qualifiedName": "Pod.IsPodGuaranteedWithExclusiveCPUs", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.IsPodGuaranteedWithExclusiveCPUs Determines if a pod’s CPU requests and limits are whole units and match exactly\n\nIt checks that each container in the pod specifies CPU resources as whole and\nthat the request equals the limit for both CPU and memory. If all containers\nsatisfy these conditions, it returns true; otherwise false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:141", + "calls": [ + { + "name": "AreCPUResourcesWholeUnits", + "kind": "function", + "source": [ + "func AreCPUResourcesWholeUnits(p *Pod) bool {", + "\tisInteger := func(val int64) bool {", + "\t\treturn val%1000 == 0", + "\t}", + "", + "\t// Pods may contain more than one container. 
All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// Resources must be specified", + "\t\tcpuRequestsMillis := cut.Resources.Requests.Cpu().MilliValue()", + "\t\tcpuLimitsMillis := cut.Resources.Limits.Cpu().MilliValue()", + "", + "\t\tif cpuRequestsMillis == 0 || cpuLimitsMillis == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined requests or limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !isInteger(cpuRequestsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU requests %d (milli) that has to be a whole unit.\", cut.String(), cpuRequestsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t\tif !isInteger(cpuLimitsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU limits %d (milli) that has to be a whole unit.\", cut.String(), cpuLimitsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "name": "AreResourcesIdentical", + "kind": "function", + "source": [ + "func AreResourcesIdentical(p *Pod) bool {", + "\t// Pods may contain more than one container. 
All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// At least limits must be specified (requests default to limits if not specified)", + "\t\tif len(cut.Resources.Limits) == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Gather the values", + "\t\tcpuRequests := cut.Resources.Requests.Cpu()", + "\t\tcpuLimits := cut.Resources.Limits.Cpu()", + "\t\tmemoryRequests := cut.Resources.Requests.Memory()", + "\t\tmemoryLimits := cut.Resources.Limits.Memory()", + "", + "\t\t// Check for mismatches", + "\t\tif !cpuRequests.Equal(*cpuLimits) {", + "\t\t\tlog.Debug(\"%s has CPU requests %f and limits %f that do not match.\", cut.String(), cpuRequests.AsApproximateFloat64(), cpuLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !memoryRequests.Equal(*memoryLimits) {", + "\t\t\tlog.Debug(\"%s has memory requests %f and limits %f that do not match.\", cut.String(), memoryRequests.AsApproximateFloat64(), memoryLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod {", + "\tvar 
filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() \u0026\u0026 p.IsCPUIsolationCompliant() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool {", + "\treturn AreCPUResourcesWholeUnits(p) \u0026\u0026 AreResourcesIdentical(p)", + "}" + ] + }, + { + "name": "IsRunAsUserID", + "qualifiedName": "Pod.IsRunAsUserID", + "exported": true, + "receiver": "Pod", + "signature": "func(int64)(bool)", + "doc": "Pod.IsRunAsUserID Checks if the pod runs as a specific user ID\n\nThe method inspects the pod's security context, returning false if it is nil\nor if no RunAsUser value is set. If a run-as-user value exists, it compares\nthat value to the supplied uid and returns true when they match. This allows\ncallers to verify whether the pod will execute with the given user identity.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:675", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsRunAsUserID(uid int64) bool {", + "\tif p.Spec.SecurityContext == nil || p.Spec.SecurityContext.RunAsUser == nil {", + "\t\treturn false", + "\t}", + "\treturn *p.Spec.SecurityContext.RunAsUser == uid", + "}" + ] + }, + { + "name": "IsRuntimeClassNameSpecified", + "qualifiedName": "Pod.IsRuntimeClassNameSpecified", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.IsRuntimeClassNameSpecified checks whether a pod has a runtime class specified\n\nThe method returns true when the pod’s specification includes a\nruntimeClassName field, indicating that a runtime class has been assigned. 
If\nthe field is nil, it returns false, implying no runtime class is set for the\npod.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:332", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsCPUIsolationCompliant", + "kind": "function", + "source": [ + "func (p *Pod) IsCPUIsolationCompliant() bool {", + "\tisCPUIsolated := true", + "", + "\tif !LoadBalancingDisabled(p) {", + "\t\tlog.Debug(\"Pod %q has been found to not have annotations set correctly for CPU isolation.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\tif !p.IsRuntimeClassNameSpecified() {", + "\t\tlog.Debug(\"Pod %q has been found to not have runtimeClassName specified.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\treturn isCPUIsolated", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsRuntimeClassNameSpecified() bool {", + "\treturn p.Spec.RuntimeClassName != nil", + "}" + ] + }, + { + "name": "IsShareProcessNamespace", + "qualifiedName": "Pod.IsShareProcessNamespace", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool)", + "doc": "Pod.IsShareProcessNamespace determines if a pod shares its process namespace\n\nThe method checks the pod specification for the ShareProcessNamespace field.\nIf the field exists and is set to true, it returns true; otherwise it returns\nfalse.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:270", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetShareProcessNamespacePods", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetShareProcessNamespacePods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsShareProcessNamespace() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn 
filteredPods", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsShareProcessNamespace() bool {", + "\treturn p.Spec.ShareProcessNamespace != nil \u0026\u0026 *p.Spec.ShareProcessNamespace", + "}" + ] + }, + { + "name": "IsUsingClusterRoleBinding", + "qualifiedName": "Pod.IsUsingClusterRoleBinding", + "exported": true, + "receiver": "Pod", + "signature": "func([]rbacv1.ClusterRoleBinding, *log.Logger)(bool, string, error)", + "doc": "Pod.IsUsingClusterRoleBinding Checks if a pod’s service account is linked to any cluster role binding\n\nThe function receives a list of cluster role bindings and logs the pod being\nexamined. It iterates through each binding, comparing the pod’s service\naccount name and namespace with the subjects in the binding. If a match is\nfound, it reports true along with the role reference name; otherwise it\nreturns false.\n\nnolint:gocritic", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:647", + "calls": [ + { + "name": "Info", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsUsingClusterRoleBinding(clusterRoleBindings []rbacv1.ClusterRoleBinding,", + "\tlogger *log.Logger) (bool, string, error) {", + "\t// This function accepts a list of clusterRoleBindings and checks to see if the pod's service account is", + "\t// tied to any of them. If it is, then it returns true, otherwise it returns false.", + "\tlogger.Info(\"Pod %q is using service account %q\", p, p.Spec.ServiceAccountName)", + "", + "\t// Loop through the service accounts in the namespace, looking for a match between the pod serviceAccountName and", + "\t// the service account name. 
If there is a match, check to make sure that the SA is not a 'subject' of the cluster", + "\t// role bindings.", + "\tfor crbIndex := range clusterRoleBindings {", + "\t\tfor _, subject := range clusterRoleBindings[crbIndex].Subjects {", + "\t\t\tif subject.Kind == rbacv1.ServiceAccountKind \u0026\u0026", + "\t\t\t\tsubject.Name == p.Spec.ServiceAccountName \u0026\u0026 subject.Namespace == p.Namespace {", + "\t\t\t\tlogger.Error(\"Pod %q has service account %q that is tied to cluster role binding %q\", p.Name, p.Spec.ServiceAccountName, clusterRoleBindings[crbIndex].Name)", + "\t\t\t\treturn true, clusterRoleBindings[crbIndex].RoleRef.Name, nil", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn false, \"\", nil", + "}" + ] + }, + { + "name": "IsUsingSRIOV", + "qualifiedName": "Pod.IsUsingSRIOV", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool, error)", + "doc": "Pod.IsUsingSRIOV determines whether a pod has any SR‑IOV network interfaces\n\nThe method inspects the pod’s annotations for CNCF network names, retrieves\neach corresponding NetworkAttachmentDefinition, and checks if its CNI\nconfiguration type is \"sriov\". If at least one definition matches, it returns\ntrue; otherwise false. 
Errors from annotation parsing or API calls are\npropagated to the caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:497", + "calls": [ + { + "name": "getCNCFNetworksNamesFromPodAnnotation", + "kind": "function", + "source": [ + "func getCNCFNetworksNamesFromPodAnnotation(networksAnnotation string) []string {", + "\t// Each CNCF network has many more fields, but here we only need to unmarshal the name.", + "\t// See https://github.com/k8snetworkplumbingwg/multus-cni/blob/e692127d19623c8bdfc4d391224ea542658b584c/pkg/types/types.go#L127", + "\ttype CNCFNetwork struct {", + "\t\tName string `json:\"name\"`", + "\t}", + "", + "\tnetworkObjects := []CNCFNetwork{}", + "\tnetworkNames := []string{}", + "", + "\t// Let's start trying to unmarshal a json array of objects.", + "\t// We will not care about bad-formatted/invalid annotation value. If that's the case,", + "\t// the pod wouldn't have been deployed or wouldn't be in running state.", + "\tif err := json.Unmarshal([]byte(networksAnnotation), \u0026networkObjects); err == nil {", + "\t\tfor _, network := range networkObjects {", + "\t\t\tnetworkNames = append(networkNames, network.Name)", + "\t\t}", + "\t\treturn networkNames", + "\t}", + "", + "\t// If the previous unmarshalling didn't work, let's try with parsing the comma separated names list.", + "\tnetworks := strings.TrimSpace(networksAnnotation)", + "", + "\t// First, avoid empty strings (unlikely).", + "\tif networks == \"\" {", + "\t\treturn []string{}", + "\t}", + "", + "\tfor _, networkName := range strings.Split(networks, \",\") {", + "\t\tnetworkNames = append(networkNames, strings.TrimSpace(networkName))", + "\t}", + "\treturn networkNames", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn 
\u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "NetworkAttachmentDefinitions", + "kind": "function" + }, + { + "name": "K8sCniCncfIoV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "isNetworkAttachmentDefinitionConfigTypeSRIOV", + "kind": "function", + "source": [ + "func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error) {", + "\tconst (", + "\t\ttypeSriov = \"sriov\"", + "\t)", + "", + "\ttype CNIConfig struct {", + "\t\tCniVersion string `json:\"cniVersion\"`", + "\t\tName string `json:\"name\"`", + "\t\tType *string `json:\"type,omitempty\"`", + "\t\tPlugins *[]struct {", + "\t\t\tType string `json:\"type\"`", + "\t\t} `json:\"plugins,omitempty\"`", + "\t}", + "", + "\tcniConfig := CNIConfig{}", + "\tif err := json.Unmarshal([]byte(nadConfig), \u0026cniConfig); err != nil {", + "\t\treturn false, fmt.Errorf(\"failed to unmarshal cni config %s: %v\", nadConfig, err)", + "\t}", + "", + "\t// If type is found, it's a single plugin CNI config.", + "\tif cniConfig.Type != nil {", + "\t\tlog.Debug(\"Single plugin config type found: %+v, type=%s\", cniConfig, *cniConfig.Type)", + "\t\treturn *cniConfig.Type == typeSriov, nil", + "\t}", + "", + "\tif cniConfig.Plugins == nil {", + "\t\treturn false, fmt.Errorf(\"invalid multi-plugins cni config: %s\", nadConfig)", + "\t}", + "", + 
"\tlog.Debug(\"CNI plugins: %+v\", *cniConfig.Plugins)", + "\tfor i := range *cniConfig.Plugins {", + "\t\tplugin := (*cniConfig.Plugins)[i]", + "\t\tif plugin.Type == typeSriov {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\t// No sriov plugin type found.", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetPodsUsingSRIOV", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetPodsUsingSRIOV() ([]*Pod, error) {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tusesSRIOV, err := p.IsUsingSRIOV()", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to check sriov usage for pod %s: %v\", p, err)", + "\t\t}", + "", + "\t\tif usesSRIOV {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) IsUsingSRIOV() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tfor _, networkName := range cncfNetworkNames {", + 
"\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(p.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\tisSRIOV, err := isNetworkAttachmentDefinitionConfigTypeSRIOV(nad.Spec.Config)", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to know if network-attachment %s is sriov: %v\", networkName, err)", + "\t\t}", + "", + "\t\tlog.Debug(\"%s: NAD config: %s\", p, nad.Spec.Config)", + "\t\tif isSRIOV {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "name": "IsUsingSRIOVWithMTU", + "qualifiedName": "Pod.IsUsingSRIOVWithMTU", + "exported": true, + "receiver": "Pod", + "signature": "func()(bool, error)", + "doc": "Pod.IsUsingSRIOVWithMTU determines if the pod has any SR-IOV interface configured with an MTU\n\nThe method inspects the pod's annotations to find declared CNCF networks,\nthen retrieves each corresponding NetworkAttachmentDefinition. For every\nnetwork it checks whether a SriovNetwork and matching SriovNetworkNodePolicy\nexist that specify an MTU value; if so it returns true. 
If no such\nconfiguration is found, it returns false without error.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:542", + "calls": [ + { + "name": "getCNCFNetworksNamesFromPodAnnotation", + "kind": "function", + "source": [ + "func getCNCFNetworksNamesFromPodAnnotation(networksAnnotation string) []string {", + "\t// Each CNCF network has many more fields, but here we only need to unmarshal the name.", + "\t// See https://github.com/k8snetworkplumbingwg/multus-cni/blob/e692127d19623c8bdfc4d391224ea542658b584c/pkg/types/types.go#L127", + "\ttype CNCFNetwork struct {", + "\t\tName string `json:\"name\"`", + "\t}", + "", + "\tnetworkObjects := []CNCFNetwork{}", + "\tnetworkNames := []string{}", + "", + "\t// Let's start trying to unmarshal a json array of objects.", + "\t// We will not care about bad-formatted/invalid annotation value. If that's the case,", + "\t// the pod wouldn't have been deployed or wouldn't be in running state.", + "\tif err := json.Unmarshal([]byte(networksAnnotation), \u0026networkObjects); err == nil {", + "\t\tfor _, network := range networkObjects {", + "\t\t\tnetworkNames = append(networkNames, network.Name)", + "\t\t}", + "\t\treturn networkNames", + "\t}", + "", + "\t// If the previous unmarshalling didn't work, let's try with parsing the comma separated names list.", + "\tnetworks := strings.TrimSpace(networksAnnotation)", + "", + "\t// First, avoid empty strings (unlikely).", + "\tif networks == \"\" {", + "\t\treturn []string{}", + "\t}", + "", + "\tfor _, networkName := range strings.Split(networks, \",\") {", + "\t\tnetworkNames = append(networkNames, strings.TrimSpace(networkName))", + "\t}", + "\treturn networkNames", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn 
\u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "NetworkAttachmentDefinitions", + "kind": "function" + }, + { + "name": "K8sCniCncfIoV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "sriovNetworkUsesMTU", + "kind": "function", + "source": [ + "func sriovNetworkUsesMTU(sriovNetworks, sriovNetworkNodePolicies []unstructured.Unstructured, nadName string) bool {", + "\tfor _, sriovNetwork := range sriovNetworks {", + "\t\tnetworkName := sriovNetwork.GetName()", + "\t\tlog.Debug(\"Checking SriovNetwork %s\", networkName)", + "\t\tif networkName == nadName {", + "\t\t\tlog.Debug(\"SriovNetwork %s found to match the NAD name %s\", networkName, nadName)", + "", + "\t\t\t// Get the ResourceName from the 
SriovNetwork spec", + "\t\t\tspec, found, err := unstructured.NestedMap(sriovNetwork.Object, \"spec\")", + "\t\t\tif !found || err != nil {", + "\t\t\t\tlog.Debug(\"Failed to get spec from SriovNetwork %s: %v\", networkName, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tresourceName, found, err := unstructured.NestedString(spec, \"resourceName\")", + "\t\t\tif !found || err != nil {", + "\t\t\t\tlog.Debug(\"Failed to get resourceName from SriovNetwork %s: %v\", networkName, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tfor _, nodePolicy := range sriovNetworkNodePolicies {", + "\t\t\t\tpolicyNamespace := nodePolicy.GetNamespace()", + "\t\t\t\tnetworkNamespace := sriovNetwork.GetNamespace()", + "", + "\t\t\t\tlog.Debug(\"Checking SriovNetworkNodePolicy in namespace %s\", policyNamespace)", + "\t\t\t\tif policyNamespace == networkNamespace {", + "\t\t\t\t\t// Get the ResourceName and MTU from the SriovNetworkNodePolicy spec", + "\t\t\t\t\tpolicySpec, found, err := unstructured.NestedMap(nodePolicy.Object, \"spec\")", + "\t\t\t\t\tif !found || err != nil {", + "\t\t\t\t\t\tlog.Debug(\"Failed to get spec from SriovNetworkNodePolicy: %v\", err)", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\tpolicyResourceName, found, err := unstructured.NestedString(policySpec, \"resourceName\")", + "\t\t\t\t\tif !found || err != nil {", + "\t\t\t\t\t\tlog.Debug(\"Failed to get resourceName from SriovNetworkNodePolicy: %v\", err)", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\tif policyResourceName == resourceName {", + "\t\t\t\t\t\tmtu, found, err := unstructured.NestedInt64(policySpec, \"mtu\")", + "\t\t\t\t\t\tif found \u0026\u0026 err == nil \u0026\u0026 mtu \u003e 0 {", + "\t\t\t\t\t\t\treturn true", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) 
IsUsingSRIOVWithMTU() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(", + "\t\t\tp.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\t// If the network-status annotation is not set, let's check the SriovNetwork/SriovNetworkNodePolicy CRs", + "\t\t// to see if the MTU is set there.", + "\t\tlog.Debug(\"Number of SriovNetworks: %d\", len(env.AllSriovNetworks))", + "\t\tlog.Debug(\"Number of SriovNetworkNodePolicies: %d\", len(env.AllSriovNetworkNodePolicies))", + "\t\tif sriovNetworkUsesMTU(env.AllSriovNetworks, env.AllSriovNetworkNodePolicies, nad.Name) {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "Pod.String", + "exported": true, + "receiver": "Pod", + "signature": "func()(string)", + "doc": "Pod.String Formats pod name and namespace into a readable string\n\nThis method constructs a human‑readable representation of a Pod by\ncombining its name and namespace. 
It uses formatting to produce the pattern\n\"pod: \u003cname\u003e ns: \u003cnamespace\u003e\", which is helpful for logging or debugging\noutput throughout the provider package.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:172", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "AreCPUResourcesWholeUnits", + "kind": "function", + "source": [ + "func AreCPUResourcesWholeUnits(p *Pod) bool {", + "\tisInteger := func(val int64) bool {", + "\t\treturn val%1000 == 0", + "\t}", + "", + "\t// Pods may contain more than one container. All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// Resources must be specified", + "\t\tcpuRequestsMillis := cut.Resources.Requests.Cpu().MilliValue()", + "\t\tcpuLimitsMillis := cut.Resources.Limits.Cpu().MilliValue()", + "", + "\t\tif cpuRequestsMillis == 0 || cpuLimitsMillis == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined requests or limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !isInteger(cpuRequestsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU requests %d (milli) that has to be a whole unit.\", cut.String(), cpuRequestsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t\tif !isInteger(cpuLimitsMillis) {", + "\t\t\tlog.Debug(\"%s has CPU limits %d (milli) that has to be a whole unit.\", cut.String(), cpuLimitsMillis)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "AreResourcesIdentical", + "kind": "function", + "source": [ + "func AreResourcesIdentical(p *Pod) bool {", + "\t// Pods may contain more than one container. 
All containers must conform to the CPU isolation requirements.", + "\tfor _, cut := range p.Containers {", + "\t\t// At least limits must be specified (requests default to limits if not specified)", + "\t\tif len(cut.Resources.Limits) == 0 {", + "\t\t\tlog.Debug(\"%s has been found with undefined limits.\", cut.String())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Gather the values", + "\t\tcpuRequests := cut.Resources.Requests.Cpu()", + "\t\tcpuLimits := cut.Resources.Limits.Cpu()", + "\t\tmemoryRequests := cut.Resources.Requests.Memory()", + "\t\tmemoryLimits := cut.Resources.Limits.Memory()", + "", + "\t\t// Check for mismatches", + "\t\tif !cpuRequests.Equal(*cpuLimits) {", + "\t\t\tlog.Debug(\"%s has CPU requests %f and limits %f that do not match.\", cut.String(), cpuRequests.AsApproximateFloat64(), cpuLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "", + "\t\tif !memoryRequests.Equal(*memoryLimits) {", + "\t\t\tlog.Debug(\"%s has memory requests %f and limits %f that do not match.\", cut.String(), memoryRequests.AsApproximateFloat64(), memoryLimits.AsApproximateFloat64())", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.SetPreflightResults", + "kind": "function", + "source": [ + "func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error {", + "\tlog.Info(\"Running Preflight container test for container %q with image %q\", c, c.Image)", + "", + "\t// Short circuit if the image already exists in the cache", + "\tif _, exists := preflightImageCache[c.Image]; exists {", + "\t\tlog.Info(\"Container image %q exists in the cache. 
Skipping this run.\", c.Image)", + "\t\tc.PreflightResults = preflightImageCache[c.Image]", + "\t\treturn nil", + "\t}", + "", + "\topts := []plibContainer.Option{}", + "\topts = append(opts, plibContainer.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibContainer.WithInsecureConnection())", + "\t}", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibContainer.NewCheck(c.Image, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "", + "\t\tresults.TestedImage = c.Image", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the Preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\t// Store the Preflight test results into the container's PreflightResults var and into the cache.", + "\tresultsDB := GetPreflightResultsDB(\u0026results)", + "\tc.PreflightResults = resultsDB", + "\tpreflightImageCache[c.Image] = resultsDB", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our 
log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.CheckResourceHugePagesSize", + "kind": "function", + "source": [ + "func (p *Pod) CheckResourceHugePagesSize(size string) bool {", + "\tfor _, cut := range p.Containers {", + "\t\t// Resources must be specified", + "\t\tif len(cut.Resources.Requests) == 0 || len(cut.Resources.Limits) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor name := range cut.Resources.Requests {", + "\t\t\tif strings.Contains(name.String(), hugePages) \u0026\u0026 name.String() != size {", + "\t\t\t\treturn false", + "\t\t\t}", + "\t\t}", + "\t\tfor name := range cut.Resources.Limits {", + "\t\t\tif strings.Contains(name.String(), hugePages) \u0026\u0026 name.String() != size {", + "\t\t\t\treturn false", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.HasHugepages", + "kind": "function", + "source": [ + "func (p *Pod) HasHugepages() bool {", + "\tfor _, cut := range p.Containers {", + "\t\tfor name := range cut.Resources.Requests {", + "\t\t\tif strings.Contains(name.String(), hugePages) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t\tfor _, name := range cut.Resources.Limits {", + "\t\t\tif strings.Contains(name.String(), hugePages) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsAffinityCompliant", + "kind": "function", + "source": [ + "func (p *Pod) 
IsAffinityCompliant() (bool, error) {", + "\tif p.Spec.Affinity == nil {", + "\t\treturn false, fmt.Errorf(\"%s has been found with an AffinityRequired flag but is missing corresponding affinity rules\", p.String())", + "\t}", + "\tif p.Spec.Affinity.PodAntiAffinity != nil {", + "\t\treturn false, fmt.Errorf(\"%s has been found with an AffinityRequired flag but has anti-affinity rules\", p.String())", + "\t}", + "\tif p.Spec.Affinity.PodAffinity == nil \u0026\u0026 p.Spec.Affinity.NodeAffinity == nil {", + "\t\treturn false, fmt.Errorf(\"%s has been found with an AffinityRequired flag but is missing corresponding pod/node affinity rules\", p.String())", + "\t}", + "\treturn true, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + 
"\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + 
"\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", 
+ "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + 
"\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", 
+ "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "filterDPDKRunningPods", + "kind": "function", + "source": [ + "func filterDPDKRunningPods(pods []*Pod) []*Pod {", + "\tvar filteredPods []*Pod", + "\tconst (", + "\t\tdpdkDriver = \"vfio-pci\"", + 
"\t\tfindDeviceSubCommand = \"find /sys -name\"", + "\t)", + "\to := clientsholder.GetClientsHolder()", + "\tfor _, pod := range pods {", + "\t\tif len(pod.MultusPCIs) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tctx := clientsholder.NewContext(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name)", + "\t\tfindCommand := fmt.Sprintf(\"%s '%s'\", findDeviceSubCommand, pod.MultusPCIs[0])", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, findCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe %s, errStr: %s, err: %v\", findCommand, pod.String(), errStr, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif strings.Contains(outStr, dpdkDriver) {", + "\t\t\tfilteredPods = append(filteredPods, pod)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getCatalogSourceBundleCountFromProbeContainer", + "kind": "function", + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\t\tcmd := 
\"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "IsStatefulSetReady", + "qualifiedName": "StatefulSet.IsStatefulSetReady", + "exported": true, + "receiver": "StatefulSet", + "signature": "func()(bool)", + "doc": "StatefulSet.IsStatefulSetReady Checks if all replicas of a StatefulSet are fully operational\n\nThe method compares the desired number of replicas, which defaults to one if\nunspecified, against the current status fields: ready, current, and updated\nreplicas. 
If any of these counts differ from the target, it returns false;\notherwise, true indicates the StatefulSet is considered ready.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/statefulsets.go:43", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (ss *StatefulSet) IsStatefulSetReady() bool {", + "\tvar replicas int32", + "\tif ss.Spec.Replicas != nil {", + "\t\treplicas = *(ss.Spec.Replicas)", + "\t} else {", + "\t\treplicas = 1", + "\t}", + "\tif ss.Status.ReadyReplicas != replicas ||", + "\t\tss.Status.CurrentReplicas != replicas ||", + "\t\tss.Status.UpdatedReplicas != replicas {", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "ToString", + "qualifiedName": "StatefulSet.ToString", + "exported": true, + "receiver": "StatefulSet", + "signature": "func()(string)", + "doc": "StatefulSet.ToString Formats a StatefulSet name and namespace into a string\n\nThe method builds a concise representation of the StatefulSet by combining\nits name and namespace. 
It uses formatting utilities to return a single\nstring that identifies the resource in a human‑readable form.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/statefulsets.go:63", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (ss *StatefulSet) ToString() string {", + "\treturn fmt.Sprintf(\"statefulset: %s ns: %s\",", + "\t\tss.Name,", + "\t\tss.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "GetAffinityRequiredPods", + "qualifiedName": "TestEnvironment.GetAffinityRequiredPods", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetAffinityRequiredPods Retrieves pods that require affinity\n\nThis method scans the test environment's collection of pod objects and\nselects those that have an affinity requirement flag set in their labels. It\nreturns a slice containing only the matching pods, enabling callers to focus\non affinity-dependent resources.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:116", + "calls": [ + { + "name": "Pod.AffinityRequired", + "kind": "function", + "source": [ + "func (p *Pod) AffinityRequired() bool {", + "\tif val, ok := p.Labels[AffinityRequiredKey]; ok {", + "\t\tresult, err := strconv.ParseBool(val)", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"failure to parse bool %v\", val)", + "\t\t\treturn false", + "\t\t}", + "\t\treturn result", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetAffinityRequiredPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.AffinityRequired() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": 
"GetBaremetalNodes", + "qualifiedName": "TestEnvironment.GetBaremetalNodes", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]Node)", + "doc": "TestEnvironment.GetBaremetalNodes Retrieves nodes that use a bare‑metal provider\n\nIt iterates over the environment’s node list, selecting those whose\nProviderID begins with \"baremetalhost://\". Matching nodes are collected into\na slice which is returned. The function returns only the filtered set of\nbare‑metal nodes.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:899", + "calls": [ + { + "pkgPath": "strings", + "name": "HasPrefix", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetBaremetalNodes() []Node {", + "\tvar baremetalNodes []Node", + "\tfor _, node := range env.Nodes {", + "\t\tif strings.HasPrefix(node.Data.Spec.ProviderID, \"baremetalhost://\") {", + "\t\t\tbaremetalNodes = append(baremetalNodes, node)", + "\t\t}", + "\t}", + "\treturn baremetalNodes", + "}" + ] + }, + { + "name": "GetCPUPinningPodsWithDpdk", + "qualifiedName": "TestEnvironment.GetCPUPinningPodsWithDpdk", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetCPUPinningPodsWithDpdk Lists guaranteed pods that pin CPUs with DPDK\n\nThis method retrieves all pods in the test environment that are guaranteed to\nhave exclusive CPU resources and then filters them to include only those\nrunning DPDK drivers. It calls a helper function that checks each pod’s\ncontainer for DPDK device presence via a system command. 
The resulting slice\ncontains pointers to pods meeting both criteria, suitable for further\nvalidation or manipulation.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:150", + "calls": [ + { + "name": "filterDPDKRunningPods", + "kind": "function", + "source": [ + "func filterDPDKRunningPods(pods []*Pod) []*Pod {", + "\tvar filteredPods []*Pod", + "\tconst (", + "\t\tdpdkDriver = \"vfio-pci\"", + "\t\tfindDeviceSubCommand = \"find /sys -name\"", + "\t)", + "\to := clientsholder.GetClientsHolder()", + "\tfor _, pod := range pods {", + "\t\tif len(pod.MultusPCIs) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tctx := clientsholder.NewContext(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name)", + "\t\tfindCommand := fmt.Sprintf(\"%s '%s'\", findDeviceSubCommand, pod.MultusPCIs[0])", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, findCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe %s, errStr: %s, err: %v\", findCommand, pod.String(), errStr, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif strings.Contains(outStr, dpdkDriver) {", + "\t\t\tfilteredPods = append(filteredPods, pod)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": "TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetCPUPinningPodsWithDpdk() []*Pod {", + "\treturn filterDPDKRunningPods(env.GetGuaranteedPodsWithExclusiveCPUs())", + "}" + ] + }, + { + "name": "GetDockerConfigFile", + "qualifiedName": 
"TestEnvironment.GetDockerConfigFile", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()(string)", + "doc": "TestEnvironment.GetDockerConfigFile Retrieves the path to the Docker configuration file\n\nThis method accesses the TestEnvironment's parameters to return the location\nof the Docker config used by Preflight checks. It returns a string\nrepresenting the file path, which is then supplied to container and operator\npreflight options for authentication. The function performs no additional\nlogic beyond fetching the stored value.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:761", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.SetPreflightResults", + "kind": "function", + "source": [ + "func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error {", + "\tlog.Info(\"Running Preflight container test for container %q with image %q\", c, c.Image)", + "", + "\t// Short circuit if the image already exists in the cache", + "\tif _, exists := preflightImageCache[c.Image]; exists {", + "\t\tlog.Info(\"Container image %q exists in the cache. 
Skipping this run.\", c.Image)", + "\t\tc.PreflightResults = preflightImageCache[c.Image]", + "\t\treturn nil", + "\t}", + "", + "\topts := []plibContainer.Option{}", + "\topts = append(opts, plibContainer.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibContainer.WithInsecureConnection())", + "\t}", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibContainer.NewCheck(c.Image, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "", + "\t\tresults.TestedImage = c.Image", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the Preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\t// Store the Preflight test results into the container's PreflightResults var and into the cache.", + "\tresultsDB := GetPreflightResultsDB(\u0026results)", + "\tc.PreflightResults = resultsDB", + "\tpreflightImageCache[c.Image] = resultsDB", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our 
log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetDockerConfigFile() string {", + "\treturn env.params.PfltDockerconfig", + "}" + ] + }, + { + "name": "GetGuaranteedPodContainersWithExclusiveCPUs", + "qualifiedName": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Container)", + "doc": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs Retrieves containers with guaranteed exclusive CPUs\n\nThis method returns a slice of container objects that belong to pods which\nhave been marked as guaranteed to use exclusive CPUs. 
It gathers the relevant\npods via GetGuaranteedPodsWithExclusiveCPUs and then collects their\ncontainers into a single list for further processing or inspection.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:263", + "calls": [ + { + "name": "getContainers", + "kind": "function", + "source": [ + "func getContainers(pods []*Pod) []*Container {", + "\tvar containers []*Container", + "", + "\tfor _, pod := range pods {", + "\t\tcontainers = append(containers, pod.Containers...)", + "\t}", + "\treturn containers", + "}" + ] + }, + { + "name": "TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUs() []*Container {", + "\treturn getContainers(env.GetGuaranteedPodsWithExclusiveCPUs())", + "}" + ] + }, + { + "name": "GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "qualifiedName": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Container)", + "doc": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID Retrieves containers from guaranteed pods that use exclusive CPUs but do not enable host PID\n\nIt first selects all pods in the test environment marked as guaranteed with\nexclusive CPUs, then filters out any pod where HostPID is enabled. 
Finally it\ncollects and returns every container belonging to the remaining pods.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:283", + "calls": [ + { + "name": "getContainers", + "kind": "function", + "source": [ + "func getContainers(pods []*Pod) []*Container {", + "\tvar containers []*Container", + "", + "\tfor _, pod := range pods {", + "\t\tcontainers = append(containers, pod.Containers...)", + "\t}", + "\treturn containers", + "}" + ] + }, + { + "name": "filterPodsWithoutHostPID", + "kind": "function", + "source": [ + "func filterPodsWithoutHostPID(pods []*Pod) []*Pod {", + "\tvar withoutHostPIDPods []*Pod", + "", + "\tfor _, pod := range pods {", + "\t\tif pod.Spec.HostPID {", + "\t\t\tcontinue", + "\t\t}", + "\t\twithoutHostPIDPods = append(withoutHostPIDPods, pod)", + "\t}", + "\treturn withoutHostPIDPods", + "}" + ] + }, + { + "name": "TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithExclusiveCPUs()))", + "}" + ] + }, + { + "name": "GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "qualifiedName": "TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Container)", + "doc": "TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID returns containers from guaranteed pods with isolated CPUs that do not 
use HostPID\n\nIt first collects all pods in the environment that are guaranteed to have\nexclusive CPU allocation and comply with CPU isolation rules. Then it filters\nout any pod where the HostPID flag is enabled, ensuring only non-HostPID pods\nremain. Finally, it aggregates and returns a slice of containers from those\nremaining pods.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:294", + "calls": [ + { + "name": "getContainers", + "kind": "function", + "source": [ + "func getContainers(pods []*Pod) []*Container {", + "\tvar containers []*Container", + "", + "\tfor _, pod := range pods {", + "\t\tcontainers = append(containers, pod.Containers...)", + "\t}", + "\treturn containers", + "}" + ] + }, + { + "name": "filterPodsWithoutHostPID", + "kind": "function", + "source": [ + "func filterPodsWithoutHostPID(pods []*Pod) []*Pod {", + "\tvar withoutHostPIDPods []*Pod", + "", + "\tfor _, pod := range pods {", + "\t\tif pod.Spec.HostPID {", + "\t\t\tcontinue", + "\t\t}", + "\t\twithoutHostPIDPods = append(withoutHostPIDPods, pod)", + "\t}", + "\treturn withoutHostPIDPods", + "}" + ] + }, + { + "name": "TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() \u0026\u0026 p.IsCPUIsolationCompliant() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithIsolatedCPUs()))", + "}" + ] + }, + { + "name": "GetGuaranteedPods", + "qualifiedName": "TestEnvironment.GetGuaranteedPods", + "exported": true, + 
"receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetGuaranteedPods Retrieves all pods that satisfy the guaranteed condition\n\nThis method scans every pod in the test environment, checks each one with its\nown guarantee logic, and collects those that pass into a slice. The resulting\nslice contains only the pods deemed guaranteed, which are then returned to\nthe caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:68", + "calls": [ + { + "name": "Pod.IsPodGuaranteed", + "kind": "function", + "source": [ + "func (p *Pod) IsPodGuaranteed() bool {", + "\treturn AreResourcesIdentical(p)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetGuaranteedPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteed() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": "GetGuaranteedPodsWithExclusiveCPUs", + "qualifiedName": "TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs Retrieves pods that have guaranteed exclusive CPU allocation\n\nThe method examines each pod in the test environment, applying a check to\ndetermine if the pod is guaranteed with exclusive CPUs. Pods passing this\ncheck are collected into a slice and returned. 
This list can be used by other\nfunctions to identify containers or pods suitable for CPU‑pinning\nscenarios.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:34", + "calls": [ + { + "name": "Pod.IsPodGuaranteedWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool {", + "\treturn AreCPUResourcesWholeUnits(p) \u0026\u0026 AreResourcesIdentical(p)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetCPUPinningPodsWithDpdk", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetCPUPinningPodsWithDpdk() []*Pod {", + "\treturn filterDPDKRunningPods(env.GetGuaranteedPodsWithExclusiveCPUs())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUs() []*Container {", + "\treturn getContainers(env.GetGuaranteedPodsWithExclusiveCPUs())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithExclusiveCPUs()))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn 
filteredPods", + "}" + ] + }, + { + "name": "GetGuaranteedPodsWithIsolatedCPUs", + "qualifiedName": "TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs Retrieves pods that are guaranteed to have isolated CPUs\n\nThis method scans all pods in the test environment, selecting only those\nwhose CPU requests match whole units and whose resources are identical across\ncontainers. It further checks that each pod meets CPU isolation compliance\ncriteria, such as having appropriate annotations and a specified runtime\nclass name. The resulting slice of pods is returned for use by other\nfiltering functions.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:52", + "calls": [ + { + "name": "Pod.IsPodGuaranteedWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool {", + "\treturn AreCPUResourcesWholeUnits(p) \u0026\u0026 AreResourcesIdentical(p)", + "}" + ] + }, + { + "name": "Pod.IsCPUIsolationCompliant", + "kind": "function", + "source": [ + "func (p *Pod) IsCPUIsolationCompliant() bool {", + "\tisCPUIsolated := true", + "", + "\tif !LoadBalancingDisabled(p) {", + "\t\tlog.Debug(\"Pod %q has been found to not have annotations set correctly for CPU isolation.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\tif !p.IsRuntimeClassNameSpecified() {", + "\t\tlog.Debug(\"Pod %q has been found to not have runtimeClassName specified.\", p)", + "\t\tisCPUIsolated = false", + "\t}", + "", + "\treturn isCPUIsolated", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) 
GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithIsolatedCPUs()))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsPodGuaranteedWithExclusiveCPUs() \u0026\u0026 p.IsCPUIsolationCompliant() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": "GetHugepagesPods", + "qualifiedName": "TestEnvironment.GetHugepagesPods", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetHugepagesPods returns all pods that request or limit hugepages\n\nThe method scans the environment’s pod collection, checks each pod for any\ncontainer using a hugepage resource via HasHugepages, and collects those that\ndo. 
The resulting slice of pointers to Pod objects is returned; if none have\nhugepages, an empty slice is produced.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:132", + "calls": [ + { + "name": "Pod.HasHugepages", + "kind": "function", + "source": [ + "func (p *Pod) HasHugepages() bool {", + "\tfor _, cut := range p.Containers {", + "\t\tfor name := range cut.Resources.Requests {", + "\t\t\tif strings.Contains(name.String(), hugePages) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t\tfor _, name := range cut.Resources.Limits {", + "\t\t\tif strings.Contains(name.String(), hugePages) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetHugepagesPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.HasHugepages() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": "GetMasterCount", + "qualifiedName": "TestEnvironment.GetMasterCount", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()(int)", + "doc": "TestEnvironment.GetMasterCount Counts control plane nodes in the test environment\n\nThis method iterates over all nodes stored in the TestEnvironment, checks\neach node to see if it is a control‑node by examining its labels, and\ntallies them. 
It returns the total number of master nodes as an integer.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:797", + "calls": [ + { + "name": "Node.IsControlPlaneNode", + "kind": "function", + "source": [ + "func (node *Node) IsControlPlaneNode() bool {", + "\tfor nodeLabel := range node.Data.Labels {", + "\t\tif stringhelper.StringInSlice(MasterLabels, nodeLabel, true) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetMasterCount() int {", + "\tmasterCount := 0", + "\tfor _, e := range env.Nodes {", + "\t\tif e.IsControlPlaneNode() {", + "\t\t\tmasterCount++", + "\t\t}", + "\t}", + "\treturn masterCount", + "}" + ] + }, + { + "name": "GetNonGuaranteedPodContainersWithoutHostPID", + "qualifiedName": "TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Container)", + "doc": "TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID Lists containers in non-guaranteed pods that do not use HostPID\n\nThis method retrieves all non-guaranteed pods from the test environment,\nfilters out any pods with the HostPID setting enabled, then collects every\ncontainer within those remaining pods. 
The result is a slice of container\nobjects representing workloads that are both non‑guaranteed and run without\nshared PID namespaces.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:274", + "calls": [ + { + "name": "getContainers", + "kind": "function", + "source": [ + "func getContainers(pods []*Pod) []*Container {", + "\tvar containers []*Container", + "", + "\tfor _, pod := range pods {", + "\t\tcontainers = append(containers, pod.Containers...)", + "\t}", + "\treturn containers", + "}" + ] + }, + { + "name": "filterPodsWithoutHostPID", + "kind": "function", + "source": [ + "func filterPodsWithoutHostPID(pods []*Pod) []*Pod {", + "\tvar withoutHostPIDPods []*Pod", + "", + "\tfor _, pod := range pods {", + "\t\tif pod.Spec.HostPID {", + "\t\t\tcontinue", + "\t\t}", + "\t\twithoutHostPIDPods = append(withoutHostPIDPods, pod)", + "\t}", + "\treturn withoutHostPIDPods", + "}" + ] + }, + { + "name": "TestEnvironment.GetNonGuaranteedPods", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetNonGuaranteedPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif !p.IsPodGuaranteed() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetNonGuaranteedPodContainersWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetNonGuaranteedPods()))", + "}" + ] + }, + { + "name": "GetNonGuaranteedPods", + "qualifiedName": "TestEnvironment.GetNonGuaranteedPods", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetNonGuaranteedPods retrieves all pods that are not guaranteed in the test environment\n\nThe function iterates over every pod in the TestEnvironment, checks if each\npod is not guaranteed by calling IsPodGuaranteed, and 
collects those pods\ninto a slice. It returns this slice of non‑guaranteed pods for further\nprocessing or analysis.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:84", + "calls": [ + { + "name": "Pod.IsPodGuaranteed", + "kind": "function", + "source": [ + "func (p *Pod) IsPodGuaranteed() bool {", + "\treturn AreResourcesIdentical(p)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetNonGuaranteedPodContainersWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetNonGuaranteedPods()))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetNonGuaranteedPods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif !p.IsPodGuaranteed() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": "GetOfflineDBPath", + "qualifiedName": "TestEnvironment.GetOfflineDBPath", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()(string)", + "doc": "TestEnvironment.GetOfflineDBPath Retrieves the configured file system path for an offline database\n\nThis method accesses the TestEnvironment's internal parameters to obtain the\nlocation of the offline database. It returns a string representing that\nfilesystem path, which can be used by other components to locate or access\nthe database file. 
No arguments are required and the value is read directly\nfrom the environment configuration.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:772", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetOfflineDBPath() string {", + "\treturn env.params.OfflineDB", + "}" + ] + }, + { + "name": "GetPodsUsingSRIOV", + "qualifiedName": "TestEnvironment.GetPodsUsingSRIOV", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod, error)", + "doc": "TestEnvironment.GetPodsUsingSRIOV Collects all pods that are using SR-IOV\n\nThe method scans every pod in the test environment, checking each one for\nSR‑IOV usage by calling its helper function. If a pod reports SR‑IOV\nsupport, it is added to a slice of matching pods. The function returns this\nlist and an error if any pod check fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:227", + "calls": [ + { + "name": "Pod.IsUsingSRIOV", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOV() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(p.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, 
fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\tisSRIOV, err := isNetworkAttachmentDefinitionConfigTypeSRIOV(nad.Spec.Config)", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to know if network-attachment %s is sriov: %v\", networkName, err)", + "\t\t}", + "", + "\t\tlog.Debug(\"%s: NAD config: %s\", p, nad.Spec.Config)", + "\t\tif isSRIOV {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetPodsUsingSRIOV() ([]*Pod, error) {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tusesSRIOV, err := p.IsUsingSRIOV()", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"failed to check sriov usage for pod %s: %v\", p, err)", + "\t\t}", + "", + "\t\tif usesSRIOV {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods, nil", + "}" + ] + }, + { + "name": "GetPodsWithoutAffinityRequiredLabel", + "qualifiedName": "TestEnvironment.GetPodsWithoutAffinityRequiredLabel", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetPodsWithoutAffinityRequiredLabel Retrieves pods missing the required affinity label\n\nThe method scans all pods in the test environment, checks each pod for the\npresence of an affinity-required label using the Pod.AffinityRequired helper,\nand collects those that lack it. 
It returns a slice containing only these\npods, allowing callers to identify which resources need proper labeling.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:100", + "calls": [ + { + "name": "Pod.AffinityRequired", + "kind": "function", + "source": [ + "func (p *Pod) AffinityRequired() bool {", + "\tif val, ok := p.Labels[AffinityRequiredKey]; ok {", + "\t\tresult, err := strconv.ParseBool(val)", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"failure to parse bool %v\", val)", + "\t\t\treturn false", + "\t\t}", + "\t\treturn result", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetPodsWithoutAffinityRequiredLabel() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif !p.AffinityRequired() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": "GetShareProcessNamespacePods", + "qualifiedName": "TestEnvironment.GetShareProcessNamespacePods", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()([]*Pod)", + "doc": "TestEnvironment.GetShareProcessNamespacePods Retrieves pods that enable shared process namespaces\n\nThe function scans the TestEnvironment's collection of Pod objects, selecting\nthose whose ShareProcessNamespace flag is true. It accumulates these matching\npods into a new slice and returns it. 
The returned slice contains only pods\nconfigured for shared process namespace operation.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:211", + "calls": [ + { + "name": "Pod.IsShareProcessNamespace", + "kind": "function", + "source": [ + "func (p *Pod) IsShareProcessNamespace() bool {", + "\treturn p.Spec.ShareProcessNamespace != nil \u0026\u0026 *p.Spec.ShareProcessNamespace", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetShareProcessNamespacePods() []*Pod {", + "\tvar filteredPods []*Pod", + "\tfor _, p := range env.Pods {", + "\t\tif p.IsShareProcessNamespace() {", + "\t\t\tfilteredPods = append(filteredPods, p)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": "GetWorkerCount", + "qualifiedName": "TestEnvironment.GetWorkerCount", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()(int)", + "doc": "TestEnvironment.GetWorkerCount Returns the number of worker nodes in the environment\n\nThis method iterates over all nodes stored in the TestEnvironment, checking\neach one to determine if it is marked as a worker node. It counts how many\nnodes satisfy this condition and returns that integer count. 
The result\nreflects the current composition of worker nodes within the test setup.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:782", + "calls": [ + { + "name": "Node.IsWorkerNode", + "kind": "function", + "source": [ + "func (node *Node) IsWorkerNode() bool {", + "\tfor nodeLabel := range node.Data.Labels {", + "\t\tif stringhelper.StringInSlice(WorkerLabels, nodeLabel, true) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) GetWorkerCount() int {", + "\tworkerCount := 0", + "\tfor _, e := range env.Nodes {", + "\t\tif e.IsWorkerNode() {", + "\t\t\tworkerCount++", + "\t\t}", + "\t}", + "\treturn workerCount", + "}" + ] + }, + { + "name": "IsIntrusive", + "qualifiedName": "TestEnvironment.IsIntrusive", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()(bool)", + "doc": "TestEnvironment.IsIntrusive Indicates if the test environment is running in intrusive mode\n\nThe method checks a configuration flag stored in the environment's parameters\nand returns true when intrusive testing is enabled, otherwise false. It\nperforms no other side effects or computations.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:740", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) IsIntrusive() bool {", + "\treturn env.params.Intrusive", + "}" + ] + }, + { + "name": "IsPreflightInsecureAllowed", + "qualifiedName": "TestEnvironment.IsPreflightInsecureAllowed", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()(bool)", + "doc": "TestEnvironment.IsPreflightInsecureAllowed Indicates whether insecure Preflight connections are permitted\n\nThis method returns the value of the AllowPreflightInsecure flag stored in\nthe TestEnvironment parameters. 
It is used to decide if insecure network\nconnections should be allowed when executing Preflight checks for containers\nor operators.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:750", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.SetPreflightResults", + "kind": "function", + "source": [ + "func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error {", + "\tlog.Info(\"Running Preflight container test for container %q with image %q\", c, c.Image)", + "", + "\t// Short circuit if the image already exists in the cache", + "\tif _, exists := preflightImageCache[c.Image]; exists {", + "\t\tlog.Info(\"Container image %q exists in the cache. Skipping this run.\", c.Image)", + "\t\tc.PreflightResults = preflightImageCache[c.Image]", + "\t\treturn nil", + "\t}", + "", + "\topts := []plibContainer.Option{}", + "\topts = append(opts, plibContainer.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibContainer.WithInsecureConnection())", + "\t}", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibContainer.NewCheck(c.Image, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn 
fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "", + "\t\tresults.TestedImage = c.Image", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the Preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\t// Store the Preflight test results into the container's PreflightResults var and into the cache.", + "\tresultsDB := GetPreflightResultsDB(\u0026results)", + "\tc.PreflightResults = resultsDB", + "\tpreflightImageCache[c.Image] = resultsDB", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Operator.SetPreflightResults", + "kind": "function", + "source": [ + "func (op *Operator) SetPreflightResults(env *TestEnvironment) error {", + "\tif len(op.InstallPlans) == 0 {", + "\t\tlog.Warn(\"Operator %q has no InstallPlans. 
Skipping setting Preflight results\", op)", + "\t\treturn nil", + "\t}", + "", + "\tbundleImage := op.InstallPlans[0].BundleImage", + "\tindexImage := op.InstallPlans[0].IndexImage", + "\toc := clientsholder.GetClientsHolder()", + "", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\treturn err", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\topts := []plibOperator.Option{}", + "\topts = append(opts, plibOperator.WithDockerConfigJSONFromFile(env.GetDockerConfigFile()))", + "\tif env.IsPreflightInsecureAllowed() {", + "\t\tlog.Info(\"Insecure connections are being allowed to Preflight\")", + "\t\topts = append(opts, plibOperator.WithInsecureConnection())", + "\t}", + "", + "\t// Add logger output to the context", + "\tlogbytes := bytes.NewBuffer([]byte{})", + "\tchecklogger := defaultLog.Default()", + "\tchecklogger.SetOutput(logbytes)", + "\tlogger := stdr.New(checklogger)", + "\tctx = logr.NewContext(ctx, logger)", + "", + "\tcheck := plibOperator.NewCheck(bundleImage, indexImage, oc.KubeConfig, opts...)", + "", + "\tresults, runtimeErr := check.Run(ctx)", + "\tif runtimeErr != nil {", + "\t\t_, checks, err := check.List(ctx)", + "\t\tif err != nil {", + "\t\t\treturn fmt.Errorf(\"could not get preflight container test list\")", + "\t\t}", + "\t\tfor _, c := range checks {", + "\t\t\tresults.PassedOverall = false", + "\t\t\tresult := plibRuntime.Result{Check: c, ElapsedTime: 0}", + "\t\t\tresults.Errors = append(results.Errors, *result.WithError(runtimeErr))", + "\t\t}", + "\t}", + "", + "\t// Take all of the preflight logs and stick them into our log.", + "\tlog.Info(\"%s\", logbytes.String())", + "", + "\te := os.RemoveAll(\"artifacts/\")", + "\tif e != nil {", + "\t\tlog.Fatal(\"%v\", e)", + "\t}", + "", + "\tlog.Info(\"Storing operator Preflight results into object for %q\", bundleImage)", + "\top.PreflightResults = 
GetPreflightResultsDB(\u0026results)", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) IsPreflightInsecureAllowed() bool {", + "\treturn env.params.AllowPreflightInsecure", + "}" + ] + }, + { + "name": "IsSNO", + "qualifiedName": "TestEnvironment.IsSNO", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()(bool)", + "doc": "TestEnvironment.IsSNO Checks whether the environment contains a single node\n\nThe method inspects the collection of nodes in the test environment and\ndetermines if exactly one node is present. It returns true when the count\nequals one, indicating a single-node setup; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:812", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) IsSNO() bool {", + "\treturn len(env.Nodes) == 1", + "}" + ] + }, + { + "name": "SetNeedsRefresh", + "qualifiedName": "TestEnvironment.SetNeedsRefresh", + "exported": true, + "receiver": "TestEnvironment", + "signature": "func()()", + "doc": "TestEnvironment.SetNeedsRefresh Marks the test environment as needing a reload\n\nWhen invoked, this method clears the internal flag that tracks whether the\nenvironment has been initialized or loaded. It ensures subsequent operations\nwill reinitialize necessary resources before use. 
The function does not\nreturn any value and performs no additional side effects.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:731", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (env *TestEnvironment) SetNeedsRefresh() {", + "\tloaded = false", + "}" + ] + }, + { + "name": "addOperandPodsToTestPods", + "qualifiedName": "addOperandPodsToTestPods", + "exported": false, + "signature": "func([]*Pod, *TestEnvironment)()", + "doc": "addOperandPodsToTestPods Adds discovered operand pods to the test environment\n\nThis routine iterates over a list of operand pods, checking each against the\ncurrent set of test pods in the environment. If a pod is already present, it\nlogs that fact and marks the existing entry as an operand; otherwise it\nappends the new pod to the environment's pod list. The function ensures no\nduplicate entries while guaranteeing all operand pods are available for\nsubsequent tests.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:531", + "calls": [ + { + "name": "searchPodInSlice", + "kind": "function", + "source": [ + "func searchPodInSlice(name, namespace string, pods []*Pod) *Pod {", + "\t// Helper map to filter pods that have been already added", + "\tpodsMap := map[types.NamespacedName]*Pod{}", + "\tfor _, testPod := range pods {", + "\t\tpodsMap[types.NamespacedName{Namespace: testPod.Namespace, Name: testPod.Name}] = testPod", + "\t}", + "", + "\t// Search by namespace+name key", + "\tpodKey := types.NamespacedName{Namespace: namespace, Name: name}", + "\tif pod, found := podsMap[podKey]; found {", + "\t\treturn pod", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = 
createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + 
"", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + 
"\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = 
data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addOperandPodsToTestPods(operandPods []*Pod, env *TestEnvironment) {", + "\tfor _, operandPod := range operandPods {", + "\t\t// Check whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operandPod.Name, operandPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operand pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operand pod.", + "\t\t\ttestPod.IsOperand = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operand pod %v/%v added to test pod list\", operandPod.Namespace, operandPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operandPod)", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "addOperatorPodsToTestPods", + "qualifiedName": "addOperatorPodsToTestPods", + "exported": false, + "signature": "func([]*Pod, *TestEnvironment)()", + "doc": "addOperatorPodsToTestPods Adds operator pods to the test pod list\n\nThis function iterates over a slice of operator pods, checking each one\nagainst the current environment's pod collection. If an operator pod is\nalready present, it marks that existing pod as an operator; otherwise, it\nappends the new pod to the test list. 
Logging statements provide visibility\ninto whether pods were added or already discovered.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:507", + "calls": [ + { + "name": "searchPodInSlice", + "kind": "function", + "source": [ + "func searchPodInSlice(name, namespace string, pods []*Pod) *Pod {", + "\t// Helper map to filter pods that have been already added", + "\tpodsMap := map[types.NamespacedName]*Pod{}", + "\tfor _, testPod := range pods {", + "\t\tpodsMap[types.NamespacedName{Namespace: testPod.Namespace, Name: testPod.Name}] = testPod", + "\t}", + "", + "\t// Search by namespace+name key", + "\tpodKey := types.NamespacedName{Namespace: namespace, Name: name}", + "\tif pod, found := podsMap[podKey]; found {", + "\t\treturn pod", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait 
for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + 
"\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + 
"\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = 
updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment) {", + "\tfor _, operatorPod := range operatorPods {", + "\t\t// Check 
whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operatorPod.Name, operatorPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operator pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operator pod.", + "\t\t\ttestPod.IsOperator = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operator pod %v/%v added to test pod list\", operatorPod.Namespace, operatorPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operatorPod)", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "buildContainerImageSource", + "qualifiedName": "buildContainerImageSource", + "exported": false, + "signature": "func(string, string)(ContainerImageIdentifier)", + "doc": "buildContainerImageSource Extracts registry, repository, tag, and digest information from image strings\n\nThe function parses a container image URL to obtain the registry, repository,\nand optional tag using a regular expression. It then extracts the image\ndigest from an image ID string with another regex. 
The parsed values are\nassembled into a ContainerImageIdentifier structure and returned for use\nelsewhere in the program.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:613", + "calls": [ + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getPodContainers", + "kind": "function", + "source": [ + "func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) {", + "\tfor j := 0; j \u003c len(aPod.Spec.Containers); j++ {", + "\t\tcut := \u0026(aPod.Spec.Containers[j])", + "", + "\t\tvar cutStatus corev1.ContainerStatus", + "\t\t// get Status for current container", + "\t\tfor index := range aPod.Status.ContainerStatuses {", + "\t\t\tif aPod.Status.ContainerStatuses[index].Name == cut.Name {", + "\t\t\t\tcutStatus = aPod.Status.ContainerStatuses[index]", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\taRuntime, uid := GetRuntimeUID(\u0026cutStatus)", + "\t\tcontainer := Container{Podname: aPod.Name, Namespace: aPod.Namespace,", + "\t\t\tNodeName: aPod.Spec.NodeName, Container: cut, Status: cutStatus, Runtime: aRuntime, UID: uid,", + "\t\t\tContainerImageIdentifier: buildContainerImageSource(aPod.Spec.Containers[j].Image, cutStatus.ImageID)}", + "", + "\t\t// Warn if readiness probe did not succeeded yet.", + "\t\tif !cutStatus.Ready {", + "\t\t\tlog.Warn(\"Container %q is not ready yet.\", \u0026container)", + 
"\t\t}", + "", + "\t\t// Warn if container state is not running.", + "\t\tif state := \u0026cutStatus.State; state.Running == nil {", + "\t\t\treason := \"\"", + "\t\t\tswitch {", + "\t\t\tcase state.Waiting != nil:", + "\t\t\t\treason = \"waiting - \" + state.Waiting.Reason", + "\t\t\tcase state.Terminated != nil:", + "\t\t\t\treason = \"terminated - \" + state.Terminated.Reason", + "\t\t\tdefault:", + "\t\t\t\t// When no state was explicitly set, it's assumed to be in \"waiting state\".", + "\t\t\t\treason = \"waiting state reason unknown\"", + "\t\t\t}", + "", + "\t\t\tlog.Warn(\"Container %q is not running (reason: %s, restarts %d): some test cases might fail.\",", + "\t\t\t\t\u0026container, reason, cutStatus.RestartCount)", + "\t\t}", + "", + "\t\t// Build slices of containers based on whether or not we are \"ignoring\" them or not.", + "\t\tif useIgnoreList \u0026\u0026 container.HasIgnoredContainerName() {", + "\t\t\tcontinue", + "\t\t}", + "\t\tcontainerList = append(containerList, \u0026container)", + "\t}", + "\treturn containerList", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func buildContainerImageSource(urlImage, urlImageID string) (source ContainerImageIdentifier) {", + "\tconst regexImageWithTag = `^([^/]*)/*([^@]*):(.*)`", + "\tconst regexImageDigest = `^([^/]*)/(.*)@(.*:.*)`", + "", + "\t// get image repository, Name and tag if present", + "\tre := regexp.MustCompile(regexImageWithTag)", + "\tmatch := re.FindStringSubmatch(urlImage)", + "", + "\tif match != nil {", + "\t\tif match[2] != \"\" {", + "\t\t\tsource.Registry = match[1]", + "\t\t\tsource.Repository = match[2]", + "\t\t\tsource.Tag = match[3]", + "\t\t} else {", + "\t\t\tsource.Repository = match[1]", + "\t\t\tsource.Tag = match[3]", + "\t\t}", + "\t}", + "", + "\t// get image Digest based on imageID only", + "\tre = regexp.MustCompile(regexImageDigest)", + "\tmatch = re.FindStringSubmatch(urlImageID)", + "", + "\tif match != nil {", + 
"\t\tsource.Digest = match[3]", + "\t}", + "", + "\tlog.Debug(\"Parsed image, repo: %s, name:%s, tag: %s, digest: %s\",", + "\t\tsource.Registry,", + "\t\tsource.Repository,", + "\t\tsource.Tag,", + "\t\tsource.Digest)", + "", + "\treturn source", + "}" + ] + }, + { + "name": "buildTestEnvironment", + "qualifiedName": "buildTestEnvironment", + "exported": false, + "signature": "func()()", + "doc": "buildTestEnvironment initializes the test environment state\n\nThe function starts by resetting the global environment structure and loading\nconfiguration parameters from a file. It then attempts to deploy a probe\ndaemonset; if that fails it records the failure but continues with limited\ntests. Next, it performs autodiscovery of cluster resources such as\noperators, pods, services, CRDs, and more, populating many fields in the\nenvironment struct. Throughout the process, it logs progress, handles errors\nby terminating on critical failures, and measures the total time taken.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:307", + "calls": [ + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "LoadConfiguration", + "kind": "function", + "source": [ + "func LoadConfiguration(filePath string) (TestConfiguration, error) {", + "\tif confLoaded {", + "\t\tlog.Debug(\"config file already loaded, return previous element\")", + "\t\treturn configuration, nil", + "\t}", + "", + "\tlog.Info(\"Loading config from file: %s\", filePath)", + "\tcontents, err := os.ReadFile(filePath)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\terr = yaml.Unmarshal(contents, 
\u0026configuration)", + "\tif err != nil {", + "\t\treturn configuration, err", + "\t}", + "", + "\t// Set default namespace for the probe daemonset pods, in case it was not set.", + "\tif configuration.ProbeDaemonSetNamespace == \"\" {", + "\t\tlog.Warn(\"No namespace configured for the probe daemonset. Defaulting to namespace %q\", defaultProbeDaemonSetNamespace)", + "\t\tconfiguration.ProbeDaemonSetNamespace = defaultProbeDaemonSetNamespace", + "\t} else {", + "\t\tlog.Info(\"Namespace for probe daemonset: %s\", configuration.ProbeDaemonSetNamespace)", + "\t}", + "", + "\tconfLoaded = true", + "\treturn configuration, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "deployDaemonSet", + "kind": "function", + "source": [ + "func deployDaemonSet(namespace string) error {", + "\tk8sPrivilegedDs.SetDaemonSetClient(clientsholder.GetClientsHolder().K8sClient)", + "", + "\tdsImage := env.params.CertSuiteProbeImage", + "\tif k8sPrivilegedDs.IsDaemonSetReady(DaemonSetName, namespace, dsImage) {", + "\t\treturn nil", + "\t}", + "", + "\tmatchLabels := make(map[string]string)", + "\tmatchLabels[\"name\"] = DaemonSetName", + "\tmatchLabels[\"redhat-best-practices-for-k8s.com/app\"] = DaemonSetName", + "\t_, err := k8sPrivilegedDs.CreateDaemonSet(DaemonSetName, namespace, containerName, dsImage, matchLabels, probePodsTimeout,", + 
"\t\tconfiguration.GetTestParameters().DaemonsetCPUReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetCPULim,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemLim,", + "\t\tcorev1.PullIfNotPresent,", + "\t)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not deploy certsuite daemonset, err=%v\", err)", + "\t}", + "\terr = k8sPrivilegedDs.WaitDaemonsetReady(namespace, DaemonSetName, probePodsTimeout)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"timed out waiting for certsuite daemonset, err=%v\", err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "DoAutoDiscover", + "kind": "function", + "source": [ + "func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData {", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tvar err error", + "\tdata.StorageClasses, err = getAllStorageClasses(oc.K8sClient.StorageV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to retrieve storageClasses - err: %v\", err)", + "\t}", + "", + "\tpodsUnderTestLabelsObjects := CreateLabels(config.PodsUnderTestLabels)", + "\toperatorsUnderTestLabelsObjects := CreateLabels(config.OperatorsUnderTestLabels)", + "", + "\tlog.Debug(\"Pods under test labels: %+v\", podsUnderTestLabelsObjects)", + "\tlog.Debug(\"Operators under test labels: %+v\", operatorsUnderTestLabelsObjects)", + "", + "\tdata.AllNamespaces, err = getAllNamespaces(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get namespaces, err: %v\", err)", + "\t}", + "\tdata.AllSubscriptions = 
findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), []string{\"\"})", + "\tdata.AllCsvs, err = getAllOperators(oc.OlmClient.OperatorsV1alpha1())", + "\tif err != nil {", + "\t\tlog.Error(\"Cannot get operators, err: %v\", err)", + "\t}", + "\tdata.AllInstallPlans = getAllInstallPlans(oc.OlmClient.OperatorsV1alpha1())", + "\tdata.AllCatalogSources = getAllCatalogSources(oc.OlmClient.OperatorsV1alpha1())", + "\tlog.Info(\"Collected %d catalog sources during autodiscovery\", len(data.AllCatalogSources))", + "", + "\tdata.AllPackageManifests = getAllPackageManifests(oc.OlmPkgClient.PackageManifests(\"\"))", + "", + "\tdata.Namespaces = namespacesListToStringList(config.TargetNameSpaces)", + "\tdata.Pods, data.AllPods = FindPodsByLabels(oc.K8sClient.CoreV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.PodStates.BeforeExecution = CountPodsByStatus(data.AllPods)", + "\tdata.AbnormalEvents = findAbnormalEvents(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tprobeLabels := []labelObject{{LabelKey: probeHelperPodsLabelName, LabelValue: probeHelperPodsLabelValue}}", + "\tprobeNS := []string{config.ProbeDaemonSetNamespace}", + "\tdata.ProbePods, _ = FindPodsByLabels(oc.K8sClient.CoreV1(), probeLabels, probeNS)", + "\tdata.ResourceQuotaItems, err = getResourceQuotas(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get resource quotas, err: %v\", err)", + "\t}", + "\tdata.PodDisruptionBudgets, err = getPodDisruptionBudgets(oc.K8sClient.PolicyV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get pod disruption budgets, err: %v\", err)", + "\t}", + "\tdata.NetworkPolicies, err = getNetworkPolicies(oc.K8sNetworkingClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get network policies, err: %v\", err)", + "\t}", + "", + "\t// Get cluster crds", + "\tdata.AllCrds, err = getClusterCrdNames()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster CRD names, err: %v\", err)", + "\t}", + "\tdata.Crds = 
FindTestCrdNames(data.AllCrds, config.CrdFilters)", + "", + "\tdata.ScaleCrUnderTest = GetScaleCrUnderTest(data.Namespaces, data.Crds)", + "\tdata.Csvs = findOperatorsByLabels(oc.OlmClient.OperatorsV1alpha1(), operatorsUnderTestLabelsObjects, config.TargetNameSpaces)", + "\tdata.Subscriptions = findSubscriptions(oc.OlmClient.OperatorsV1alpha1(), data.Namespaces)", + "\tdata.HelmChartReleases = getHelmList(oc.RestConfig, data.Namespaces)", + "", + "\tdata.ClusterOperators, err = findClusterOperators(oc.OcpClient.ClusterOperators())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get cluster operators, err: %v\", err)", + "\t}", + "", + "\t// Get all operator pods", + "\tdata.CSVToPodListMap, err = getOperatorCsvPods(data.Csvs)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the operator pods, err: %v\", err)", + "\t}", + "", + "\t// Best effort mode autodiscovery for operand (running-only) pods.", + "\tpods, _ := FindPodsByLabels(oc.K8sClient.CoreV1(), nil, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get running pods, err: %v\", err)", + "\t}", + "", + "\tdata.OperandPods, err = getOperandPodsFromTestCsvs(data.Csvs, pods)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get operand pods, err: %v\", err)", + "\t}", + "", + "\topenshiftVersion, err := getOpenshiftVersion(oc.OcpClient)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get the OpenShift version, err: %v\", err)", + "\t}", + "", + "\tdata.OpenshiftVersion = openshiftVersion", + "\tk8sVersion, err := oc.K8sClient.Discovery().ServerVersion()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get the K8s version, err: %v\", err)", + "\t}", + "\tdata.ValidProtocolNames = config.ValidProtocolNames", + "\tdata.ServicesIgnoreList = config.ServicesIgnoreList", + "", + "\t// Find the status of the OCP version (pre-ga, end-of-life, maintenance, or generally available)", + "\tdata.OCPStatus = compatibility.DetermineOCPStatus(openshiftVersion, time.Now())", + "", + 
"\tdata.K8sVersion = k8sVersion.GitVersion", + "\tdata.Deployments = findDeploymentsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "\tdata.StatefulSet = findStatefulSetsByLabels(oc.K8sClient.AppsV1(), podsUnderTestLabelsObjects, data.Namespaces)", + "", + "\t// Check if the Istio Service Mesh is present", + "\tdata.IstioServiceMeshFound = isIstioServiceMeshInstalled(oc.K8sClient.AppsV1(), data.AllNamespaces)", + "", + "\t// Find ClusterRoleBindings", + "\tclusterRoleBindings, err := getClusterRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get cluster role bindings, err: %v\", err)", + "\t}", + "\tdata.ClusterRoleBindings = clusterRoleBindings", + "\t// Find RoleBindings", + "\troleBindings, err := getRoleBindings(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get role bindings, error: %v\", err)", + "\t}", + "\tdata.RoleBindings = roleBindings", + "\t// find roles", + "\troles, err := getRoles(oc.K8sClient.RbacV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get roles, err: %v\", err)", + "\t}", + "\tdata.Roles = roles", + "\tdata.Hpas = findHpaControllers(oc.K8sClient, data.Namespaces)", + "\tdata.Nodes, err = oc.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of nodes, err: %v\", err)", + "\t}", + "\tdata.PersistentVolumes, err = getPersistentVolumes(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volumes, error: %v\", err)", + "\t}", + "\tdata.PersistentVolumeClaims, err = getPersistentVolumeClaims(oc.K8sClient.CoreV1())", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of persistent volume claims, err: %v\", err)", + "\t}", + "\tdata.Services, err = getServices(oc.K8sClient.CoreV1(), data.Namespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of services, err: %v\", err)", + 
"\t}", + "\tdata.AllServices, err = getServices(oc.K8sClient.CoreV1(), data.AllNamespaces, data.ServicesIgnoreList)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all services, err: %v\", err)", + "\t}", + "\tdata.ServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of service accounts under test, err: %v\", err)", + "\t}", + "\tdata.AllServiceAccounts, err = getServiceAccounts(oc.K8sClient.CoreV1(), []string{metav1.NamespaceAll})", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of all service accounts, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworks, err = getSriovNetworks(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.SriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworks, err = getSriovNetworks(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov networks, err: %v\", err)", + "\t}", + "", + "\tdata.AllSriovNetworkNodePolicies, err = getSriovNetworkNodePolicies(oc, data.AllNamespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of sriov network node policies, err: %v\", err)", + "\t}", + "", + "\tdata.NetworkAttachmentDefinitions, err = getNetworkAttachmentDefinitions(oc, data.Namespaces)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get list of network attachment definitions, err: %v\", err)", + "\t}", + "", + "\tdata.ExecutedBy = config.ExecutedBy", + "\tdata.PartnerName = config.PartnerName", + "\tdata.CollectorAppPassword = config.CollectorAppPassword", + "\tdata.CollectorAppEndpoint = config.CollectorAppEndpoint", + "\tdata.ConnectAPIKey = config.ConnectAPIConfig.APIKey", + "\tdata.ConnectAPIBaseURL = 
config.ConnectAPIConfig.BaseURL", + "\tdata.ConnectProjectID = config.ConnectAPIConfig.ProjectID", + "\tdata.ConnectAPIProxyURL = config.ConnectAPIConfig.ProxyURL", + "\tdata.ConnectAPIProxyPort = config.ConnectAPIConfig.ProxyPort", + "", + "\treturn data", + "}" + ] + }, + { + "name": "GetAllOperatorGroups", + "kind": "function", + "source": [ + "func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\tlist, err := client.OlmClient.OperatorsV1().OperatorGroups(\"\").List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil \u0026\u0026 !k8serrors.IsNotFound(err) {", + "\t\treturn nil, err", + "\t}", + "", + "\tif k8serrors.IsNotFound(err) {", + "\t\tlog.Warn(\"No OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\tif len(list.Items) == 0 {", + "\t\tlog.Warn(\"OperatorGroup API resource found but no OperatorGroup(s) found in the cluster\")", + "\t\treturn nil, nil", + "\t}", + "", + "\t// Collect all OperatorGroup pointers", + "\tvar operatorGroups []*olmv1.OperatorGroup", + "\tfor i := range list.Items {", + "\t\toperatorGroups = append(operatorGroups, \u0026list.Items[i])", + "\t}", + "", + "\treturn operatorGroups, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + 
"\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + 
"\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "name": "getSummaryAllOperators", + "kind": "function", + "source": [ + "func getSummaryAllOperators(operators []*Operator) (summary []string) {", + "\toperatorMap := map[string]bool{}", + "\tfor _, o := range operators {", + "\t\tkey := fmt.Sprintf(\"%s operator: %s ver: %s\", o.Phase, o.PackageFromCsvName, o.Version)", + "\t\tif o.IsClusterWide {", + "\t\t\tkey += \" (all namespaces)\"", + "\t\t} else {", + "\t\t\tkey += fmt.Sprintf(\" in ns: %v\", o.TargetNamespaces)", + "\t\t}", + "\t\toperatorMap[key] = true", + "\t}", + "", + "\tfor s := range operatorMap {", + "\t\tsummary = append(summary, s)", + "\t}", + "\tsort.Strings(summary)", + "\treturn summary", + "}" + ] + }, + { + "name": "createNodes", + "kind": "function", + "source": [ + "func createNodes(nodes []corev1.Node) map[string]Node {", + "\twrapperNodes := map[string]Node{}", + "", + "\t// machineConfigs is a helper map to avoid download \u0026 process the same mc twice.", + "\tmachineConfigs := map[string]MachineConfig{}", + "\tfor i := range nodes {", + "\t\tnode := \u0026nodes[i]", + "", + "\t\tif !IsOCPCluster() {", + "\t\t\t// Avoid getting Mc info for non ocp clusters.", + "\t\t\twrapperNodes[node.Name] = Node{Data: node}", + "\t\t\tlog.Warn(\"Non-OCP cluster detected. 
MachineConfig retrieval for node %q skipped.\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Get Node's machineConfig name", + "\t\tmcName, exists := node.Annotations[\"machineconfiguration.openshift.io/currentConfig\"]", + "\t\tif !exists {", + "\t\t\tlog.Error(\"Failed to get machineConfig name for node %q\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Node %q - mc name %q\", node.Name, mcName)", + "\t\tmc, err := getMachineConfig(mcName, machineConfigs)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get machineConfig %q, err: %v\", mcName, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\twrapperNodes[node.Name] = Node{", + "\t\t\tData: node,", + "\t\t\tMc: mc,", + "\t\t}", + "\t}", + "", + "\treturn wrapperNodes", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "NewEvent", + "kind": "function", + "source": [ + "func NewEvent(aEvent *corev1.Event) (out Event) {", + "\tout.Event = aEvent", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. 
Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. 
Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. 
Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "addOperatorPodsToTestPods", + "kind": "function", + "source": [ + "func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment) {", + "\tfor _, operatorPod := range operatorPods {", + "\t\t// Check whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operatorPod.Name, operatorPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operator pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operator 
pod.", + "\t\t\ttestPod.IsOperator = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operator pod %v/%v added to test pod list\", operatorPod.Namespace, operatorPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operatorPod)", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "addOperandPodsToTestPods", + "kind": 
"function", + "source": [ + "func addOperandPodsToTestPods(operandPods []*Pod, env *TestEnvironment) {", + "\tfor _, operandPod := range operandPods {", + "\t\t// Check whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operandPod.Name, operandPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operand pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operand pod.", + "\t\t\ttestPod.IsOperand = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operand pod %v/%v added to test pod list\", operandPod.Namespace, operandPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operandPod)", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "getPodContainers", + "kind": "function", + "source": [ + "func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) {", + "\tfor j := 0; j \u003c len(aPod.Spec.Containers); j++ {", + "\t\tcut := \u0026(aPod.Spec.Containers[j])", + "", + "\t\tvar cutStatus corev1.ContainerStatus", + "\t\t// get Status for current container", + "\t\tfor index := range aPod.Status.ContainerStatuses {", + "\t\t\tif aPod.Status.ContainerStatuses[index].Name == cut.Name {", + "\t\t\t\tcutStatus = aPod.Status.ContainerStatuses[index]", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\taRuntime, uid := GetRuntimeUID(\u0026cutStatus)", + "\t\tcontainer := Container{Podname: aPod.Name, Namespace: aPod.Namespace,", + "\t\t\tNodeName: aPod.Spec.NodeName, Container: cut, Status: cutStatus, Runtime: aRuntime, UID: uid,", + "\t\t\tContainerImageIdentifier: buildContainerImageSource(aPod.Spec.Containers[j].Image, cutStatus.ImageID)}", + "", + "\t\t// Warn if readiness probe did not succeeded yet.", + "\t\tif !cutStatus.Ready {", + "\t\t\tlog.Warn(\"Container %q is not ready yet.\", \u0026container)", + "\t\t}", + "", + "\t\t// Warn if container state 
is not running.", + "\t\tif state := \u0026cutStatus.State; state.Running == nil {", + "\t\t\treason := \"\"", + "\t\t\tswitch {", + "\t\t\tcase state.Waiting != nil:", + "\t\t\t\treason = \"waiting - \" + state.Waiting.Reason", + "\t\t\tcase state.Terminated != nil:", + "\t\t\t\treason = \"terminated - \" + state.Terminated.Reason", + "\t\t\tdefault:", + "\t\t\t\t// When no state was explicitly set, it's assumed to be in \"waiting state\".", + "\t\t\t\treason = \"waiting state reason unknown\"", + "\t\t\t}", + "", + "\t\t\tlog.Warn(\"Container %q is not running (reason: %s, restarts %d): some test cases might fail.\",", + "\t\t\t\t\u0026container, reason, cutStatus.RestartCount)", + "\t\t}", + "", + "\t\t// Build slices of containers based on whether or not we are \"ignoring\" them or not.", + "\t\tif useIgnoreList \u0026\u0026 container.HasIgnoredContainerName() {", + "\t\t\tcontinue", + "\t\t}", + "\t\tcontainerList = append(containerList, \u0026container)", + "\t}", + "\treturn containerList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "isSkipHelmChart", + "kind": "function", + "source": [ + "func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelmChartList) bool {", + "\tif len(skipHelmChartList) == 0 {", + "\t\treturn false", + "\t}", + "\tfor _, helm := range skipHelmChartList {", + "\t\tif helmName == helm.Name {", + "\t\t\tlog.Info(\"Helm chart with name %s was skipped\", helmName)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "updateCrUnderTest", 
+ "kind": "function", + "source": [ + "func updateCrUnderTest(scaleCrUnderTest []autodiscover.ScaleObject) []ScaleObject {", + "\tvar scaleCrUndeTestTemp []ScaleObject", + "\tfor i := range scaleCrUnderTest {", + "\t\taNewScaleCrUnderTest := ScaleObject{Scale: CrScale{scaleCrUnderTest[i].Scale},", + "\t\t\tGroupResourceSchema: scaleCrUnderTest[i].GroupResourceSchema}", + "\t\tscaleCrUndeTestTemp = append(scaleCrUndeTestTemp, aNewScaleCrUnderTest)", + "\t}", + "\treturn scaleCrUndeTestTemp", + "}" + ] + }, + { + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. 
Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "Pod.CreatedByDeploymentConfig", + "kind": "function", + "source": [ + "func (p *Pod) CreatedByDeploymentConfig() (bool, error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, podOwner := range p.GetOwnerReferences() {", + "\t\tif podOwner.Kind == replicationController {", + "\t\t\treplicationControllers, err := oc.K8sClient.CoreV1().ReplicationControllers(p.Namespace).Get(context.TODO(), podOwner.Name, metav1.GetOptions{})", + "\t\t\tif err != nil {", + "\t\t\t\treturn false, 
err", + "\t\t\t}", + "\t\t\tfor _, rcOwner := range replicationControllers.GetOwnerReferences() {", + "\t\t\t\tif rcOwner.Name == podOwner.Name \u0026\u0026 rcOwner.Kind == deploymentConfig {", + "\t\t\t\t\treturn true, err", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Seconds", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Since", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := 
configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + 
"\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal 
pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + 
"\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + }, + { + 
"name": "createNodes", + "qualifiedName": "createNodes", + "exported": false, + "signature": "func([]corev1.Node)(map[string]Node)", + "doc": "createNodes Builds a mapping of node names to enriched node structures\n\nThe function iterates over supplied node objects, skipping machine\nconfiguration retrieval for non‑OpenShift clusters and logging warnings in\nthat case. For OpenShift nodes it extracts the current MachineConfig\nannotation, fetches or reuses the corresponding config, and attaches it to\nthe resulting Node wrapper. The returned map keys each node name to its\nenriched data structure.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:856", + "calls": [ + { + "name": "IsOCPCluster", + "kind": "function", + "source": [ + "func IsOCPCluster() bool {", + "\treturn env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "getMachineConfig", + "kind": "function", + "source": [ + "func getMachineConfig(mcName string, machineConfigs map[string]MachineConfig) (MachineConfig, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\t// Check whether we had already downloaded and parsed that machineConfig resource.", + "\tif mc, 
exists := machineConfigs[mcName]; exists {", + "\t\treturn mc, nil", + "\t}", + "", + "\tnodeMc, err := client.MachineCfg.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), mcName, metav1.GetOptions{})", + "\tif err != nil {", + "\t\treturn MachineConfig{}, err", + "\t}", + "", + "\tmc := MachineConfig{", + "\t\tMachineConfig: nodeMc,", + "\t}", + "", + "\terr = json.Unmarshal(nodeMc.Spec.Config.Raw, \u0026mc.Config)", + "\tif err != nil {", + "\t\treturn MachineConfig{}, fmt.Errorf(\"failed to unmarshal mc's Config field, err: %v\", err)", + "\t}", + "", + "\treturn mc, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// 
OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + 
"\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, 
true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = 
data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createNodes(nodes []corev1.Node) map[string]Node {", + "\twrapperNodes := map[string]Node{}", + "", + "\t// machineConfigs is a helper map to avoid download \u0026 process the same mc twice.", + "\tmachineConfigs := map[string]MachineConfig{}", + "\tfor i := range nodes {", + "\t\tnode := \u0026nodes[i]", + "", + "\t\tif !IsOCPCluster() {", + "\t\t\t// Avoid getting Mc info for non ocp clusters.", + "\t\t\twrapperNodes[node.Name] = Node{Data: node}", + "\t\t\tlog.Warn(\"Non-OCP cluster detected. 
MachineConfig retrieval for node %q skipped.\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Get Node's machineConfig name", + "\t\tmcName, exists := node.Annotations[\"machineconfiguration.openshift.io/currentConfig\"]", + "\t\tif !exists {", + "\t\t\tlog.Error(\"Failed to get machineConfig name for node %q\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Node %q - mc name %q\", node.Name, mcName)", + "\t\tmc, err := getMachineConfig(mcName, machineConfigs)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get machineConfig %q, err: %v\", mcName, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\twrapperNodes[node.Name] = Node{", + "\t\t\tData: node,", + "\t\t\tMc: mc,", + "\t\t}", + "\t}", + "", + "\treturn wrapperNodes", + "}" + ] + }, + { + "name": "createOperators", + "qualifiedName": "createOperators", + "exported": false, + "signature": "func([]*olmv1Alpha.ClusterServiceVersion, []olmv1Alpha.Subscription, []*olmpkgv1.PackageManifest, []*olmv1Alpha.InstallPlan, []*olmv1Alpha.CatalogSource, bool, bool)([]*Operator)", + "doc": "createOperators Creates a list of operator objects from CSV data\n\nThe function iterates over unique cluster service versions, filters out\nfailed ones if required, and builds an Operator struct for each. It extracts\npackage and version information from the CSV name, associates at least one\nsubscription to determine target namespaces, and gathers install plans linked\nto the CSV. 
The resulting slice contains operators enriched with phase,\nnamespace, and optional CSV details.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:195", + "calls": [ + { + "name": "getUniqueCsvListByName", + "kind": "function", + "source": [ + "func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Alpha.ClusterServiceVersion {", + "\tuniqueCsvsMap := map[string]*olmv1Alpha.ClusterServiceVersion{}", + "\tfor _, csv := range csvs {", + "\t\tuniqueCsvsMap[csv.Name] = csv", + "\t}", + "", + "\tuniqueCsvsList := []*olmv1Alpha.ClusterServiceVersion{}", + "\tlog.Info(\"Found %d unique CSVs\", len(uniqueCsvsMap))", + "\tfor name, csv := range uniqueCsvsMap {", + "\t\tlog.Info(\" CSV: %s\", name)", + "\t\tuniqueCsvsList = append(uniqueCsvsList, csv)", + "\t}", + "", + "\t// Sort by name: (1) creates a deterministic output, (2) makes UT easier.", + "\tsort.Slice(uniqueCsvsList, func(i, j int) bool { return uniqueCsvsList[i].Name \u003c uniqueCsvsList[j].Name })", + "\treturn uniqueCsvsList", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "SplitN", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "getAtLeastOneSubscription", + "kind": "function", + "source": [ + "func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, subscriptions []olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest) (atLeastOneSubscription bool) {", + "\tatLeastOneSubscription = false", + "\tfor s := 
range subscriptions {", + "\t\tsubscription := \u0026subscriptions[s]", + "\t\tif subscription.Status.InstalledCSV != csv.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.SubscriptionName = subscription.Name", + "\t\top.SubscriptionNamespace = subscription.Namespace", + "\t\top.Package = subscription.Spec.Package", + "\t\top.Org = subscription.Spec.CatalogSource", + "\t\top.Channel = subscription.Spec.Channel", + "\t\tatLeastOneSubscription = true", + "", + "\t\t// If the channel is not present in the subscription, get the default channel from the package manifest", + "\t\tif op.Channel == \"\" {", + "\t\t\taPackageManifest := getPackageManifestWithSubscription(subscription, packageManifests)", + "\t\t\tif aPackageManifest != nil {", + "\t\t\t\top.Channel = aPackageManifest.Status.DefaultChannel", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Could not determine the default channel, this operator will always fail certification\")", + "\t\t\t}", + "\t\t}", + "\t\tbreak", + "\t}", + "\treturn atLeastOneSubscription", + "}" + ] + }, + { + "name": "getOperatorTargetNamespaces", + "kind": "function", + "source": [ + "func getOperatorTargetNamespaces(namespace string) ([]string, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\tlist, err := client.OlmClient.OperatorsV1().OperatorGroups(namespace).List(", + "\t\tcontext.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif len(list.Items) == 0 {", + "\t\treturn nil, errors.New(\"no OperatorGroup found\")", + "\t}", + "", + "\treturn list.Items[0].Spec.TargetNamespaces, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "getAtLeastOneInstallPlan", + "kind": "function", + "source": [ + "func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (atLeastOneInstallPlan bool) {", + "\tatLeastOneInstallPlan = false", + "\tfor _, installPlan := range allInstallPlans {", + "\t\tif installPlan.Namespace != op.SubscriptionNamespace {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If the install plan does not deploys this CSV, check the next one", + "\t\tif !getAtLeastOneCsv(csv, installPlan) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tindexImage, catalogErr := getCatalogSourceImageIndexFromInstallPlan(installPlan, allCatalogSources)", + "\t\tif catalogErr != nil {", + "\t\t\tlog.Debug(\"failed to get installPlan image index for csv %s (ns %s) installPlan %s, err: %v\",", + "\t\t\t\tcsv.Name, csv.Namespace, installPlan.Name, catalogErr)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.InstallPlans = append(op.InstallPlans, CsvInstallPlan{", + "\t\t\tName: installPlan.Name,", + "\t\t\tBundleImage: installPlan.Status.BundleLookups[0].Path,", + "\t\t\tIndexImage: indexImage,", + "\t\t})", + "\t\tatLeastOneInstallPlan = true", + "\t}", + "\treturn atLeastOneInstallPlan", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": 
"function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = 
data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator 
Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + 
"\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use 
Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. 
Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + }, + { + "name": "deployDaemonSet", + "qualifiedName": "deployDaemonSet", + "exported": false, + "signature": "func(string)(error)", + "doc": "deployDaemonSet Deploys the privileged probe daemonset\n\nThis function first configures a Kubernetes client for privileged daemonset\noperations and checks whether the target daemonset is already running with\nthe correct image. If it is not ready, it creates the daemonset using the\nspecified image and resource limits from configuration parameters. 
After\ncreation, it waits until all pods of the daemonset are ready or times out,\nreturning an error if any step fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:269", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/privileged-daemonset", + "name": "SetDaemonSetClient", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/privileged-daemonset", + "name": "IsDaemonSetReady", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/privileged-daemonset", + "name": "CreateDaemonSet", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/privileged-daemonset", + "name": "WaitDaemonsetReady", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", 
err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; 
i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = 
data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = 
data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func deployDaemonSet(namespace string) error {", + "\tk8sPrivilegedDs.SetDaemonSetClient(clientsholder.GetClientsHolder().K8sClient)", + "", + "\tdsImage := env.params.CertSuiteProbeImage", + "\tif k8sPrivilegedDs.IsDaemonSetReady(DaemonSetName, namespace, dsImage) {", + "\t\treturn nil", + "\t}", + "", + "\tmatchLabels := make(map[string]string)", + "\tmatchLabels[\"name\"] = DaemonSetName", + "\tmatchLabels[\"redhat-best-practices-for-k8s.com/app\"] = DaemonSetName", + "\t_, err := k8sPrivilegedDs.CreateDaemonSet(DaemonSetName, namespace, containerName, dsImage, matchLabels, probePodsTimeout,", + "\t\tconfiguration.GetTestParameters().DaemonsetCPUReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetCPULim,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemReq,", + "\t\tconfiguration.GetTestParameters().DaemonsetMemLim,", + "\t\tcorev1.PullIfNotPresent,", + "\t)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not deploy certsuite daemonset, err=%v\", err)", + "\t}", + "\terr = k8sPrivilegedDs.WaitDaemonsetReady(namespace, 
DaemonSetName, probePodsTimeout)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"timed out waiting for certsuite daemonset, err=%v\", err)", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "filterDPDKRunningPods", + "qualifiedName": "filterDPDKRunningPods", + "exported": false, + "signature": "func([]*Pod)([]*Pod)", + "doc": "filterDPDKRunningPods Filters pods that are running DPDK-enabled devices\n\nThis function examines a slice of pod objects, executing a command inside\neach container to locate the device file path specified by the pod’s Multus\nPCI annotation. If the output contains the string \"vfio-pci\", indicating the\npresence of a DPDK driver, the pod is added to a new list. The resulting\nslice contains only pods that have confirmed DPDK support.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:180", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetCPUPinningPodsWithDpdk", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetCPUPinningPodsWithDpdk() []*Pod {", + "\treturn filterDPDKRunningPods(env.GetGuaranteedPodsWithExclusiveCPUs())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func filterDPDKRunningPods(pods []*Pod) []*Pod {", + "\tvar filteredPods []*Pod", + "\tconst (", + "\t\tdpdkDriver = \"vfio-pci\"", + "\t\tfindDeviceSubCommand = \"find /sys -name\"", + "\t)", + "\to := clientsholder.GetClientsHolder()", + "\tfor _, pod := range pods {", + "\t\tif len(pod.MultusPCIs) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tctx := clientsholder.NewContext(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name)", + "\t\tfindCommand := fmt.Sprintf(\"%s '%s'\", findDeviceSubCommand, pod.MultusPCIs[0])", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, findCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tlog.Error(\"Failed to execute command %s in probe %s, errStr: %s, err: %v\", findCommand, pod.String(), errStr, err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tif strings.Contains(outStr, dpdkDriver) {", + "\t\t\tfilteredPods = append(filteredPods, pod)", + "\t\t}", + "\t}", + "\treturn filteredPods", + "}" + ] + }, + { + "name": 
"filterPodsWithoutHostPID", + "qualifiedName": "filterPodsWithoutHostPID", + "exported": false, + "signature": "func([]*Pod)([]*Pod)", + "doc": "filterPodsWithoutHostPID filters out pods that enable HostPID\n\nThe function receives a slice of pod objects and iterates through each one,\nchecking whether the HostPID flag is set in the pod specification. Pods with\nthis flag enabled are skipped; all others are collected into a new slice. The\nresulting slice contains only those pods that do not use the host's PID\nnamespace.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:161", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithExclusiveCPUs()))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithIsolatedCPUs()))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetNonGuaranteedPodContainersWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetNonGuaranteedPods()))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
filterPodsWithoutHostPID(pods []*Pod) []*Pod {", + "\tvar withoutHostPIDPods []*Pod", + "", + "\tfor _, pod := range pods {", + "\t\tif pod.Spec.HostPID {", + "\t\t\tcontinue", + "\t\t}", + "\t\twithoutHostPIDPods = append(withoutHostPIDPods, pod)", + "\t}", + "\treturn withoutHostPIDPods", + "}" + ] + }, + { + "name": "getAtLeastOneCsv", + "qualifiedName": "getAtLeastOneCsv", + "exported": false, + "signature": "func(*olmv1Alpha.ClusterServiceVersion, *olmv1Alpha.InstallPlan)(bool)", + "doc": "getAtLeastOneCsv Determines if an install plan includes a specific CSV\n\nThe function iterates through the names listed in the install plan’s\nspecification to see if it matches the provided CSV. If a match is found, it\nverifies that the install plan contains bundle lookup information; otherwise\nit logs a warning and skips that plan. It returns true when a matching CSV\nwith valid bundle lookups exists, false otherwise.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:311", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getAtLeastOneInstallPlan", + "kind": "function", + "source": [ + "func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (atLeastOneInstallPlan bool) {", + "\tatLeastOneInstallPlan = false", + "\tfor _, installPlan := range allInstallPlans {", + "\t\tif installPlan.Namespace != op.SubscriptionNamespace {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If the install plan does not deploys this CSV, check the next one", + "\t\tif !getAtLeastOneCsv(csv, installPlan) {", + "\t\t\tcontinue", + 
"\t\t}", + "", + "\t\tindexImage, catalogErr := getCatalogSourceImageIndexFromInstallPlan(installPlan, allCatalogSources)", + "\t\tif catalogErr != nil {", + "\t\t\tlog.Debug(\"failed to get installPlan image index for csv %s (ns %s) installPlan %s, err: %v\",", + "\t\t\t\tcsv.Name, csv.Namespace, installPlan.Name, catalogErr)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.InstallPlans = append(op.InstallPlans, CsvInstallPlan{", + "\t\t\tName: installPlan.Name,", + "\t\t\tBundleImage: installPlan.Status.BundleLookups[0].Path,", + "\t\t\tIndexImage: indexImage,", + "\t\t})", + "\t\tatLeastOneInstallPlan = true", + "\t}", + "\treturn atLeastOneInstallPlan", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1Alpha.InstallPlan) (atLeastOneCsv bool) {", + "\tatLeastOneCsv = false", + "\tfor _, csvName := range installPlan.Spec.ClusterServiceVersionNames {", + "\t\tif csv.Name != csvName {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif installPlan.Status.BundleLookups == nil {", + "\t\t\tlog.Warn(\"InstallPlan %s for csv %s (ns %s) does not have bundle lookups. It will be skipped.\", installPlan.Name, csv.Name, csv.Namespace)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneCsv = true", + "\t\tbreak", + "\t}", + "\treturn atLeastOneCsv", + "}" + ] + }, + { + "name": "getAtLeastOneInstallPlan", + "qualifiedName": "getAtLeastOneInstallPlan", + "exported": false, + "signature": "func(*Operator, *olmv1Alpha.ClusterServiceVersion, []*olmv1Alpha.InstallPlan, []*olmv1Alpha.CatalogSource)(bool)", + "doc": "getAtLeastOneInstallPlan retrieves at least one install plan for an operator\n\nThis function iterates through all available install plans, filtering by\nnamespace and ensuring the plan includes the specified CSV. For each\nqualifying plan it extracts bundle and index image information from catalog\nsources. 
The install plan details are appended to the operator’s\nInstallPlans slice and a true flag is returned when at least one plan has\nbeen added.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:336", + "calls": [ + { + "name": "getAtLeastOneCsv", + "kind": "function", + "source": [ + "func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1Alpha.InstallPlan) (atLeastOneCsv bool) {", + "\tatLeastOneCsv = false", + "\tfor _, csvName := range installPlan.Spec.ClusterServiceVersionNames {", + "\t\tif csv.Name != csvName {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif installPlan.Status.BundleLookups == nil {", + "\t\t\tlog.Warn(\"InstallPlan %s for csv %s (ns %s) does not have bundle lookups. It will be skipped.\", installPlan.Name, csv.Name, csv.Namespace)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneCsv = true", + "\t\tbreak", + "\t}", + "\treturn atLeastOneCsv", + "}" + ] + }, + { + "name": "getCatalogSourceImageIndexFromInstallPlan", + "kind": "function", + "source": [ + "func getCatalogSourceImageIndexFromInstallPlan(installPlan *olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (string, error) {", + "\t// ToDo/Technical debt: what to do if installPlan has more than one BundleLookups entries.", + "\tcatalogSourceName := installPlan.Status.BundleLookups[0].CatalogSourceRef.Name", + "\tcatalogSourceNamespace := installPlan.Status.BundleLookups[0].CatalogSourceRef.Namespace", + "", + "\tfor _, s := range allCatalogSources {", + "\t\tif s.Namespace == catalogSourceNamespace \u0026\u0026 s.Name == catalogSourceName {", + "\t\t\treturn s.Spec.Image, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", fmt.Errorf(\"failed to get catalogsource: not found\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, 
LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. 
Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (atLeastOneInstallPlan bool) {", + "\tatLeastOneInstallPlan = false", + "\tfor _, installPlan := range allInstallPlans {", + "\t\tif installPlan.Namespace != op.SubscriptionNamespace {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If the install plan does not deploys this CSV, check the next one", + "\t\tif !getAtLeastOneCsv(csv, installPlan) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tindexImage, catalogErr := getCatalogSourceImageIndexFromInstallPlan(installPlan, allCatalogSources)", + "\t\tif catalogErr != nil {", + "\t\t\tlog.Debug(\"failed to get installPlan image 
index for csv %s (ns %s) installPlan %s, err: %v\",", + "\t\t\t\tcsv.Name, csv.Namespace, installPlan.Name, catalogErr)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.InstallPlans = append(op.InstallPlans, CsvInstallPlan{", + "\t\t\tName: installPlan.Name,", + "\t\t\tBundleImage: installPlan.Status.BundleLookups[0].Path,", + "\t\t\tIndexImage: indexImage,", + "\t\t})", + "\t\tatLeastOneInstallPlan = true", + "\t}", + "\treturn atLeastOneInstallPlan", + "}" + ] + }, + { + "name": "getAtLeastOneSubscription", + "qualifiedName": "getAtLeastOneSubscription", + "exported": false, + "signature": "func(*Operator, *olmv1Alpha.ClusterServiceVersion, []olmv1Alpha.Subscription, []*olmpkgv1.PackageManifest)(bool)", + "doc": "getAtLeastOneSubscription Finds a subscription linked to the given CSV and updates the operator record\n\nThe function scans through all subscriptions, matching one whose installed\nCSV name equals that of the provided CSV. When found, it populates the\noperator with subscription details such as name, namespace, package, catalog\nsource, and channel. 
If the channel is missing, it retrieves the default\nchannel from the related package manifest; otherwise it logs an error.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:256", + "calls": [ + { + "name": "getPackageManifestWithSubscription", + "kind": "function", + "source": [ + "func getPackageManifestWithSubscription(subscription *olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest) *olmpkgv1.PackageManifest {", + "\tfor index := range packageManifests {", + "\t\tif packageManifests[index].Status.PackageName == subscription.Spec.Package \u0026\u0026", + "\t\t\tpackageManifests[index].Namespace == subscription.Spec.CatalogSourceNamespace \u0026\u0026", + "\t\t\tpackageManifests[index].Status.CatalogSource == subscription.Spec.CatalogSource {", + "\t\t\treturn packageManifests[index]", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, 
csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, subscriptions []olmv1Alpha.Subscription, 
packageManifests []*olmpkgv1.PackageManifest) (atLeastOneSubscription bool) {", + "\tatLeastOneSubscription = false", + "\tfor s := range subscriptions {", + "\t\tsubscription := \u0026subscriptions[s]", + "\t\tif subscription.Status.InstalledCSV != csv.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.SubscriptionName = subscription.Name", + "\t\top.SubscriptionNamespace = subscription.Namespace", + "\t\top.Package = subscription.Spec.Package", + "\t\top.Org = subscription.Spec.CatalogSource", + "\t\top.Channel = subscription.Spec.Channel", + "\t\tatLeastOneSubscription = true", + "", + "\t\t// If the channel is not present in the subscription, get the default channel from the package manifest", + "\t\tif op.Channel == \"\" {", + "\t\t\taPackageManifest := getPackageManifestWithSubscription(subscription, packageManifests)", + "\t\t\tif aPackageManifest != nil {", + "\t\t\t\top.Channel = aPackageManifest.Status.DefaultChannel", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Could not determine the default channel, this operator will always fail certification\")", + "\t\t\t}", + "\t\t}", + "\t\tbreak", + "\t}", + "\treturn atLeastOneSubscription", + "}" + ] + }, + { + "name": "getCNCFNetworksNamesFromPodAnnotation", + "qualifiedName": "getCNCFNetworksNamesFromPodAnnotation", + "exported": false, + "signature": "func(string)([]string)", + "doc": "getCNCFNetworksNamesFromPodAnnotation Extracts network names from a pod's CNCF annotation\n\nThe function receives the raw value of the k8s.v1.cni.cncf.io/networks\nannotation, which can be either a comma‑separated list or a JSON array of\nobjects. It attempts to unmarshal the JSON; if that succeeds it collects the\n\"name\" field from each object. If unmarshalling fails, it falls back to\nsplitting the string on commas and trimming spaces, returning all non‑empty\nnames. 
The result is a slice of strings containing only the network\nidentifiers.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:345", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOV", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOV() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(p.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\tisSRIOV, err := isNetworkAttachmentDefinitionConfigTypeSRIOV(nad.Spec.Config)", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to know if network-attachment %s is sriov: %v\", networkName, err)", + "\t\t}", + 
"", + "\t\tlog.Debug(\"%s: NAD config: %s\", p, nad.Spec.Config)", + "\t\tif isSRIOV {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOVWithMTU", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOVWithMTU() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(", + "\t\t\tp.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\t// If the network-status annotation is not set, let's check the SriovNetwork/SriovNetworkNodePolicy CRs", + "\t\t// to see if the MTU is set there.", + "\t\tlog.Debug(\"Number of SriovNetworks: %d\", len(env.AllSriovNetworks))", + "\t\tlog.Debug(\"Number of SriovNetworkNodePolicies: %d\", len(env.AllSriovNetworkNodePolicies))", + "\t\tif sriovNetworkUsesMTU(env.AllSriovNetworks, env.AllSriovNetworkNodePolicies, nad.Name) {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
getCNCFNetworksNamesFromPodAnnotation(networksAnnotation string) []string {", + "\t// Each CNCF network has many more fields, but here we only need to unmarshal the name.", + "\t// See https://github.com/k8snetworkplumbingwg/multus-cni/blob/e692127d19623c8bdfc4d391224ea542658b584c/pkg/types/types.go#L127", + "\ttype CNCFNetwork struct {", + "\t\tName string `json:\"name\"`", + "\t}", + "", + "\tnetworkObjects := []CNCFNetwork{}", + "\tnetworkNames := []string{}", + "", + "\t// Let's start trying to unmarshal a json array of objects.", + "\t// We will not care about bad-formatted/invalid annotation value. If that's the case,", + "\t// the pod wouldn't have been deployed or wouldn't be in running state.", + "\tif err := json.Unmarshal([]byte(networksAnnotation), \u0026networkObjects); err == nil {", + "\t\tfor _, network := range networkObjects {", + "\t\t\tnetworkNames = append(networkNames, network.Name)", + "\t\t}", + "\t\treturn networkNames", + "\t}", + "", + "\t// If the previous unmarshalling didn't work, let's try with parsing the comma separated names list.", + "\tnetworks := strings.TrimSpace(networksAnnotation)", + "", + "\t// First, avoid empty strings (unlikely).", + "\tif networks == \"\" {", + "\t\treturn []string{}", + "\t}", + "", + "\tfor _, networkName := range strings.Split(networks, \",\") {", + "\t\tnetworkNames = append(networkNames, strings.TrimSpace(networkName))", + "\t}", + "\treturn networkNames", + "}" + ] + }, + { + "name": "getCatalogSourceBundleCountFromPackageManifests", + "qualifiedName": "getCatalogSourceBundleCountFromPackageManifests", + "exported": false, + "signature": "func(*TestEnvironment, *olmv1Alpha.CatalogSource)(int)", + "doc": "getCatalogSourceBundleCountFromPackageManifests Counts bundles from package manifests linked to a catalog source\n\nIt iterates over all known package manifests in the test environment, filters\nthose that belong to the specified catalog source by name and namespace, then\nsums the number of 
entries across every channel for each matching manifest.\nThe total count is returned as an integer representing how many bundles are\navailable via the manifests.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/catalogsources.go:114", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// Now that we know the catalog source, we are going to count up all of the relatedImages", + "\t// that are associated with the catalog source. This will give us the number of bundles that", + "\t// are available in the catalog source.", + "", + "\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count", + "\tconst (", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn 0", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\treturn getCatalogSourceBundleCountFromProbeContainer(env, cs)", + "\t\t}", + "", + "\t\t// If we didn't find the bundle count via the probe container, we can attempt to use the package manifests", + "\t}", + "", + "\t// If we didn't find the bundle count via the probe container, we can use the package manifests", + "\t// to get the bundle count", + "\treturn 
getCatalogSourceBundleCountFromPackageManifests(env, cs)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getCatalogSourceBundleCountFromPackageManifests(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\ttotalRelatedBundles := 0", + "\tfor _, pm := range env.AllPackageManifests {", + "\t\t// Skip if the package manifest is not associated with the catalog source", + "\t\tif pm.Status.CatalogSource != cs.Name || pm.Status.CatalogSourceNamespace != cs.Namespace {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Count up the number of related bundles", + "\t\tfor c := range pm.Status.Channels {", + "\t\t\ttotalRelatedBundles += len(pm.Status.Channels[c].Entries)", + "\t\t}", + "\t}", + "", + "\treturn totalRelatedBundles", + "}" + ] + }, + { + "name": "getCatalogSourceBundleCountFromProbeContainer", + "qualifiedName": "getCatalogSourceBundleCountFromProbeContainer", + "exported": false, + "signature": "func(*TestEnvironment, *olmv1Alpha.CatalogSource)(int)", + "doc": "getCatalogSourceBundleCountFromProbeContainer retrieves the number of bundles for a catalog source via probe container\n\nThe function locates the service linked to the given catalog source, then\nruns a grpcurl command inside each available probe pod to list registry\nbundles. It parses the output into an integer and returns that count. 
If no\nmatching service or probe pod yields a valid result, it logs a warning and\nreturns -1.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/catalogsources.go:61", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Pod.String", + "kind": "function", + "source": [ + "func (p *Pod) String() string {", + "\treturn fmt.Sprintf(\"pod: %s ns: %s\",", + "\t\tp.Name,", + "\t\tp.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": 
"Trim", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// Now that we know the catalog source, we are going to count up all of the relatedImages", + "\t// that are associated with the catalog source. 
This will give us the number of bundles that", + "\t// are available in the catalog source.", + "", + "\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count", + "\tconst (", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn 0", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\treturn getCatalogSourceBundleCountFromProbeContainer(env, cs)", + "\t\t}", + "", + "\t\t// If we didn't find the bundle count via the probe container, we can attempt to use the package manifests", + "\t}", + "", + "\t// If we didn't find the bundle count via the probe container, we can use the package manifests", + "\t// to get the bundle count", + "\treturn getCatalogSourceBundleCountFromPackageManifests(env, cs)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// We need to use the probe container to get the bundle count", + "\t// This is because the package manifests are not available in the cluster", + "\t// for OCP versions \u003c= 4.12", + "\to := clientsholder.GetClientsHolder()", + "", + "\t// Find the kubernetes service associated with the catalog source", + "\tfor _, svc := range env.AllServices {", + "\t\t// Skip if the service is not associated with the catalog source", + "\t\tif svc.Spec.Selector[\"olm.catalogSource\"] != cs.Name {", + 
"\t\t\tcontinue", + "\t\t}", + "", + "\t\tlog.Info(\"Found service %q associated with catalog source %q.\", svc.Name, cs.Name)", + "", + "\t\t// Use a probe pod to get the bundle count", + "\t\tfor _, probePod := range env.ProbePods {", + "\t\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\t\tcmd := \"grpcurl -plaintext \" + svc.Spec.ClusterIP + \":50051 api.Registry.ListBundles | jq -s 'length'\"", + "\t\t\tcmdValue, errStr, err := o.ExecCommandContainer(ctx, cmd)", + "\t\t\tif err != nil || errStr != \"\" {", + "\t\t\t\tlog.Error(\"Failed to execute command %s in probe pod %s\", cmd, probePod.String())", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Sanitize the command output", + "\t\t\tcmdValue = strings.TrimSpace(cmdValue)", + "\t\t\tcmdValue = strings.Trim(cmdValue, \"\\\"\")", + "", + "\t\t\t// Parse the command output", + "\t\t\tbundleCount, err := strconv.Atoi(cmdValue)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to convert bundle count to integer: %s\", cmdValue)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Try each probe pod until we get a valid bundle count (which should only be 1 probe pod)", + "\t\t\tlog.Info(\"Found bundle count via grpcurl %d for catalog source %q.\", bundleCount, cs.Name)", + "\t\t\treturn bundleCount", + "\t\t}", + "\t}", + "", + "\tlog.Warn(\"Warning: No services found associated with catalog source %q.\", cs.Name)", + "\treturn -1", + "}" + ] + }, + { + "name": "getCatalogSourceImageIndexFromInstallPlan", + "qualifiedName": "getCatalogSourceImageIndexFromInstallPlan", + "exported": false, + "signature": "func(*olmv1Alpha.InstallPlan, []*olmv1Alpha.CatalogSource)(string, error)", + "doc": "getCatalogSourceImageIndexFromInstallPlan retrieves the image index of a catalog source referenced by an install plan\n\nThe function takes an install plan and a list of catalog sources, finds the\ncatalog source referenced in the first bundle 
lookup, and returns its image\nfield. If no matching catalog source is found it reports an error. The\nreturned string is used elsewhere to identify the index image for a CSV.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:409", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getAtLeastOneInstallPlan", + "kind": "function", + "source": [ + "func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (atLeastOneInstallPlan bool) {", + "\tatLeastOneInstallPlan = false", + "\tfor _, installPlan := range allInstallPlans {", + "\t\tif installPlan.Namespace != op.SubscriptionNamespace {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// If the install plan does not deploys this CSV, check the next one", + "\t\tif !getAtLeastOneCsv(csv, installPlan) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tindexImage, catalogErr := getCatalogSourceImageIndexFromInstallPlan(installPlan, allCatalogSources)", + "\t\tif catalogErr != nil {", + "\t\t\tlog.Debug(\"failed to get installPlan image index for csv %s (ns %s) installPlan %s, err: %v\",", + "\t\t\t\tcsv.Name, csv.Namespace, installPlan.Name, catalogErr)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.InstallPlans = append(op.InstallPlans, CsvInstallPlan{", + "\t\t\tName: installPlan.Name,", + "\t\t\tBundleImage: installPlan.Status.BundleLookups[0].Path,", + "\t\t\tIndexImage: indexImage,", + "\t\t})", + "\t\tatLeastOneInstallPlan = true", + "\t}", + "\treturn atLeastOneInstallPlan", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getCatalogSourceImageIndexFromInstallPlan(installPlan *olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (string, error) {", + "\t// ToDo/Technical debt: what to 
do if installPlan has more than one BundleLookups entries.", + "\tcatalogSourceName := installPlan.Status.BundleLookups[0].CatalogSourceRef.Name", + "\tcatalogSourceNamespace := installPlan.Status.BundleLookups[0].CatalogSourceRef.Namespace", + "", + "\tfor _, s := range allCatalogSources {", + "\t\tif s.Namespace == catalogSourceNamespace \u0026\u0026 s.Name == catalogSourceName {", + "\t\t\treturn s.Spec.Image, nil", + "\t\t}", + "\t}", + "", + "\treturn \"\", fmt.Errorf(\"failed to get catalogsource: not found\")", + "}" + ] + }, + { + "name": "getContainers", + "qualifiedName": "getContainers", + "exported": false, + "signature": "func([]*Pod)([]*Container)", + "doc": "getContainers collects all containers from a list of pods\n\nThe function iterates over each pod in the provided slice, appending every\ncontainer within those pods to a new slice. It returns this aggregated slice,\nallowing callers to work with a flat list of containers regardless of their\noriginating pod.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/filters.go:248", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUs() []*Container {", + "\treturn getContainers(env.GetGuaranteedPodsWithExclusiveCPUs())", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithExclusiveCPUs()))", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithIsolatedCPUs()))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID", + "kind": "function", + "source": [ + "func (env *TestEnvironment) GetNonGuaranteedPodContainersWithoutHostPID() []*Container {", + "\treturn getContainers(filterPodsWithoutHostPID(env.GetNonGuaranteedPods()))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getContainers(pods []*Pod) []*Container {", + "\tvar containers []*Container", + "", + "\tfor _, pod := range pods {", + "\t\tcontainers = append(containers, pod.Containers...)", + "\t}", + "\treturn containers", + "}" + ] + }, + { + "name": "getMachineConfig", + "qualifiedName": "getMachineConfig", + "exported": false, + "signature": "func(string, map[string]MachineConfig)(MachineConfig, error)", + "doc": "getMachineConfig Retrieves a machine configuration by name, using caching\n\nThe function first checks an in-memory map for the requested configuration;\nif present it returns it immediately. Otherwise it queries the Kubernetes API\nfor the MachineConfig resource, decodes its raw YAML into a Go struct, and\nstores the result for future calls. 
Errors from fetching or unmarshalling are\npropagated to the caller.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:823", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "MachineConfigs", + "kind": "function" + }, + { + "name": "MachineconfigurationV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createNodes", + "kind": "function", + "source": [ + "func createNodes(nodes []corev1.Node) map[string]Node {", + "\twrapperNodes := map[string]Node{}", + "", + "\t// machineConfigs is a helper map to avoid download \u0026 process the same mc twice.", + "\tmachineConfigs := map[string]MachineConfig{}", + "\tfor i := range nodes {", + "\t\tnode := \u0026nodes[i]", + "", + "\t\tif !IsOCPCluster() {", + "\t\t\t// Avoid getting Mc info for non ocp clusters.", + "\t\t\twrapperNodes[node.Name] = Node{Data: node}", + "\t\t\tlog.Warn(\"Non-OCP cluster detected. 
MachineConfig retrieval for node %q skipped.\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Get Node's machineConfig name", + "\t\tmcName, exists := node.Annotations[\"machineconfiguration.openshift.io/currentConfig\"]", + "\t\tif !exists {", + "\t\t\tlog.Error(\"Failed to get machineConfig name for node %q\", node.Name)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Node %q - mc name %q\", node.Name, mcName)", + "\t\tmc, err := getMachineConfig(mcName, machineConfigs)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get machineConfig %q, err: %v\", mcName, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\twrapperNodes[node.Name] = Node{", + "\t\t\tData: node,", + "\t\t\tMc: mc,", + "\t\t}", + "\t}", + "", + "\treturn wrapperNodes", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getMachineConfig(mcName string, machineConfigs map[string]MachineConfig) (MachineConfig, error) {", + "\tclient := clientsholder.GetClientsHolder()", + "", + "\t// Check whether we had already downloaded and parsed that machineConfig resource.", + "\tif mc, exists := machineConfigs[mcName]; exists {", + "\t\treturn mc, nil", + "\t}", + "", + "\tnodeMc, err := client.MachineCfg.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), mcName, metav1.GetOptions{})", + "\tif err != nil {", + "\t\treturn MachineConfig{}, err", + "\t}", + "", + "\tmc := MachineConfig{", + "\t\tMachineConfig: nodeMc,", + "\t}", + "", + "\terr = json.Unmarshal(nodeMc.Spec.Config.Raw, \u0026mc.Config)", + "\tif err != nil {", + "\t\treturn MachineConfig{}, fmt.Errorf(\"failed to unmarshal mc's Config field, err: %v\", err)", + "\t}", + "", + "\treturn mc, nil", + "}" + ] + }, + { + "name": "getOperatorTargetNamespaces", + "qualifiedName": "getOperatorTargetNamespaces", + "exported": false, + "signature": "func(string)([]string, error)", + "doc": "getOperatorTargetNamespaces Retrieves the list of namespaces an operator targets\n\nThe function 
queries the Operator Group resource within a specified namespace\nto determine which namespaces the operator is allowed to operate in. It\nreturns a slice of target namespace names and an error if no OperatorGroup\nexists or if the API call fails.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:429", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "OperatorGroups", + "kind": "function" + }, + { + "name": "OperatorsV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + "\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := 
getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getOperatorTargetNamespaces(namespace string) ([]string, error) {", + "\tclient 
:= clientsholder.GetClientsHolder()", + "", + "\tlist, err := client.OlmClient.OperatorsV1().OperatorGroups(namespace).List(", + "\t\tcontext.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\treturn nil, err", + "\t}", + "", + "\tif len(list.Items) == 0 {", + "\t\treturn nil, errors.New(\"no OperatorGroup found\")", + "\t}", + "", + "\treturn list.Items[0].Spec.TargetNamespaces, nil", + "}" + ] + }, + { + "name": "getPackageManifestWithSubscription", + "qualifiedName": "getPackageManifestWithSubscription", + "exported": false, + "signature": "func(*olmv1Alpha.Subscription, []*olmpkgv1.PackageManifest)(*olmpkgv1.PackageManifest)", + "doc": "getPackageManifestWithSubscription Finds a matching package manifest for a subscription\n\nThe function iterates over the provided package manifests, checking whether\neach one matches the subscription’s package name, catalog source namespace,\nand catalog source. If a match is found, that package manifest is returned;\notherwise the function returns nil. 
This lookup assists in determining\ndefault channel information when it is not explicitly set in the\nsubscription.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:293", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "getAtLeastOneSubscription", + "kind": "function", + "source": [ + "func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, subscriptions []olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest) (atLeastOneSubscription bool) {", + "\tatLeastOneSubscription = false", + "\tfor s := range subscriptions {", + "\t\tsubscription := \u0026subscriptions[s]", + "\t\tif subscription.Status.InstalledCSV != csv.Name {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\top.SubscriptionName = subscription.Name", + "\t\top.SubscriptionNamespace = subscription.Namespace", + "\t\top.Package = subscription.Spec.Package", + "\t\top.Org = subscription.Spec.CatalogSource", + "\t\top.Channel = subscription.Spec.Channel", + "\t\tatLeastOneSubscription = true", + "", + "\t\t// If the channel is not present in the subscription, get the default channel from the package manifest", + "\t\tif op.Channel == \"\" {", + "\t\t\taPackageManifest := getPackageManifestWithSubscription(subscription, packageManifests)", + "\t\t\tif aPackageManifest != nil {", + "\t\t\t\top.Channel = aPackageManifest.Status.DefaultChannel", + "\t\t\t} else {", + "\t\t\t\tlog.Error(\"Could not determine the default channel, this operator will always fail certification\")", + "\t\t\t}", + "\t\t}", + "\t\tbreak", + "\t}", + "\treturn atLeastOneSubscription", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getPackageManifestWithSubscription(subscription *olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest) *olmpkgv1.PackageManifest {", + "\tfor index := range packageManifests {", + "\t\tif 
packageManifests[index].Status.PackageName == subscription.Spec.Package \u0026\u0026", + "\t\t\tpackageManifests[index].Namespace == subscription.Spec.CatalogSourceNamespace \u0026\u0026", + "\t\t\tpackageManifests[index].Status.CatalogSource == subscription.Spec.CatalogSource {", + "\t\t\treturn packageManifests[index]", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "getPodContainers", + "qualifiedName": "getPodContainers", + "exported": false, + "signature": "func(*corev1.Pod, bool)([]*Container)", + "doc": "getPodContainers Collects relevant container information from a pod while optionally filtering ignored containers\n\nThe function iterates over the pod’s declared containers, matching each\nwith its status to extract runtime details and image identifiers. It logs\nwarnings for containers that are not ready or not running, providing reasons\nand restart counts. If the caller enables ignore mode, containers whose names\nmatch predefined patterns are skipped; otherwise they are added to the\nreturned slice.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:514", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetRuntimeUID", + "kind": "function", + "source": [ + "func GetRuntimeUID(cs *corev1.ContainerStatus) (runtime, uid string) {", + "\tsplit := strings.Split(cs.ContainerID, \"://\")", + "\tif len(split) \u003e 0 {", + "\t\tuid = split[len(split)-1]", + "\t\truntime = split[0]", + "\t}", + "\treturn runtime, uid", + "}" + ] + }, + { + "name": "buildContainerImageSource", + "kind": "function", + "source": [ + "func buildContainerImageSource(urlImage, urlImageID string) (source ContainerImageIdentifier) {", + "\tconst regexImageWithTag = `^([^/]*)/*([^@]*):(.*)`", + "\tconst regexImageDigest = `^([^/]*)/(.*)@(.*:.*)`", + "", + "\t// get image repository, Name and tag if present", + "\tre := regexp.MustCompile(regexImageWithTag)", + "\tmatch := re.FindStringSubmatch(urlImage)", + "", + "\tif 
match != nil {", + "\t\tif match[2] != \"\" {", + "\t\t\tsource.Registry = match[1]", + "\t\t\tsource.Repository = match[2]", + "\t\t\tsource.Tag = match[3]", + "\t\t} else {", + "\t\t\tsource.Repository = match[1]", + "\t\t\tsource.Tag = match[3]", + "\t\t}", + "\t}", + "", + "\t// get image Digest based on imageID only", + "\tre = regexp.MustCompile(regexImageDigest)", + "\tmatch = re.FindStringSubmatch(urlImageID)", + "", + "\tif match != nil {", + "\t\tsource.Digest = match[3]", + "\t}", + "", + "\tlog.Debug(\"Parsed image, repo: %s, name:%s, tag: %s, digest: %s\",", + "\t\tsource.Registry,", + "\t\tsource.Repository,", + "\t\tsource.Tag,", + "\t\tsource.Digest)", + "", + "\treturn source", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "Container.HasIgnoredContainerName", + "kind": "function", + "source": [ + "func (c *Container) HasIgnoredContainerName() bool {", + "\tfor _, ign := range ignoredContainerNames {", + "\t\tif c.IsIstioProxy() || strings.Contains(c.Name, ign) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "NewPod", + "kind": "function", + "source": [ + "func NewPod(aPod *corev1.Pod) (out Pod) {", + "\tvar err error", + "\tout.Pod = aPod", + "\tout.MultusNetworkInterfaces = make(map[string]CniNetworkInterface)", + "\tannotations := aPod.GetAnnotations()", + "\tnetStatus, exists := 
annotations[CniNetworksStatusKey]", + "\tif !exists || strings.TrimSpace(netStatus) == \"\" {", + "\t\t// Be graceful: log which annotations are present when the expected one is missing/empty", + "\t\tkeys := make([]string, 0, len(annotations))", + "\t\tfor k := range annotations {", + "\t\t\tkeys = append(keys, k)", + "\t\t}", + "\t\tlog.Info(\"Pod %q (namespace %q) missing or empty annotation %q. Present annotations: %v\", aPod.Name, aPod.Namespace, CniNetworksStatusKey, keys)", + "\t} else {", + "\t\tout.MultusNetworkInterfaces, err = GetPodIPsPerNet(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get IPs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "", + "\t\tout.MultusPCIs, err = GetPciPerPod(netStatus)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Could not get PCIs for Pod %q (namespace %q), err: %v\", aPod.Name, aPod.Namespace, err)", + "\t\t}", + "\t}", + "", + "\tif _, ok := aPod.GetLabels()[skipConnectivityTestsLabel]; ok {", + "\t\tout.SkipNetTests = true", + "\t}", + "\tif _, ok := aPod.GetLabels()[skipMultusConnectivityTestsLabel]; ok {", + "\t\tout.SkipMultusNetTests = true", + "\t}", + "\tout.Containers = append(out.Containers, getPodContainers(aPod, false)...)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := 
deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i 
\u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = 
\u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + 
"\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) {", + "\tfor j := 0; j \u003c len(aPod.Spec.Containers); j++ {", + "\t\tcut := \u0026(aPod.Spec.Containers[j])", + "", + "\t\tvar cutStatus 
corev1.ContainerStatus", + "\t\t// get Status for current container", + "\t\tfor index := range aPod.Status.ContainerStatuses {", + "\t\t\tif aPod.Status.ContainerStatuses[index].Name == cut.Name {", + "\t\t\t\tcutStatus = aPod.Status.ContainerStatuses[index]", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\taRuntime, uid := GetRuntimeUID(\u0026cutStatus)", + "\t\tcontainer := Container{Podname: aPod.Name, Namespace: aPod.Namespace,", + "\t\t\tNodeName: aPod.Spec.NodeName, Container: cut, Status: cutStatus, Runtime: aRuntime, UID: uid,", + "\t\t\tContainerImageIdentifier: buildContainerImageSource(aPod.Spec.Containers[j].Image, cutStatus.ImageID)}", + "", + "\t\t// Warn if readiness probe did not succeeded yet.", + "\t\tif !cutStatus.Ready {", + "\t\t\tlog.Warn(\"Container %q is not ready yet.\", \u0026container)", + "\t\t}", + "", + "\t\t// Warn if container state is not running.", + "\t\tif state := \u0026cutStatus.State; state.Running == nil {", + "\t\t\treason := \"\"", + "\t\t\tswitch {", + "\t\t\tcase state.Waiting != nil:", + "\t\t\t\treason = \"waiting - \" + state.Waiting.Reason", + "\t\t\tcase state.Terminated != nil:", + "\t\t\t\treason = \"terminated - \" + state.Terminated.Reason", + "\t\t\tdefault:", + "\t\t\t\t// When no state was explicitly set, it's assumed to be in \"waiting state\".", + "\t\t\t\treason = \"waiting state reason unknown\"", + "\t\t\t}", + "", + "\t\t\tlog.Warn(\"Container %q is not running (reason: %s, restarts %d): some test cases might fail.\",", + "\t\t\t\t\u0026container, reason, cutStatus.RestartCount)", + "\t\t}", + "", + "\t\t// Build slices of containers based on whether or not we are \"ignoring\" them or not.", + "\t\tif useIgnoreList \u0026\u0026 container.HasIgnoredContainerName() {", + "\t\t\tcontinue", + "\t\t}", + "\t\tcontainerList = append(containerList, \u0026container)", + "\t}", + "\treturn containerList", + "}" + ] + }, + { + "name": "getSummaryAllOperators", + "qualifiedName": "getSummaryAllOperators", + 
"exported": false, + "signature": "func([]*Operator)([]string)", + "doc": "getSummaryAllOperators Creates a sorted list of unique operator status strings\n\nThis function iterates over a slice of operators, building a key that\nincludes the phase, package name, version and namespace information. It\nstores each distinct key in a map to avoid duplicates, then collects the keys\ninto a slice, sorts them alphabetically, and returns the result.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:384", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + 
"\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i 
:= 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", 
+ "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := 
createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getSummaryAllOperators(operators []*Operator) (summary []string) {", + "\toperatorMap := map[string]bool{}", + "\tfor _, o := range operators {", + "\t\tkey := fmt.Sprintf(\"%s operator: %s ver: %s\", o.Phase, o.PackageFromCsvName, o.Version)", + "\t\tif o.IsClusterWide {", + "\t\t\tkey += \" (all namespaces)\"", + "\t\t} else {", + "\t\t\tkey += fmt.Sprintf(\" in ns: %v\", o.TargetNamespaces)", + "\t\t}", + "\t\toperatorMap[key] = true", + "\t}", + "", + "\tfor s := range operatorMap {", + "\t\tsummary = append(summary, s)", + "\t}", + "\tsort.Strings(summary)", + "\treturn summary", + "}" + ] + }, + { + "name": "getUniqueCsvListByName", + "qualifiedName": "getUniqueCsvListByName", + "exported": false, + "signature": 
"func([]*olmv1Alpha.ClusterServiceVersion)([]*olmv1Alpha.ClusterServiceVersion)", + "doc": "getUniqueCsvListByName filters a list to include only one instance per CSV name\n\nThe function receives a slice of ClusterServiceVersion objects, removes any\nduplicates by keeping the last occurrence for each unique name, logs how many\nunique entries were found, and then returns the deduplicated slice sorted\nalphabetically by CSV name. It uses an internal map to track seen names and\nsort.Slice for deterministic ordering.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:169", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Slice", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "createOperators", + "kind": "function", + "source": [ + "func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion,", + "\tallSubscriptions []olmv1Alpha.Subscription,", + "\tallPackageManifests []*olmpkgv1.PackageManifest,", + "\tallInstallPlans []*olmv1Alpha.InstallPlan,", + "\tallCatalogSources []*olmv1Alpha.CatalogSource,", + "\tsucceededRequired,", + "\tkeepCsvDetails bool) []*Operator {", + "\tconst (", + "\t\tmaxSize = 2", + "\t)", + "", + "\toperators := []*Operator{}", + "", + "\t// Make map with unique csv names to original index in the env.Csvs slice.", + 
"\t// Otherwise, cluster-wide operators info will be repeated unnecessarily.", + "\tuniqueCsvs := getUniqueCsvListByName(csvs)", + "", + "\tfor _, csv := range uniqueCsvs {", + "\t\t// Skip CSVs that are not in the Succeeded phase if the flag is set.", + "\t\tif csv.Status.Phase != olmv1Alpha.CSVPhaseSucceeded \u0026\u0026 succeededRequired {", + "\t\t\tcontinue", + "\t\t}", + "\t\top := \u0026Operator{Name: csv.Name, Namespace: csv.Namespace}", + "\t\tif keepCsvDetails {", + "\t\t\top.Csv = csv", + "\t\t}", + "\t\top.Phase = csv.Status.Phase", + "\t\tpackageAndVersion := strings.SplitN(csv.Name, \".\", maxSize)", + "\t\tif len(packageAndVersion) == 0 {", + "\t\t\tlog.Debug(\"Empty CSV Name (package.version), cannot extract a package or a version, skipping. Csv: %+v\", csv)", + "\t\t\tcontinue", + "\t\t}", + "\t\top.PackageFromCsvName = packageAndVersion[0]", + "\t\top.Version = csv.Spec.Version.String()", + "\t\t// Get at least one subscription and update the Operator object with it.", + "\t\tif getAtLeastOneSubscription(op, csv, allSubscriptions, allPackageManifests) {", + "\t\t\ttargetNamespaces, err := getOperatorTargetNamespaces(op.SubscriptionNamespace)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Failed to get target namespaces for operator %s: %v\", csv.Name, err)", + "\t\t\t} else {", + "\t\t\t\top.TargetNamespaces = targetNamespaces", + "\t\t\t\top.IsClusterWide = len(targetNamespaces) == 0", + "\t\t\t}", + "\t\t} else {", + "\t\t\tlog.Warn(\"Subscription not found for CSV: %s (ns %s)\", csv.Name, csv.Namespace)", + "\t\t}", + "\t\tlog.Info(\"Getting installplans for op %s (subs %s ns %s)\", op.Name, op.SubscriptionName, op.SubscriptionNamespace)", + "\t\t// Get at least one Install Plan and update the Operator object with it.", + "\t\tgetAtLeastOneInstallPlan(op, csv, allInstallPlans, allCatalogSources)", + "\t\toperators = append(operators, op)", + "\t}", + "\treturn operators", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + 
"source": [ + "func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Alpha.ClusterServiceVersion {", + "\tuniqueCsvsMap := map[string]*olmv1Alpha.ClusterServiceVersion{}", + "\tfor _, csv := range csvs {", + "\t\tuniqueCsvsMap[csv.Name] = csv", + "\t}", + "", + "\tuniqueCsvsList := []*olmv1Alpha.ClusterServiceVersion{}", + "\tlog.Info(\"Found %d unique CSVs\", len(uniqueCsvsMap))", + "\tfor name, csv := range uniqueCsvsMap {", + "\t\tlog.Info(\" CSV: %s\", name)", + "\t\tuniqueCsvsList = append(uniqueCsvsList, csv)", + "\t}", + "", + "\t// Sort by name: (1) creates a deterministic output, (2) makes UT easier.", + "\tsort.Slice(uniqueCsvsList, func(i, j int) bool { return uniqueCsvsList[i].Name \u003c uniqueCsvsList[j].Name })", + "\treturn uniqueCsvsList", + "}" + ] + }, + { + "name": "isNetworkAttachmentDefinitionConfigTypeSRIOV", + "qualifiedName": "isNetworkAttachmentDefinitionConfigTypeSRIOV", + "exported": false, + "signature": "func(string)(bool, error)", + "doc": "isNetworkAttachmentDefinitionConfigTypeSRIOV checks if a CNI configuration string contains an SR-IOV plugin\n\nThe function parses the JSON-formatted CNI config, handling both\nsingle-plugin and multi-plugin layouts. It looks for a \"type\" field or\niterates through the plugins array to find an entry with type \"sriov\",\nreturning true if found. 
Errors are produced for malformed JSON or unexpected\nstructures.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:449", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOV", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOV() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = \"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "\toc := clientsholder.GetClientsHolder()", + "", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(p.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn 
false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\tisSRIOV, err := isNetworkAttachmentDefinitionConfigTypeSRIOV(nad.Spec.Config)", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to know if network-attachment %s is sriov: %v\", networkName, err)", + "\t\t}", + "", + "\t\tlog.Debug(\"%s: NAD config: %s\", p, nad.Spec.Config)", + "\t\tif isSRIOV {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error) {", + "\tconst (", + "\t\ttypeSriov = \"sriov\"", + "\t)", + "", + "\ttype CNIConfig struct {", + "\t\tCniVersion string `json:\"cniVersion\"`", + "\t\tName string `json:\"name\"`", + "\t\tType *string `json:\"type,omitempty\"`", + "\t\tPlugins *[]struct {", + "\t\t\tType string `json:\"type\"`", + "\t\t} `json:\"plugins,omitempty\"`", + "\t}", + "", + "\tcniConfig := CNIConfig{}", + "\tif err := json.Unmarshal([]byte(nadConfig), \u0026cniConfig); err != nil {", + "\t\treturn false, fmt.Errorf(\"failed to unmarshal cni config %s: %v\", nadConfig, err)", + "\t}", + "", + "\t// If type is found, it's a single plugin CNI config.", + "\tif cniConfig.Type != nil {", + "\t\tlog.Debug(\"Single plugin config type found: %+v, type=%s\", cniConfig, *cniConfig.Type)", + "\t\treturn *cniConfig.Type == typeSriov, nil", + "\t}", + "", + "\tif cniConfig.Plugins == nil {", + "\t\treturn false, fmt.Errorf(\"invalid multi-plugins cni config: %s\", nadConfig)", + "\t}", + "", + "\tlog.Debug(\"CNI plugins: %+v\", *cniConfig.Plugins)", + "\tfor i := range *cniConfig.Plugins {", + "\t\tplugin := (*cniConfig.Plugins)[i]", + "\t\tif plugin.Type == typeSriov {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\t// No sriov plugin type found.", + "\treturn false, nil", + "}" + ] + }, + { + "name": 
"isNetworkAttachmentDefinitionSRIOVConfigMTUSet", + "qualifiedName": "isNetworkAttachmentDefinitionSRIOVConfigMTUSet", + "exported": false, + "signature": "func(string)(bool, error)", + "doc": "isNetworkAttachmentDefinitionSRIOVConfigMTUSet determines whether a SR-IOV plugin specifies an MTU\n\nThe function parses the JSON network attachment definition string into a CNI\nconfiguration structure, verifies that it contains multiple plugins, and then\niterates over those plugins to find one of type \"sriov\" with a positive MTU\nvalue. If such a plugin is found, it returns true; otherwise false. Errors\nare returned for malformed JSON or missing plugin list.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:406", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isNetworkAttachmentDefinitionSRIOVConfigMTUSet(nadConfig string) (bool, error) {", + "\tconst (", + "\t\ttypeSriov = \"sriov\"", + "\t)", + "", + "\ttype CNIConfig struct {", + "\t\tCniVersion string `json:\"cniVersion\"`", + "\t\tName string `json:\"name\"`", + "\t\tType *string `json:\"type,omitempty\"`", + "\t\tPlugins *[]struct {", + "\t\t\tType string `json:\"type\"`", + "\t\t\tMTU int `json:\"mtu\"`", + "\t\t} `json:\"plugins,omitempty\"`", + "\t}", + "", + "\tcniConfig := CNIConfig{}", + "\tif err := json.Unmarshal([]byte(nadConfig), \u0026cniConfig); err != nil {", + "\t\treturn false, fmt.Errorf(\"failed to unmarshal cni config %s: %v\", nadConfig, 
err)", + "\t}", + "", + "\tif cniConfig.Plugins == nil {", + "\t\treturn false, fmt.Errorf(\"invalid multi-plugins cni config: %s\", nadConfig)", + "\t}", + "", + "\tlog.Debug(\"CNI plugins: %+v\", *cniConfig.Plugins)", + "\tfor i := range *cniConfig.Plugins {", + "\t\tplugin := (*cniConfig.Plugins)[i]", + "\t\tif plugin.Type == typeSriov \u0026\u0026 plugin.MTU \u003e 0 {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\t// No sriov plugin type found.", + "\treturn false, nil", + "}" + ] + }, + { + "name": "isSkipHelmChart", + "qualifiedName": "isSkipHelmChart", + "exported": false, + "signature": "func(string, []configuration.SkipHelmChartList)(bool)", + "doc": "isSkipHelmChart determines whether a Helm chart should be excluded from processing\n\nThe function receives the name of a Helm release and a list of names to skip.\nIt checks if the list is empty, returning false immediately. Otherwise it\niterates through each entry; if a match is found it logs that the chart was\nskipped and returns true. 
If no match is found after the loop, it returns\nfalse.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:569", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = 
data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + "\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + 
"\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + "\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + 
"\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + "\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + 
"\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelmChartList) bool {", + "\tif len(skipHelmChartList) == 0 {", + "\t\treturn false", + "\t}", + "\tfor _, helm := range skipHelmChartList {", + "\t\tif helmName == helm.Name {", + "\t\t\tlog.Info(\"Helm chart with name %s was skipped\", helmName)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "searchPodInSlice", + "qualifiedName": "searchPodInSlice", + "exported": false, + "signature": "func(string, string, []*Pod)(*Pod)", + "doc": "searchPodInSlice Finds a pod in a list by name and namespace\n\nThe function receives a pod name, its namespace, and a slice of pod objects.\nIt builds an index map keyed on the namespaced name and looks up the\nrequested key, returning the matching pod if found or nil otherwise.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/operators.go:484", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "addOperandPodsToTestPods", + 
"kind": "function", + "source": [ + "func addOperandPodsToTestPods(operandPods []*Pod, env *TestEnvironment) {", + "\tfor _, operandPod := range operandPods {", + "\t\t// Check whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operandPod.Name, operandPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operand pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operand pod.", + "\t\t\ttestPod.IsOperand = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operand pod %v/%v added to test pod list\", operandPod.Namespace, operandPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operandPod)", + "\t\t}", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "addOperatorPodsToTestPods", + "kind": "function", + "source": [ + "func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment) {", + "\tfor _, operatorPod := range operatorPods {", + "\t\t// Check whether the pod was already discovered", + "\t\ttestPod := searchPodInSlice(operatorPod.Name, operatorPod.Namespace, env.Pods)", + "\t\tif testPod != nil {", + "\t\t\tlog.Info(\"Operator pod %v/%v already discovered.\", testPod.Namespace, testPod.Name)", + "\t\t\t// Make sure it's flagged as operator pod.", + "\t\t\ttestPod.IsOperator = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Operator pod %v/%v added to test pod list\", operatorPod.Namespace, operatorPod.Name)", + "\t\t\t// Append pod to the test pod list.", + "\t\t\tenv.Pods = append(env.Pods, operatorPod)", + "\t\t}", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func searchPodInSlice(name, namespace string, pods []*Pod) *Pod {", + "\t// Helper map to filter pods that have been already added", + "\tpodsMap := map[types.NamespacedName]*Pod{}", + "\tfor _, testPod := range pods {", + 
"\t\tpodsMap[types.NamespacedName{Namespace: testPod.Namespace, Name: testPod.Name}] = testPod", + "\t}", + "", + "\t// Search by namespace+name key", + "\tpodKey := types.NamespacedName{Namespace: namespace, Name: name}", + "\tif pod, found := podsMap[podKey]; found {", + "\t\treturn pod", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "sriovNetworkUsesMTU", + "qualifiedName": "sriovNetworkUsesMTU", + "exported": false, + "signature": "func([]unstructured.Unstructured, []unstructured.Unstructured, string)(bool)", + "doc": "sriovNetworkUsesMTU Checks whether a SriovNetwork has an MTU configured\n\nThe function iterates through all provided SriovNetworks and matches one by\nname to the given NetworkAttachmentDefinition. For each match it looks for a\nSriovNetworkNodePolicy in the same namespace that shares the same\nresourceName, then examines its spec for an MTU value greater than zero. If\nsuch a policy is found, true is returned; otherwise false.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:586", + "calls": [ + { + "name": "GetName", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "name": "NestedMap", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, 
LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "name": "NestedString", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "GetNamespace", + "kind": "function" + }, + { + "name": "GetNamespace", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "name": "NestedMap", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "name": "NestedString", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "name": "NestedInt64", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Pod.IsUsingSRIOVWithMTU", + "kind": "function", + "source": [ + "func (p *Pod) IsUsingSRIOVWithMTU() (bool, error) {", + "\tconst (", + "\t\tcncfNetworksAnnotation = 
\"k8s.v1.cni.cncf.io/networks\"", + "\t)", + "", + "\tcncfNetworks, exist := p.Annotations[cncfNetworksAnnotation]", + "\tif !exist {", + "\t\treturn false, nil", + "\t}", + "", + "\t// Get all CNCF network names", + "\tcncfNetworkNames := getCNCFNetworksNamesFromPodAnnotation(cncfNetworks)", + "", + "\t// For each CNCF network, get its network attachment definition and check", + "\t// whether its config's type is \"sriov\"", + "", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, networkName := range cncfNetworkNames {", + "\t\tlog.Debug(\"%s: Reviewing network-attachment definition %q\", p, networkName)", + "\t\tnad, err := oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(", + "\t\t\tp.Namespace).Get(context.TODO(), networkName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn false, fmt.Errorf(\"failed to get NetworkAttachment %s: %v\", networkName, err)", + "\t\t}", + "", + "\t\t// If the network-status annotation is not set, let's check the SriovNetwork/SriovNetworkNodePolicy CRs", + "\t\t// to see if the MTU is set there.", + "\t\tlog.Debug(\"Number of SriovNetworks: %d\", len(env.AllSriovNetworks))", + "\t\tlog.Debug(\"Number of SriovNetworkNodePolicies: %d\", len(env.AllSriovNetworkNodePolicies))", + "\t\tif sriovNetworkUsesMTU(env.AllSriovNetworks, env.AllSriovNetworkNodePolicies, nad.Name) {", + "\t\t\treturn true, nil", + "\t\t}", + "\t}", + "", + "\treturn false, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func sriovNetworkUsesMTU(sriovNetworks, sriovNetworkNodePolicies []unstructured.Unstructured, nadName string) bool {", + "\tfor _, sriovNetwork := range sriovNetworks {", + "\t\tnetworkName := sriovNetwork.GetName()", + "\t\tlog.Debug(\"Checking SriovNetwork %s\", networkName)", + "\t\tif networkName == nadName {", + "\t\t\tlog.Debug(\"SriovNetwork %s found to match the NAD name %s\", networkName, nadName)", + "", + "\t\t\t// Get the ResourceName from the 
SriovNetwork spec", + "\t\t\tspec, found, err := unstructured.NestedMap(sriovNetwork.Object, \"spec\")", + "\t\t\tif !found || err != nil {", + "\t\t\t\tlog.Debug(\"Failed to get spec from SriovNetwork %s: %v\", networkName, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tresourceName, found, err := unstructured.NestedString(spec, \"resourceName\")", + "\t\t\tif !found || err != nil {", + "\t\t\t\tlog.Debug(\"Failed to get resourceName from SriovNetwork %s: %v\", networkName, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tfor _, nodePolicy := range sriovNetworkNodePolicies {", + "\t\t\t\tpolicyNamespace := nodePolicy.GetNamespace()", + "\t\t\t\tnetworkNamespace := sriovNetwork.GetNamespace()", + "", + "\t\t\t\tlog.Debug(\"Checking SriovNetworkNodePolicy in namespace %s\", policyNamespace)", + "\t\t\t\tif policyNamespace == networkNamespace {", + "\t\t\t\t\t// Get the ResourceName and MTU from the SriovNetworkNodePolicy spec", + "\t\t\t\t\tpolicySpec, found, err := unstructured.NestedMap(nodePolicy.Object, \"spec\")", + "\t\t\t\t\tif !found || err != nil {", + "\t\t\t\t\t\tlog.Debug(\"Failed to get spec from SriovNetworkNodePolicy: %v\", err)", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\tpolicyResourceName, found, err := unstructured.NestedString(policySpec, \"resourceName\")", + "\t\t\t\t\tif !found || err != nil {", + "\t\t\t\t\t\tlog.Debug(\"Failed to get resourceName from SriovNetworkNodePolicy: %v\", err)", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\tif policyResourceName == resourceName {", + "\t\t\t\t\t\tmtu, found, err := unstructured.NestedInt64(policySpec, \"mtu\")", + "\t\t\t\t\t\tif found \u0026\u0026 err == nil \u0026\u0026 mtu \u003e 0 {", + "\t\t\t\t\t\t\treturn true", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "updateCrUnderTest", + "qualifiedName": "updateCrUnderTest", + "exported": false, + "signature": 
"func([]autodiscover.ScaleObject)([]ScaleObject)", + "doc": "updateCrUnderTest Transforms raw scale objects into internal representation\n\nThe function receives a slice of autodiscover.ScaleObject items, converts\neach entry into the provider's ScaleObject type by copying its scaling\ninformation and resource schema, and accumulates them in a new slice. It\nreturns this populated slice for use elsewhere in the test environment\nconstruction.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:496", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "buildTestEnvironment", + "kind": "function", + "source": [ + "func buildTestEnvironment() { //nolint:funlen,gocyclo", + "\tstart := time.Now()", + "\tenv = TestEnvironment{}", + "", + "\tenv.params = *configuration.GetTestParameters()", + "\tconfig, err := configuration.LoadConfiguration(env.params.ConfigFile)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Cannot load configuration file: %v\", err)", + "\t}", + "\tlog.Debug(\"CERTSUITE configuration: %+v\", config)", + "", + "\t// Wait for the probe pods to be ready before the autodiscovery starts.", + "\tif err := deployDaemonSet(config.ProbeDaemonSetNamespace); err != nil {", + "\t\tlog.Error(\"The TNF daemonset could not be deployed, err: %v\", err)", + "\t\t// Because of this failure, we are only able to run a certain amount of tests that do not rely", + "\t\t// on the existence of the daemonset probe pods.", + "\t\tenv.DaemonsetFailedToSpawn = true", + "\t}", + "", + "\tdata := autodiscover.DoAutoDiscover(\u0026config)", + "\t// OpenshiftVersion needs to be set asap, as other helper functions will use it here.", + "\tenv.OpenshiftVersion = data.OpenshiftVersion", + "\tenv.Config = config", + "\tenv.Crds = data.Crds", + "\tenv.AllInstallPlans = data.AllInstallPlans", + "\tenv.OperatorGroups, err = GetAllOperatorGroups()", + 
"\tif err != nil {", + "\t\tlog.Fatal(\"Cannot get OperatorGroups: %v\", err)", + "\t}", + "\tenv.AllSubscriptions = data.AllSubscriptions", + "\tenv.AllCatalogSources = data.AllCatalogSources", + "\tenv.AllPackageManifests = data.AllPackageManifests", + "\tenv.AllOperators = createOperators(data.AllCsvs, data.AllSubscriptions, data.AllPackageManifests, data.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.ClusterOperators = data.ClusterOperators", + "\tenv.AllCsvs = data.AllCsvs", + "\tenv.AllOperatorsSummary = getSummaryAllOperators(env.AllOperators)", + "\tenv.AllCrds = data.AllCrds", + "\tenv.Namespaces = data.Namespaces", + "\tenv.Nodes = createNodes(data.Nodes.Items)", + "\tenv.IstioServiceMeshFound = data.IstioServiceMeshFound", + "\tenv.ValidProtocolNames = append(env.ValidProtocolNames, data.ValidProtocolNames...)", + "\tfor i := range data.AbnormalEvents {", + "\t\taEvent := NewEvent(\u0026data.AbnormalEvents[i])", + "\t\tenv.AbnormalEvents = append(env.AbnormalEvents, \u0026aEvent)", + "\t}", + "", + "\t// Service accounts", + "\tenv.ServiceAccounts = data.ServiceAccounts", + "\tenv.AllServiceAccounts = data.AllServiceAccounts", + "\tenv.AllServiceAccountsMap = make(map[string]*corev1.ServiceAccount)", + "\tfor i := 0; i \u003c len(data.AllServiceAccounts); i++ {", + "\t\tmapIndex := data.AllServiceAccounts[i].Namespace + data.AllServiceAccounts[i].Name", + "\t\tenv.AllServiceAccountsMap[mapIndex] = data.AllServiceAccounts[i]", + "\t}", + "\t// Pods", + "\tpods := data.Pods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.Pods = append(env.Pods, \u0026aNewPod)", + "\t}", + "\tpods = data.AllPods", + "\tfor i := 0; i \u003c len(pods); i++ {", + "\t\taNewPod := NewPod(\u0026pods[i])", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\tenv.AllPods = append(env.AllPods, \u0026aNewPod)", + 
"\t}", + "\tenv.ProbePods = make(map[string]*corev1.Pod)", + "\tfor i := 0; i \u003c len(data.ProbePods); i++ {", + "\t\tnodeName := data.ProbePods[i].Spec.NodeName", + "\t\tenv.ProbePods[nodeName] = \u0026data.ProbePods[i]", + "\t}", + "", + "\tenv.PodStates = data.PodStates", + "", + "\tcsvPods := []*Pod{}", + "\tenv.CSVToPodListMap = make(map[string][]*Pod)", + "\tfor csv, podList := range data.CSVToPodListMap {", + "\t\tvar pods []*Pod", + "\t\tfor i := 0; i \u003c len(podList); i++ {", + "\t\t\taNewPod := NewPod(podList[i])", + "\t\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\t\taNewPod.IsOperator = true", + "\t\t\tpods = append(pods, \u0026aNewPod)", + "\t\t\tlog.Info(\"CSV: %v, Operator Pod: %v/%v\", csv, podList[i].Namespace, podList[i].Name)", + "\t\t}", + "\t\tenv.CSVToPodListMap[csv.String()] = pods", + "\t\tcsvPods = append(csvPods, pods...)", + "\t}", + "", + "\t// Add operator pods to list of normal pods to test.", + "\taddOperatorPodsToTestPods(csvPods, \u0026env)", + "", + "\t// Best effort mode autodiscovery for operand pods.", + "\toperandPods := []*Pod{}", + "\tfor _, pod := range data.OperandPods {", + "\t\taNewPod := NewPod(pod)", + "\t\taNewPod.AllServiceAccountsMap = \u0026env.AllServiceAccountsMap", + "\t\taNewPod.IsOperand = true", + "\t\toperandPods = append(operandPods, \u0026aNewPod)", + "\t}", + "", + "\taddOperandPodsToTestPods(operandPods, \u0026env)", + "\t// Add operator pods' containers to the list.", + "\tfor _, pod := range env.Pods {", + "\t\t// Note: 'getPodContainers' is returning a filtered list of Container objects.", + "\t\tenv.Containers = append(env.Containers, getPodContainers(pod.Pod, true)...)", + "\t}", + "", + "\tlog.Info(\"Found pods in %d csvs\", len(env.CSVToPodListMap))", + "", + "\tenv.OCPStatus = data.OCPStatus", + "\tenv.K8sVersion = data.K8sVersion", + "\tenv.ResourceQuotas = data.ResourceQuotaItems", + "\tenv.PodDisruptionBudgets = data.PodDisruptionBudgets", + 
"\tenv.PersistentVolumes = data.PersistentVolumes", + "\tenv.PersistentVolumeClaims = data.PersistentVolumeClaims", + "\tenv.ClusterRoleBindings = data.ClusterRoleBindings", + "\tenv.RoleBindings = data.RoleBindings", + "\tenv.Roles = data.Roles", + "\tenv.Services = data.Services", + "\tenv.AllServices = data.AllServices", + "\tenv.NetworkPolicies = data.NetworkPolicies", + "\tfor _, nsHelmChartReleases := range data.HelmChartReleases {", + "\t\tfor _, helmChartRelease := range nsHelmChartReleases {", + "\t\t\tif !isSkipHelmChart(helmChartRelease.Name, config.SkipHelmChartList) {", + "\t\t\t\tenv.HelmChartReleases = append(env.HelmChartReleases, helmChartRelease)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tfor i := range data.Deployments {", + "\t\taNewDeployment := \u0026Deployment{", + "\t\t\t\u0026data.Deployments[i],", + "\t\t}", + "\t\tenv.Deployments = append(env.Deployments, aNewDeployment)", + "\t}", + "\tfor i := range data.StatefulSet {", + "\t\taNewStatefulSet := \u0026StatefulSet{", + "\t\t\t\u0026data.StatefulSet[i],", + "\t\t}", + "\t\tenv.StatefulSets = append(env.StatefulSets, aNewStatefulSet)", + "\t}", + "", + "\tenv.ScaleCrUnderTest = updateCrUnderTest(data.ScaleCrUnderTest)", + "\tenv.HorizontalScaler = data.Hpas", + "\tenv.StorageClassList = data.StorageClasses", + "", + "\tenv.ExecutedBy = data.ExecutedBy", + "\tenv.PartnerName = data.PartnerName", + "\tenv.CollectorAppPassword = data.CollectorAppPassword", + "\tenv.CollectorAppEndpoint = data.CollectorAppEndpoint", + "\tenv.ConnectAPIKey = data.ConnectAPIKey", + "\tenv.ConnectProjectID = data.ConnectProjectID", + "\tenv.ConnectAPIProxyURL = data.ConnectAPIProxyURL", + "\tenv.ConnectAPIProxyPort = data.ConnectAPIProxyPort", + "\tenv.ConnectAPIBaseURL = data.ConnectAPIBaseURL", + "", + "\toperators := createOperators(data.Csvs, data.AllSubscriptions, data.AllPackageManifests,", + "\t\tdata.AllInstallPlans, data.AllCatalogSources, false, true)", + "\tenv.Operators = operators", + 
"\tlog.Info(\"Operators found: %d\", len(env.Operators))", + "\t// SR-IOV", + "\tenv.SriovNetworks = data.SriovNetworks", + "\tenv.SriovNetworkNodePolicies = data.SriovNetworkNodePolicies", + "\tenv.AllSriovNetworks = data.AllSriovNetworks", + "\tenv.AllSriovNetworkNodePolicies = data.AllSriovNetworkNodePolicies", + "\tenv.NetworkAttachmentDefinitions = data.NetworkAttachmentDefinitions", + "\tfor _, pod := range env.Pods {", + "\t\tisCreatedByDeploymentConfig, err := pod.CreatedByDeploymentConfig()", + "\t\tif err != nil {", + "\t\t\tlog.Warn(\"Pod %q failed to get parent resource: %v\", pod, err)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isCreatedByDeploymentConfig {", + "\t\t\tlog.Warn(\"Pod %q has been deployed using a DeploymentConfig, please use Deployment or StatefulSet instead.\", pod.String())", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed the test environment build process in %.2f seconds\", time.Since(start).Seconds())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func updateCrUnderTest(scaleCrUnderTest []autodiscover.ScaleObject) []ScaleObject {", + "\tvar scaleCrUndeTestTemp []ScaleObject", + "\tfor i := range scaleCrUnderTest {", + "\t\taNewScaleCrUnderTest := ScaleObject{Scale: CrScale{scaleCrUnderTest[i].Scale},", + "\t\t\tGroupResourceSchema: scaleCrUnderTest[i].GroupResourceSchema}", + "\t\tscaleCrUndeTestTemp = append(scaleCrUndeTestTemp, aNewScaleCrUnderTest)", + "\t}", + "\treturn scaleCrUndeTestTemp", + "}" + ] + } + ], + "globals": [ + { + "name": "MasterLabels", + "exported": true, + "type": "", + "doc": "Node's roles labels. Node is role R if it has **any** of the labels of each list.\nMaster's role label \"master\" is deprecated since k8s 1.20.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:72" + }, + { + "name": "WorkerLabels", + "exported": true, + "type": "", + "doc": "Node's roles labels. 
Node is role R if it has **any** of the labels of each list.\nMaster's role label \"master\" is deprecated since k8s 1.20.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:71" + }, + { + "name": "env", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:257" + }, + { + "name": "ignoredContainerNames", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/containers.go:40" + }, + { + "name": "loaded", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:258" + } + ], + "consts": [ + { + "name": "AffinityRequiredKey", + "exported": true, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:56" + }, + { + "name": "CniNetworksStatusKey", + "exported": true, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:60" + }, + { + "name": "DaemonSetName", + "exported": true, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:58" + }, + { + "name": "HugePages1Gi", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:38" + }, + { + "name": "HugePages2Mi", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:37" + }, + { + "name": "IstioProxyContainerName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:42" + }, + { + "name": "containerName", + "exported": false, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:57" + }, + { + "name": "cscosName", + "exported": false, + "doc": "CentOS 
Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:64" + }, + { + "name": "deploymentConfig", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:41" + }, + { + "name": "expectedValue", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:196" + }, + { + "name": "hugePages", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:39" + }, + { + "name": "isHyperThreadCommand", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/nodes.go:197" + }, + { + "name": "probePodsTimeout", + "exported": false, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:59" + }, + { + "name": "replicationController", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/provider/pods.go:40" + }, + { + "name": "rhcosName", + "exported": false, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:63" + }, + { + "name": "rhelName", + "exported": false, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:65" + }, + { + "name": "skipConnectivityTestsLabel", + "exported": false, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:61" + }, + { + "name": "skipMultusConnectivityTestsLabel", + "exported": false, + "doc": "CentOS Stream CoreOS starts being used instead of rhcos from OCP 4.13 latest.", + "position": "/Users/deliedit/dev/certsuite/pkg/provider/provider.go:62" + } + ] + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "scheduling", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "strconv", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GetProcessCPUScheduling", + "qualifiedName": "GetProcessCPUScheduling", + "exported": true, + "signature": "func(int, *provider.Container)(string, int, error)", + "doc": "GetProcessCPUScheduling retrieves a process's CPU scheduling policy and priority\n\nThe function runs the \"chrt -p\" command inside a node probe pod to gather\nscheduling information for a given PID within a container. It parses the\ncommand output to extract the scheduling policy string and numeric priority,\nhandling errors when the probe context or command fails. 
The results are\nreturned along with any error encountered during execution.", + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:143", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetNodeProbePodContext", + "kind": "function", + "source": [ + "func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) {", + "\tprobePod := env.ProbePods[node]", + "\tif probePod == nil {", + "\t\treturn clientsholder.Context{}, fmt.Errorf(\"probe pod not found on node %s\", node)", + "\t}", + "", + "\treturn clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name), nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": 
"ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetPodName", + "kind": "function" + }, + { + "name": "parseSchedulingPolicyAndPriority", + "kind": "function", + "source": [ + "func parseSchedulingPolicyAndPriority(chrtCommandOutput string) (schedPolicy string, schedPriority int, err error) {", + "\t/*\tSample output:", + "\t\tpid 476's current scheduling policy: SCHED_OTHER", + "\t\tpid 476's current scheduling priority: 0*/", + "", + "\tlines := strings.Split(chrtCommandOutput, newLineCharacter)", + "", + "\tfor _, line := range lines {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\ttokens := strings.Fields(line)", + "\t\tlastToken := tokens[len(tokens)-1]", + "", + "\t\tswitch {", + "\t\tcase strings.Contains(line, CurrentSchedulingPolicy):", + "\t\t\tschedPolicy = lastToken", + "\t\tcase strings.Contains(line, CurrentSchedulingPriority):", + "\t\t\tschedPriority, err = strconv.Atoi(lastToken)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error obtained during strconv %v\", err)", + "\t\t\t\treturn schedPolicy, InvalidPriority, err", + "\t\t\t}", + "\t\tdefault:", + "\t\t\treturn schedPolicy, InvalidPriority, fmt.Errorf(\"invalid: %s\", line)", + "\t\t}", + "\t}", + "\treturn schedPolicy, schedPriority, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testRtAppsNoExecProbes", + "kind": "function", + "source": [ + "func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects 
[]*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcuts := env.GetNonGuaranteedPodContainersWithoutHostPID()", + "\tfor _, cut := range cuts {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !cut.HasExecProbes() {", + "\t\t\tcheck.LogInfo(\"Container %q does not define exec probes\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not define exec probes\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprocesses, err := crclient.GetContainerProcesses(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not determine the processes pids for container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the processes pids for container\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\tnotExecProbeProcesses, compliantObjectsProbes := filterProbeProcesses(processes, cut)", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsProbes...)", + "\t\tallProcessesCompliant := true", + "\t\tfor _, p := range notExecProbeProcesses {", + "\t\t\tcheck.LogInfo(\"Testing process %q\", p)", + "\t\t\tschedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)", + "\t\t\tif err != nil {", + "\t\t\t\t// If the process does not exist anymore it means that it has finished since the time the process list", + "\t\t\t\t// was retrieved. 
In this case, just ignore the error and continue processing the rest of the processes.", + "\t\t\t\tif strings.Contains(err.Error(), noProcessFoundErrMsg) {", + "\t\t\t\t\tcheck.LogWarn(\"Container process %q disappeared\", p)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process disappeared\", true).", + "\t\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogError(\"Could not determine the scheduling policy for container %q (pid=%d), err: %v\", cut, p.Pid, err)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the scheduling policy for container\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif scheduling.PolicyIsRT(schedPolicy) {", + "\t\t\t\tcheck.LogError(\"Container %q defines exec probes while having a RT scheduling policy for process %q\", cut, p)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes while having a RT scheduling policy\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif allProcessesCompliant {", + "\t\t\tcheck.LogInfo(\"Container %q defines exec probes but does not have a RT scheduling policy\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes but does not have a RT scheduling 
policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {", + "\tlog.Info(\"Checking the scheduling policy/priority in %v for pid=%d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"chrt -p %d\", pid)", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := crclient.GetNodeProbePodContext(testContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", 0, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\tstdout, stderr, err := ch.ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"command %q failed to run in probe pod %s (node %s): %v (stderr: %v)\",", + "\t\t\tcommand, ctx.GetPodName(), testContainer.NodeName, err, stderr)", + "\t}", + "", + "\tschedulePolicy, schedulePriority, err = parseSchedulingPolicyAndPriority(stdout)", + "\tif err != nil {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"error getting the scheduling policy and priority for %v : %v\", testContainer, err)", + "\t}", + "\tlog.Info(\"pid %d in %v has the cpu scheduling policy %s, scheduling priority %d\", pid, testContainer, schedulePolicy, schedulePriority)", + "", + "\treturn schedulePolicy, schedulePriority, err", + "}" + ] + }, + { + "name": "PolicyIsRT", + "qualifiedName": "PolicyIsRT", + "exported": true, + "signature": "func(string)(bool)", + "doc": "PolicyIsRT Determines whether a scheduling policy represents a real‑time policy\n\nThe function receives the name of a Linux CPU scheduling policy and returns\ntrue if it matches either the First‑In‑First‑Out or Round‑Robin\npolicies, which 
are considered real‑time in this context. Any other policy\nstring results in false, indicating non‑real‑time behavior.", + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:176", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testRtAppsNoExecProbes", + "kind": "function", + "source": [ + "func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcuts := env.GetNonGuaranteedPodContainersWithoutHostPID()", + "\tfor _, cut := range cuts {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !cut.HasExecProbes() {", + "\t\t\tcheck.LogInfo(\"Container %q does not define exec probes\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not define exec probes\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprocesses, err := crclient.GetContainerProcesses(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not determine the processes pids for container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the processes pids for container\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\tnotExecProbeProcesses, compliantObjectsProbes := filterProbeProcesses(processes, cut)", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsProbes...)", + "\t\tallProcessesCompliant := true", + "\t\tfor _, p := range notExecProbeProcesses {", + "\t\t\tcheck.LogInfo(\"Testing process %q\", p)", + "\t\t\tschedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)", + "\t\t\tif err != nil {", + "\t\t\t\t// If the process does not exist anymore it means 
that it has finished since the time the process list", + "\t\t\t\t// was retrieved. In this case, just ignore the error and continue processing the rest of the processes.", + "\t\t\t\tif strings.Contains(err.Error(), noProcessFoundErrMsg) {", + "\t\t\t\t\tcheck.LogWarn(\"Container process %q disappeared\", p)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process disappeared\", true).", + "\t\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogError(\"Could not determine the scheduling policy for container %q (pid=%d), err: %v\", cut, p.Pid, err)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the scheduling policy for container\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif scheduling.PolicyIsRT(schedPolicy) {", + "\t\t\t\tcheck.LogError(\"Container %q defines exec probes while having a RT scheduling policy for process %q\", cut, p)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes while having a RT scheduling policy\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif allProcessesCompliant {", + "\t\t\tcheck.LogInfo(\"Container %q defines exec probes but does not have a RT scheduling policy\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, 
cut.Podname, cut.Name, \"Container defines exec probes but does not have a RT scheduling policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PolicyIsRT(schedPolicy string) bool {", + "\treturn schedPolicy == SchedulingFirstInFirstOut || schedPolicy == SchedulingRoundRobin", + "}" + ] + }, + { + "name": "ProcessPidsCPUScheduling", + "qualifiedName": "ProcessPidsCPUScheduling", + "exported": true, + "signature": "func([]*crclient.Process, *provider.Container, string, *log.Logger)([]*testhelper.ReportObject)", + "doc": "ProcessPidsCPUScheduling Evaluates CPU scheduling compliance for container processes\n\nThe function iterates over a list of process objects, retrieves each\nprocess's CPU scheduling policy and priority, and checks them against the\nspecified scheduling . For each process it records whether it meets the\nrequirements, creating a report object that includes scheduling details and\narguments. 
The result is two slices: one for compliant processes and another\nfor non‑compliant ones.", + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:102", + "calls": [ + { + "name": "Debug", + "kind": "function" + }, + { + "name": "GetProcessCPUSchedulingFn", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "SetContainerProcessValues", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "SetContainerProcessValues", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": 
"testSchedulingPolicyInCPUPool", + "kind": "function", + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for each processes running in that pid namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + 
"\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string, logger *log.Logger) (compliantContainerPids, nonCompliantContainerPids []*testhelper.ReportObject) {", + "\thasCPUSchedulingConditionSuccess := false", + "\tfor _, process := range processes {", + "\t\tlogger.Debug(\"Testing process %q\", process)", + "\t\tschedulePolicy, schedulePriority, err := GetProcessCPUSchedulingFn(process.Pid, testContainer)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Unable to get the scheduling policy and priority : %v\", err)", + "\t\t\treturn compliantContainerPids, nonCompliantContainerPids", + "\t\t}", + "", + "\t\tswitch check {", + "\t\tcase SharedCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority == 0", + "\t\tcase ExclusiveCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority == 0 || (schedulePriority \u003c 10 \u0026\u0026 (schedulePolicy == SchedulingRoundRobin || schedulePolicy == SchedulingFirstInFirstOut))", + "\t\tcase IsolatedCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority \u003e= 10 \u0026\u0026 (schedulePolicy == SchedulingRoundRobin || schedulePolicy == SchedulingFirstInFirstOut)", + "\t\t}", + "", + "\t\tif !hasCPUSchedulingConditionSuccess {", + "\t\t\tlogger.Error(\"Process %q in Container %q with cpu scheduling policy=%s, priority=%d did not satisfy cpu scheduling requirements\", process, testContainer, schedulePolicy, schedulePriority)", + "\t\t\taPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, \"process does not satisfy: \"+schedulingRequirements[check], false).", + "\t\t\t\tSetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)", + "\t\t\tnonCompliantContainerPids = 
append(nonCompliantContainerPids, aPidOut)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlogger.Info(\"Process %q in Container %q with cpu scheduling policy=%s, priority=%d satisfies cpu scheduling requirements\", process, testContainer, schedulePolicy, schedulePriority)", + "\t\taPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, \"process satisfies: \"+schedulingRequirements[check], true).", + "\t\t\tSetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)", + "\t\tcompliantContainerPids = append(compliantContainerPids, aPidOut)", + "\t}", + "\treturn compliantContainerPids, nonCompliantContainerPids", + "}" + ] + }, + { + "name": "parseSchedulingPolicyAndPriority", + "qualifiedName": "parseSchedulingPolicyAndPriority", + "exported": false, + "signature": "func(string)(string, int, error)", + "doc": "parseSchedulingPolicyAndPriority Extracts CPU scheduling policy and priority from chrt command output\n\nThe function parses the string produced by the \"chrt -p\" command, looking for\nlines that indicate the current scheduling policy or priority. It splits the\noutput into lines, tokenizes each line, and captures the last word as either\nthe policy name or a numeric priority value. 
If parsing fails or an\nunexpected line appears, it returns an error; otherwise it provides the\nextracted policy string and integer priority.", + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:59", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "GetProcessCPUScheduling", + "kind": "function", + "source": [ + "func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {", + "\tlog.Info(\"Checking the scheduling policy/priority in %v for pid=%d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"chrt -p %d\", pid)", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := crclient.GetNodeProbePodContext(testContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", 0, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\tstdout, stderr, err := ch.ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"command %q failed to run in 
probe pod %s (node %s): %v (stderr: %v)\",", + "\t\t\tcommand, ctx.GetPodName(), testContainer.NodeName, err, stderr)", + "\t}", + "", + "\tschedulePolicy, schedulePriority, err = parseSchedulingPolicyAndPriority(stdout)", + "\tif err != nil {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"error getting the scheduling policy and priority for %v : %v\", testContainer, err)", + "\t}", + "\tlog.Info(\"pid %d in %v has the cpu scheduling policy %s, scheduling priority %d\", pid, testContainer, schedulePolicy, schedulePriority)", + "", + "\treturn schedulePolicy, schedulePriority, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func parseSchedulingPolicyAndPriority(chrtCommandOutput string) (schedPolicy string, schedPriority int, err error) {", + "\t/*\tSample output:", + "\t\tpid 476's current scheduling policy: SCHED_OTHER", + "\t\tpid 476's current scheduling priority: 0*/", + "", + "\tlines := strings.Split(chrtCommandOutput, newLineCharacter)", + "", + "\tfor _, line := range lines {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\ttokens := strings.Fields(line)", + "\t\tlastToken := tokens[len(tokens)-1]", + "", + "\t\tswitch {", + "\t\tcase strings.Contains(line, CurrentSchedulingPolicy):", + "\t\t\tschedPolicy = lastToken", + "\t\tcase strings.Contains(line, CurrentSchedulingPriority):", + "\t\t\tschedPriority, err = strconv.Atoi(lastToken)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error obtained during strconv %v\", err)", + "\t\t\t\treturn schedPolicy, InvalidPriority, err", + "\t\t\t}", + "\t\tdefault:", + "\t\t\treturn schedPolicy, InvalidPriority, fmt.Errorf(\"invalid: %s\", line)", + "\t\t}", + "\t}", + "\treturn schedPolicy, schedPriority, nil", + "}" + ] + } + ], + "globals": [ + { + "name": "CrcClientExecCommandContainerNSEnter", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:47" + }, + { + "name": 
"GetProcessCPUSchedulingFn", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:48" + }, + { + "name": "schedulingRequirements", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:90" + } + ], + "consts": [ + { + "name": "CurrentSchedulingPolicy", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:32" + }, + { + "name": "CurrentSchedulingPriority", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:33" + }, + { + "name": "ExclusiveCPUScheduling", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:37" + }, + { + "name": "InvalidPriority", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:43" + }, + { + "name": "IsolatedCPUScheduling", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:38" + }, + { + "name": "SchedulingFirstInFirstOut", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:41" + }, + { + "name": "SchedulingRoundRobin", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:40" + }, + { + "name": "SharedCPUScheduling", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:36" + }, + { + "name": "newLineCharacter", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/pkg/scheduling/scheduling.go:34" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "stringhelper", + "files": 1, + "imports": [ + "fmt", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "HasAtLeastOneCommonElement", + "qualifiedName": "HasAtLeastOneCommonElement", + "exported": true, + "signature": "func([]string, []string)(bool)", + 
"doc": "HasAtLeastOneCommonElement verifies whether two string collections contain a shared value\n\nThe routine iterates over the second slice and checks each element against\nthe first using a helper that compares trimmed strings for equality. If any\nmatch is found, it immediately returns true; otherwise it completes the loop\nand returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/stringhelper/stringhelper.go:66", + "calls": [ + { + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func HasAtLeastOneCommonElement(s1, s2 []string) bool {", + "\tfor _, v := range s2 {", + "\t\tif StringInSlice(s1, v, false) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "PointerToString", + "qualifiedName": "PointerToString", + "exported": true, + "signature": "func(*T)(string)", + "doc": "PointerToString converts a pointer to its string representation\n\nWhen the argument is nil, it returns \"nil\"; otherwise it dereferences the\npointer and formats the value using standard printing rules. 
The function\nworks for any type thanks to generics, making it useful in log traces or\ndebugging output.", + "position": "/Users/deliedit/dev/certsuite/pkg/stringhelper/stringhelper.go:96", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprint", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.IsContainerRunAsNonRoot", + "kind": "function", + "source": [ + "func (c *Container) IsContainerRunAsNonRoot(podRunAsNonRoot *bool) (isContainerRunAsNonRoot bool, reason string) {", + "\tif c.SecurityContext != nil \u0026\u0026 c.SecurityContext.RunAsNonRoot != nil {", + "\t\treturn *c.SecurityContext.RunAsNonRoot, fmt.Sprintf(\"RunAsNonRoot is set to %t at the container level, overriding a %v value defined at pod level\",", + "\t\t\t*c.SecurityContext.RunAsNonRoot, stringhelper.PointerToString(podRunAsNonRoot))", + "\t}", + "", + "\tif podRunAsNonRoot != nil {", + "\t\treturn *podRunAsNonRoot, fmt.Sprintf(\"RunAsNonRoot is set to nil at container level and inheriting a %t value from the pod level RunAsNonRoot setting\", *podRunAsNonRoot)", + "\t}", + "", + "\treturn false, \"RunAsNonRoot is set to nil at pod and container level\"", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Container.IsContainerRunAsNonRootUserID", + "kind": "function", + "source": [ + "func (c *Container) IsContainerRunAsNonRootUserID(podRunAsNonRootUserID *int64) (isContainerRunAsNonRootUserID bool, reason string) {", + "\tif c.SecurityContext != nil \u0026\u0026 c.SecurityContext.RunAsUser != nil {", + "\t\treturn *c.SecurityContext.RunAsUser != 0, fmt.Sprintf(\"RunAsUser is set to %v at the container level, overriding a %s value defined at pod level\",", + "\t\t\t*c.SecurityContext.RunAsUser, stringhelper.PointerToString(podRunAsNonRootUserID))", + "\t}", + "", + "\tif podRunAsNonRootUserID != nil {", + "\t\treturn 
*podRunAsNonRootUserID != 0, fmt.Sprintf(\"RunAsUser is set to nil at container level and inheriting a %v value from the pod level RunAsUser setting\", *podRunAsNonRootUserID)", + "\t}", + "", + "\treturn false, \"RunAsUser is set to nil at pod and container level\"", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PointerToString[T any](p *T) string {", + "\tif p == nil {", + "\t\treturn \"nil\"", + "\t} else {", + "\t\treturn fmt.Sprint(*p)", + "\t}", + "}" + ] + }, + { + "name": "RemoveEmptyStrings", + "qualifiedName": "RemoveEmptyStrings", + "exported": true, + "signature": "func([]string)([]string)", + "doc": "RemoveEmptyStrings Filters out empty entries from a slice\n\nThis function iterates over an input list of strings, selecting only those\nthat are not empty. It builds a new slice containing the non-empty values and\nreturns it. The original slice is left unchanged.", + "position": "/Users/deliedit/dev/certsuite/pkg/stringhelper/stringhelper.go:80", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func RemoveEmptyStrings(s []string) []string {", + "\tvar r []string", + "\tfor _, str := range s {", + "\t\tif str != \"\" {", + "\t\t\tr = append(r, str)", + "\t\t}", + "\t}", + "\treturn r", + "}" + ] + }, + { + "name": "StringInSlice", + "qualifiedName": "StringInSlice", + "exported": true, + "signature": "func([]T, T, bool)(bool)", + "doc": "StringInSlice Checks if a value exists in a string slice\n\nThe function iterates over the provided slice, trimming whitespace from each\nelement before comparison. If containsCheck is false it tests for exact\nequality; otherwise it checks whether the element contains the target\nsubstring. 
It returns true as soon as a match is found, otherwise false.", + "position": "/Users/deliedit/dev/certsuite/pkg/stringhelper/stringhelper.go:30", + "calls": [ + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "getServices", + "kind": "function", + "source": [ + "func getServices(oc corev1client.CoreV1Interface, namespaces, ignoreList []string) (allServices []*corev1.Service, err error) {", + "\tfor _, ns := range namespaces {", + "\t\ts, err := oc.Services(ns).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\treturn allServices, err", + "\t\t}", + "\t\tfor i := range s.Items {", + "\t\t\tif stringhelper.StringInSlice(ignoreList, s.Items[i].Name, false) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tallServices = append(allServices, \u0026s.Items[i])", + "\t\t}", + "\t}", + "\treturn allServices, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover", + "name": "isIstioServiceMeshInstalled", + "kind": "function", + "source": [ + "func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs []string) bool {", + "\t// The Istio namespace must be present", + "\tif !stringhelper.StringInSlice(allNs, istioNamespace, false) {", + "\t\tlog.Info(\"Istio Service Mesh not present (the namespace %q does not exists)\", istioNamespace)", + "\t\treturn false", + "\t}", + "", + "\t// The Deployment \"istiod\" must be present in an active service mesh", + "\t_, err := 
appClient.Deployments(istioNamespace).Get(context.TODO(), istioDeploymentName, metav1.GetOptions{})", + "\tif errors.IsNotFound(err) {", + "\t\tlog.Warn(\"The Istio Deployment %q is missing (but the Istio namespace exists)\", istioDeploymentName)", + "\t\treturn false", + "\t} else if err != nil {", + "\t\tlog.Error(\"Failed getting Deployment %q\", istioDeploymentName)", + "\t\treturn false", + "\t}", + "", + "\tlog.Info(\"Istio Service Mesh detected\")", + "", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "GetTestSuites", + "kind": "function", + "source": [ + "func GetTestSuites() []string {", + "\t// Collect all of the unique test suites from the resultsDB", + "\tvar suites []string", + "\tfor key := range resultsDB {", + "\t\t// Only append to the slice if it does not already exist", + "\t\tif !stringhelper.StringInSlice(suites, key, false) {", + "\t\t\tsuites = append(suites, key)", + "\t\t}", + "\t}", + "\treturn suites", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "BetaRHCOSVersionsFoundToMatch", + "kind": "function", + "source": [ + "func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool {", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "\tmachineVersion = FindMajorMinor(machineVersion)", + "", + "\t// Check if the versions exist in the beta list", + "\tif !stringhelper.StringInSlice(ocpBetaVersions, ocpVersion, false) || !stringhelper.StringInSlice(ocpBetaVersions, machineVersion, false) {", + "\t\treturn false", + "\t}", + "", + "\t// Check if the versions match", + "\treturn ocpVersion == machineVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.IsControlPlaneNode", + "kind": "function", + "source": [ + "func (node *Node) IsControlPlaneNode() bool {", + "\tfor nodeLabel := range node.Data.Labels {", + "\t\tif 
stringhelper.StringInSlice(MasterLabels, nodeLabel, true) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.IsWorkerNode", + "kind": "function", + "source": [ + "func (node *Node) IsWorkerNode() bool {", + "\tfor nodeLabel := range node.Data.Labels {", + "\t\tif stringhelper.StringInSlice(WorkerLabels, nodeLabel, true) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "HasAtLeastOneCommonElement", + "kind": "function", + "source": [ + "func HasAtLeastOneCommonElement(s1, s2 []string) bool {", + "\tfor _, v := range s2 {", + "\t\tif StringInSlice(s1, v, false) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "SubSlice", + "kind": "function", + "source": [ + "func SubSlice(s, sub []string) bool {", + "\tfor _, v := range sub {", + "\t\tif !StringInSlice(s, v, false) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "isContainerCapabilitySet", + "kind": "function", + "source": [ + "func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool {", + "\tif containerCapabilities == nil {", + "\t\treturn false", + "\t}", + "", + "\tif len(containerCapabilities.Add) == 0 {", + "\t\treturn false", + "\t}", + "", + "\tif stringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(\"ALL\"), true) ||", + "\t\tstringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(capability), true) {", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSysPtraceCapability", + "kind": "function", + "source": [ + "func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetShareProcessNamespacePods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tsysPtraceEnabled := false", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif cut.SecurityContext == nil ||", + "\t\t\t\tcut.SecurityContext.Capabilities == nil ||", + "\t\t\t\tlen(cut.SecurityContext.Capabilities.Add) == 0 {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif stringhelper.StringInSlice(cut.SecurityContext.Capabilities.Add, \"SYS_PTRACE\", false) {", + "\t\t\t\tcheck.LogInfo(\"Container %q defines the SYS_PTRACE capability\", cut)", + "\t\t\t\tsysPtraceEnabled = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !sysPtraceEnabled {", + "\t\t\tcheck.LogError(\"Pod %q has process namespace sharing enabled but no container allowing the SYS_PTRACE capability.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled but no container allowing the SYS_PTRACE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": 
"TestCrsNamespaces", + "kind": "function", + "source": [ + "func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespaces []string, logger *log.Logger) (invalidCrs map[string]map[string][]string, err error) {", + "\t// Initialize the top level map", + "\tinvalidCrs = make(map[string]map[string][]string)", + "\tfor _, crd := range crds {", + "\t\tcrNamespaces, err := getCrsPerNamespaces(crd)", + "\t\tif err != nil {", + "\t\t\treturn invalidCrs, fmt.Errorf(\"failed to get CRs for CRD %s - Error: %v\", crd.Name, err)", + "\t\t}", + "\t\tfor namespace, crNames := range crNamespaces {", + "\t\t\tif !stringhelper.StringInSlice(configNamespaces, namespace, false) {", + "\t\t\t\tlogger.Error(\"CRD: %q (kind:%q/ plural:%q) has CRs %v deployed in namespace %q not in configured namespaces %v\",", + "\t\t\t\t\tcrd.Name, crd.Spec.Names.Kind, crd.Spec.Names.Plural, crNames, namespace, configNamespaces)", + "\t\t\t\t// Initialize this map dimension before use", + "\t\t\t\tif invalidCrs[crd.Name] == nil {", + "\t\t\t\t\tinvalidCrs[crd.Name] = make(map[string][]string)", + "\t\t\t\t}", + "\t\t\t\tinvalidCrs[crd.Name][namespace] = append(invalidCrs[crd.Name][namespace], crNames...)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn invalidCrs, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "checkContainCategory", + "kind": "function", + "source": [ + "func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAddCapabilities []string) bool {", + "\tfor _, ncc := range addCapability {", + "\t\tif !stringhelper.StringInSlice(referenceCategoryAddCapabilities, string(ncc), true) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "isMultiNamespacedOperator", + "kind": "function", + "source": [ + "func 
isMultiNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool {", + "\treturn len(targetNamespaces) \u003e 1 \u0026\u0026 !stringhelper.StringInSlice(targetNamespaces, operatorNamespace, false)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 
1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. 
Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", 
+ "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.intersectTargetFolders", + "kind": "function", + "source": [ + "func (f *FsDiff) intersectTargetFolders(src []string) []string {", + "\tvar dst []string", + "\tfor _, folder := range src {", + "\t\tif stringhelper.StringInSlice(targetFolders, folder, false) {", + "\t\t\tf.check.LogWarn(\"Container's folder %q is altered.\", folder)", + "\t\t\tdst = append(dst, folder)", + "\t\t}", + "\t}", + "\treturn dst", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "SubSlice", + "qualifiedName": "SubSlice", + "exported": true, + "signature": "func([]string, []string)(bool)", + "doc": "SubSlice verifies all elements of one slice exist in another\n\nThe function receives two string slices: the main slice and a candidate\nsub-slice. It iterates over each element of the candidate, checking for an\nexact match within the main slice using StringInSlice. 
If any element is\nmissing, it returns false; otherwise it returns true after all checks pass.", + "position": "/Users/deliedit/dev/certsuite/pkg/stringhelper/stringhelper.go:51", + "calls": [ + { + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "updateCapabilitiesFromContainer", + "kind": "function", + "source": [ + "func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *ContainerSCC) {", + "\tcontainerSCC.RequiredDropCapabilitiesPresent = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.Capabilities != nil {", + "\t\tvar sliceDropCapabilities []string", + "\t\tfor _, ncc := range cut.SecurityContext.Capabilities.Drop {", + "\t\t\tsliceDropCapabilities = append(sliceDropCapabilities, string(ncc))", + "\t\t}", + "", + "\t\t// Sort the slices", + "\t\tsort.Strings(sliceDropCapabilities)", + "\t\tsort.Strings(requiredDropCapabilities)", + "", + "\t\tif stringhelper.SubSlice(sliceDropCapabilities, requiredDropCapabilities) || slices.Equal(sliceDropCapabilities, dropAll) {", + "\t\t\tcontainerSCC.RequiredDropCapabilitiesPresent = OK", + "\t\t}", + "\t\t//nolint:gocritic", + "\t\tif len(cut.SecurityContext.Capabilities.Add) == 0 { // check if the len=0 this mean that is cat1", + "\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t\t} else if checkContainCategory(cut.SecurityContext.Capabilities.Add, category2AddCapabilities) {", + 
"\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID2", + "\t\t} else {", + "\t\t\tif checkContainCategory(cut.SecurityContext.Capabilities.Add, category3AddCapabilities) {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID3", + "\t\t\t} else {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID4", + "\t\t\t}", + "\t\t}", + "\t} else {", + "\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SubSlice(s, sub []string) bool {", + "\tfor _, v := range sub {", + "\t\tif !StringInSlice(s, v, false) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "testhelper", + "files": 1, + "imports": [ + "encoding/json", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "reflect" + ], + "structs": [ + { + "name": "FailureReasonOut", + "exported": true, + "doc": "FailureReasonOut Represents collections of compliant and non-compliant report objects\n\nThis structure stores two separate lists of report objects, one for items\nthat meet the compliance criteria and another for those that do not. Each\nlist holds pointers to ReportObject instances, allowing callers to access\ndetailed information about each item. 
The struct provides an Equal method to\ncompare two instances by checking both slices for identical contents.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:54", + "fields": { + "CompliantObjectsOut": "[]*ReportObject", + "NonCompliantObjectsOut": "[]*ReportObject" + }, + "methodNames": [ + "Equal" + ], + "source": [ + "type FailureReasonOut struct {", + "\tCompliantObjectsOut []*ReportObject", + "\tNonCompliantObjectsOut []*ReportObject", + "}" + ] + }, + { + "name": "ReportObject", + "exported": true, + "doc": "ReportObject Represents a structured report entry with type and key/value attributes\n\nThis structure holds the kind of object being reported, along with parallel\nslices that store field names and corresponding values. The fields are\npopulated via methods such as AddField, SetContainerProcessValues, or\nSetType, allowing callers to build descriptive reports for compliance checks.\nIt serves as a lightweight container used throughout the test helper package\nto aggregate and serialize results.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:41", + "fields": { + "ObjectFieldsKeys": "[]string", + "ObjectFieldsValues": "[]string", + "ObjectType": "string" + }, + "methodNames": [ + "AddField", + "SetContainerProcessValues", + "SetType" + ], + "source": [ + "type ReportObject struct {", + "\tObjectType string", + "\tObjectFieldsKeys []string", + "\tObjectFieldsValues []string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "Equal", + "qualifiedName": "Equal", + "exported": true, + "signature": "func([]*ReportObject, []*ReportObject)(bool)", + "doc": "Equal Compares two slices of ReportObject pointers for deep equality\n\nThe function first verifies that both slices have the same length. It then\niterates through each index, treating nil entries as equal only when both are\nnil; a mismatch in nil status causes an immediate false result. 
For non-nil\nelements, it uses reflect.DeepEqual on the dereferenced values to determine\nequality, returning true only if all corresponding pairs match.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:66", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "reflect", + "name": "DeepEqual", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func Equal(p, other []*ReportObject) bool {", + "\tif len(p) != len(other) {", + "\t\treturn false", + "\t}", + "\tfor i := 0; i \u003c len(p); i++ {", + "\t\tif p[i] == nil \u0026\u0026 other[i] == nil {", + "\t\t\tcontinue", + "\t\t}", + "\t\tif p[i] == nil || other[i] == nil {", + "\t\t\treturn false", + "\t\t}", + "\t\tif !reflect.DeepEqual(*p[i], *other[i]) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Equal", + "qualifiedName": "FailureReasonOut.Equal", + "exported": true, + "receiver": "FailureReasonOut", + "signature": "func(FailureReasonOut)(bool)", + "doc": "FailureReasonOut.Equal determines equality of two FailureReasonOut instances\n\nIt compares the CompliantObjectsOut and NonCompliantObjectsOut fields of both\nstructs, returning true only if all corresponding values match. The\ncomparison is performed using the generic Equal function for each field. 
If\nany field differs, it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:136", + "calls": [ + { + "name": "FailureReasonOut.Equal", + "kind": "function", + "source": [ + "func (p FailureReasonOut) Equal(other FailureReasonOut) bool {", + "\treturn Equal(p.CompliantObjectsOut, other.CompliantObjectsOut) \u0026\u0026", + "\t\tEqual(p.NonCompliantObjectsOut, other.NonCompliantObjectsOut)", + "}" + ] + }, + { + "name": "FailureReasonOut.Equal", + "kind": "function", + "source": [ + "func (p FailureReasonOut) Equal(other FailureReasonOut) bool {", + "\treturn Equal(p.CompliantObjectsOut, other.CompliantObjectsOut) \u0026\u0026", + "\t\tEqual(p.NonCompliantObjectsOut, other.NonCompliantObjectsOut)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "FailureReasonOut.Equal", + "kind": "function", + "source": [ + "func (p FailureReasonOut) Equal(other FailureReasonOut) bool {", + "\treturn Equal(p.CompliantObjectsOut, other.CompliantObjectsOut) \u0026\u0026", + "\t\tEqual(p.NonCompliantObjectsOut, other.NonCompliantObjectsOut)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (p FailureReasonOut) Equal(other FailureReasonOut) bool {", + "\treturn Equal(p.CompliantObjectsOut, other.CompliantObjectsOut) \u0026\u0026", + "\t\tEqual(p.NonCompliantObjectsOut, other.NonCompliantObjectsOut)", + "}" + ] + }, + { + "name": "FailureReasonOutTestString", + "qualifiedName": "FailureReasonOutTestString", + "exported": true, + "signature": "func(FailureReasonOut)(string)", + "doc": "FailureReasonOutTestString Formats a FailureReasonOut as a readable string\n\nThis function takes a FailureReasonOut value and builds a formatted string\nthat includes the compliant and non‑compliant object lists. 
It uses helper\nformatting to produce a concise representation of each list, then\nconcatenates them into a single string for debugging or test output.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:90", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "ReportObjectTestStringPointer", + "kind": "function", + "source": [ + "func ReportObjectTestStringPointer(p []*ReportObject) (out string) {", + "\tout = \"[]*testhelper.ReportObject{\"", + "\tfor _, p := range p {", + "\t\tout += fmt.Sprintf(\"\u0026%#v,\", *p)", + "\t}", + "\tout += \"}\"", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "ReportObjectTestStringPointer", + "kind": "function", + "source": [ + "func ReportObjectTestStringPointer(p []*ReportObject) (out string) {", + "\tout = \"[]*testhelper.ReportObject{\"", + "\tfor _, p := range p {", + "\t\tout += fmt.Sprintf(\"\u0026%#v,\", *p)", + "\t}", + "\tout += \"}\"", + "\treturn out", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FailureReasonOutTestString(p FailureReasonOut) (out string) {", + "\tout = \"testhelper.FailureReasonOut{\"", + "\tout += fmt.Sprintf(\"CompliantObjectsOut: %s,\", ReportObjectTestStringPointer(p.CompliantObjectsOut))", + "\tout += fmt.Sprintf(\"NonCompliantObjectsOut: %s,\", ReportObjectTestStringPointer(p.NonCompliantObjectsOut))", + "\tout += \"}\"", + "\treturn out", + "}" + ] + }, + { + "name": "GetDaemonSetFailedToSpawnSkipFn", + "qualifiedName": "GetDaemonSetFailedToSpawnSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetDaemonSetFailedToSpawnSkipFn returns a closure that skips tests when the probe daemonset fails to spawn\n\nThe function takes a test environment and produces a zero‑argument function\nreturning a boolean and a message. 
When called, the inner function checks\nwhether the environment records a failed daemonset launch; if so it signals\nthe test should be skipped with an explanatory string. Otherwise it indicates\nno skip is needed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:565", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := 
env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoAffinityRequiredPodsSkipFn", + "qualifiedName": "GetNoAffinityRequiredPodsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoAffinityRequiredPodsSkipFn Determines if a test should be skipped due to absence of affinity-required pods\n\nThe function returns a closure that checks the test environment for any pods\nmarked with required node affinity. If none are found, it signals that the\ntest should be skipped and provides an explanatory message. 
Otherwise, it\nindicates the test can proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:825", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetAffinityRequiredPods", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoAffinityRequiredPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetAffinityRequiredPods()) == 0 {", + "\t\t\treturn true, \"no pods with required affinity found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoBareMetalNodesSkipFn", + "qualifiedName": "GetNoBareMetalNodesSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoBareMetalNodesSkipFn skips tests when no bare-metal nodes exist\n\nThe returned function checks the test environment for bare-metal nodes by\ncalling GetBaremetalNodes. If none are found, it signals that the current\ntest should be skipped with a descriptive message. 
Otherwise, it allows the\ntest to proceed normally.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:871", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetBaremetalNodes", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + 
"func GetNoBareMetalNodesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetBaremetalNodes()) == 0 {", + "\t\t\treturn true, \"no baremetal nodes found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoCPUPinningPodsSkipFn", + "qualifiedName": "GetNoCPUPinningPodsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoCPUPinningPodsSkipFn Checks for the presence of CPU pinning pods before running a test\n\nThis function receives an environment object and returns a closure that\nindicates if a test should be skipped. The inner function counts\nCPU‑pinning pods with DPDK; if none are found it signals to skip with an\nexplanatory message, otherwise it allows the test to proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:581", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetCPUPinningPodsWithDpdk", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// 
Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) 
error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoCPUPinningPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetCPUPinningPodsWithDpdk()) == 0 {", + "\t\t\treturn true, \"no CPU pinning pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoCatalogSourcesSkipFn", + "qualifiedName": "GetNoCatalogSourcesSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + 
"doc": "GetNoCatalogSourcesSkipFn Determines whether to skip tests due to missing catalog sources\n\nThe function returns a closure that checks the test environment for catalog\nsource entries. If no catalog sources are present, it signals that the\nassociated tests should be skipped with an explanatory message. Otherwise, it\nindicates that testing can proceed normally.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:916", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoCatalogSourcesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.AllCatalogSources) == 0 {", + "\t\t\treturn true, \"no catalog sources found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoContainersUnderTestSkipFn", + "qualifiedName": "GetNoContainersUnderTestSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoContainersUnderTestSkipFn skips tests when there are no containers to evaluate\n\nThis function receives a test environment and returns another function that\ndetermines whether the current test should be skipped. 
It checks if the\ncontainer list in the environment is empty; if so, it signals to skip with an\nexplanatory message. Otherwise, it allows the test to proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:617", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn 
nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull 
policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), 
testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", 
+ "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightContainerCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t\t\tfor _, cut := range containers {", + "\t\t\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Container %q has passed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has failed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has errored Preflight test %q, err: %v\", cut, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Container has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + 
"\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoCrdsUnderTestSkipFn", + "qualifiedName": "GetNoCrdsUnderTestSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoCrdsUnderTestSkipFn Provides a skip function for tests when no CRDs are present\n\nIt returns an anonymous function that checks the TestEnvironment's Crds\nslice. If the slice is empty, the inner function signals to skip the test\nwith a message indicating there are no roles to check. Otherwise it allows\nthe test to proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:682", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", 
+ "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", 
+ "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Crds) == 0 {", + "\t\t\treturn true, \"no roles to check\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": 
"GetNoDeploymentsUnderTestSkipFn", + "qualifiedName": "GetNoDeploymentsUnderTestSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoDeploymentsUnderTestSkipFn Determines whether tests should be skipped due to absence of deployments\n\nThe function returns a closure that checks the length of the Deployments\nslice in a test environment. If no deployments are present, it signals that\nthe test should skip with an explanatory message. Otherwise, it indicates\nthat testing can proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:649", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) 
error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoDeploymentsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Deployments) == 0 {", + "\t\t\treturn true, \"no deployments to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoGuaranteedPodsWithExclusiveCPUsSkipFn", + "qualifiedName": "GetNoGuaranteedPodsWithExclusiveCPUsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoGuaranteedPodsWithExclusiveCPUsSkipFn skips test when there are no pods using exclusive CPUs\n\nThe returned closure examines the test environment for pods that have been\nassigned exclusive CPU resources. If none are found, it signals to skip the\ntest by returning true and a descriptive message. 
Otherwise, it allows the\ntest to proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:809", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetGuaranteedPodsWithExclusiveCPUs", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + 
"", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetGuaranteedPodsWithExclusiveCPUs()) == 0 {", + "\t\t\treturn true, \"no pods with exclusive CPUs found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoHugepagesPodsSkipFn", + "qualifiedName": "GetNoHugepagesPodsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoHugepagesPodsSkipFn Determines if a test should be skipped due to lack of hugepage pods\n\nThis function receives a testing environment and returns another function\nthat, when called, checks whether any pods are requesting hugepages. 
If none\nexist, it signals the test framework to skip with an explanatory message.\nOtherwise, it allows the test to proceed normally.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:901", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetHugepagesPods", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoHugepagesPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetHugepagesPods()) == 0 {", + "\t\t\treturn true, \"no pods requesting hugepages found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoIstioSkipFn", + "qualifiedName": "GetNoIstioSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoIstioSkipFn Decides if tests should be skipped due to missing Istio\n\nThe function creates and returns a closure that inspects the test environment\nfor an Istio service mesh flag. If the flag indicates no Istio is present, it\nsignals to skip with a descriptive message; otherwise it allows the test to\nproceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:886", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoIstioSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !env.IstioServiceMeshFound {", + "\t\t\treturn true, \"no istio service mesh found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoNamespacesSkipFn", + "qualifiedName": "GetNoNamespacesSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoNamespacesSkipFn Determines whether tests should be skipped due to lack of namespaces\n\nThe function returns a closure that checks the provided test environment for\nconfigured namespaces. If no namespaces are present, it signals that tests\nshould be skipped and supplies an explanatory message. 
Otherwise, it\nindicates that testing can proceed normally.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:698", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoNamespacesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Namespaces) == 0 {", + "\t\t\treturn true, \"There are no namespaces to check. Please check config.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoNodesWithRealtimeKernelSkipFn", + "qualifiedName": "GetNoNodesWithRealtimeKernelSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoNodesWithRealtimeKernelSkipFn Skips tests when no node uses a realtime kernel\n\nThis helper returns a function that checks all nodes in the test environment\nfor a realtime kernel type. 
If any node is found to use such a kernel, the\nreturned function signals not to skip; otherwise it indicates a skip with an\nexplanatory message.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:976", + "calls": [ + { + "name": "IsRTKernel", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoNodesWithRealtimeKernelSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tfor i := range env.Nodes {", + "\t\t\tnode := env.Nodes[i]", + "", + "\t\t\tif node.IsRTKernel() {", + "\t\t\t\treturn false, \"\"", + "\t\t\t}", + "\t\t}", + "", + "\t\treturn true, \"no nodes with realtime kernel type found\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoOperatorCrdsSkipFn", + "qualifiedName": "GetNoOperatorCrdsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoOperatorCrdsSkipFn Skips tests when no operator CRDs are present\n\nThe function takes a test environment and returns a closure used to decide\nwhether a test should be skipped. The closure checks the length of the Crds\nslice in the environment; if it is empty, it signals to skip the test with an\nexplanatory message. 
Otherwise, it indicates that the test should proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:961", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoOperatorCrdsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Crds) == 0 {", + "\t\t\treturn true, \"no operator crds found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoOperatorPodsSkipFn", + "qualifiedName": "GetNoOperatorPodsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoOperatorPodsSkipFn Determines whether to skip tests due to missing operator pods\n\nThe returned function checks the TestEnvironment's mapping of CSVs to pod\nlists. If no entries exist, it signals that tests should be skipped by\nreturning true along with a message explaining that no operator pods were\nfound. 
Otherwise, it indicates tests can proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:945", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoOperatorPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.CSVToPodListMap) == 0 {", + "\t\t\treturn true, \"no operator pods found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoOperatorsSkipFn", + "qualifiedName": "GetNoOperatorsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoOperatorsSkipFn Decides if a test should be skipped because no operators are present\n\nThe function generates a closure that inspects the provided environment's\noperator list. 
If the list is empty, it signals to skip the test and supplies\nan explanatory message; otherwise it indicates the test can proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:930", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightOperatorCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t\t\tfor _, op := range operators {", + "\t\t\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Operator %q has passed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has failed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has errored Preflight test %q, err: %v\", op, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, fmt.Sprintf(\"Operator has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + 
"\t}", + "}" + ] + }, + { + "name": "GetNoPersistentVolumeClaimsSkipFn", + "qualifiedName": "GetNoPersistentVolumeClaimsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoPersistentVolumeClaimsSkipFn Determines if tests should be skipped due to absence of persistent volume claims\n\nThe function receives a test environment and produces a closure used by the\ntesting framework. When invoked, the closure checks whether the environment\ncontains any persistent volume claim objects. If none are present, it signals\nthat the test should be skipped and supplies an explanatory message;\notherwise it allows the test to proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:856", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the 
lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + 
"", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoPersistentVolumeClaimsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.PersistentVolumeClaims) == 0 {", + "\t\t\treturn true, \"no persistent volume claims found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoPersistentVolumesSkipFn", + "qualifiedName": "GetNoPersistentVolumesSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoPersistentVolumesSkipFn skips tests when no persistent volumes exist\n\nIt produces a function that inspects the test environment’s list of\npersistent volumes. 
If the list is empty, it signals to skip the related\ntests and provides an explanatory message; otherwise it allows the tests to\nrun.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:761", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoPersistentVolumesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.PersistentVolumes) == 0 {", + "\t\t\treturn true, \"no persistent volumes to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoPodsUnderTestSkipFn", + "qualifiedName": "GetNoPodsUnderTestSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoPodsUnderTestSkipFn skips the test when there are no pods to check\n\nThis function creates a closure that examines the supplied test environment's\npod list. 
If the list is empty, it signals that the test should be skipped by\nreturning true and an explanatory message; otherwise, it indicates the test\nshould run.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:633", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", 
+ "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error 
{", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + 
"\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass 
test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + 
"\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn 
nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif 
len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoRolesSkipFn", + "qualifiedName": "GetNoRolesSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoRolesSkipFn Determines whether tests should be skipped due to missing roles\n\nThe returned function checks the Roles slice in the test environment. If no\nroles are present, it signals a skip by returning true along with an\nexplanatory message. Otherwise, it indicates that testing can proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:713", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoRolesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Roles) == 0 {", + "\t\t\treturn true, \"There are no roles to check. Please check config.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoSRIOVPodsSkipFn", + "qualifiedName": "GetNoSRIOVPodsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoSRIOVPodsSkipFn Provides a skip function for tests when no SRIOV pods are present\n\nThis returns a closure that checks the test environment for SRIOV-enabled\npods. 
If retrieving the list fails or the list is empty, it signals to skip\nthe test with an explanatory message; otherwise the test proceeds normally.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:596", + "calls": [ + { + "name": "GetPodsUsingSRIOV", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := 
env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tpods, err := env.GetPodsUsingSRIOV()", + "\t\tif err != nil {", + "\t\t\treturn true, fmt.Sprintf(\"failed to get SRIOV pods: %v\", err)", + "\t\t}", + "", + "\t\tif len(pods) == 0 {", + "\t\t\treturn true, \"no SRIOV pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoServicesUnderTestSkipFn", + "qualifiedName": "GetNoServicesUnderTestSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoServicesUnderTestSkipFn Checks whether the test environment has any services defined\n\nThe function produces a closure that inspects the provided test environment's\nservice list. 
If the list is empty it signals to skip the test with an\nexplanatory message; otherwise it indicates the test should proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:548", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := 
env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoServicesUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Services) == 0 {", + "\t\t\treturn true, \"no services to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoStatefulSetsUnderTestSkipFn", + "qualifiedName": "GetNoStatefulSetsUnderTestSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoStatefulSetsUnderTestSkipFn Skips tests when there are no StatefulSets in the environment\n\nThis function receives a test environment and produces a callback used by\ntest frameworks to decide whether to skip a particular check. 
The returned\nclosure inspects the number of StatefulSet objects present; if none exist, it\nsignals that the test should be skipped with an explanatory message.\nOtherwise it indicates the test can proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:666", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), 
testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoStatefulSetsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.StatefulSets) == 0 {", + "\t\t\treturn true, \"no statefulSets to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNoStorageClassesSkipFn", + "qualifiedName": "GetNoStorageClassesSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNoStorageClassesSkipFn Skips tests when no storage classes are present\n\nThis function returns a closure that checks the length of the environment's\nstorage class list. 
If the list is empty, it signals to skip the test with an\nexplanatory message; otherwise, it allows the test to proceed normally.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:840", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNoStorageClassesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.StorageClassList) == 0 {", + "\t\t\treturn true, \"no storage classes found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNonOCPClusterSkipFn", + "qualifiedName": "GetNonOCPClusterSkipFn", + "exported": true, + "signature": "func()(func() (bool, string))", + "doc": "GetNonOCPClusterSkipFn provides a test skip function for non‑OCP clusters\n\nThis helper creates and returns a zero‑argument function that, when called,\nchecks whether the current environment is an OpenShift cluster. 
If it is not,\nthe returned function signals to skip the test by returning true along with a\ndescriptive message; otherwise it indicates no skip with false and an empty\nstring.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:534", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "IsOCPCluster", + "kind": "function", + "source": [ + "func IsOCPCluster() bool {", + "\treturn env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error 
{", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn 
nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNotEnoughWorkersSkipFn", + "qualifiedName": "GetNotEnoughWorkersSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment, int)(func() (bool, string))", + "doc": "GetNotEnoughWorkersSkipFn Creates a test skip function based on worker count\n\nThis returns a closure that checks whether the current environment has fewer\nworkers than the required minimum. If the condition is met, it signals to\nskip the test by returning true along with an explanatory message; otherwise\nit indicates the test should proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:777", + "calls": [ + { + "name": "GetWorkerCount", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD 
test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", 
+ "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn 
nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.GetWorkerCount() \u003c minWorkerNodes {", + "\t\t\treturn true, \"not enough nodes to check found\"", + "\t\t}", + 
"", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetNotIntrusiveSkipFn", + "qualifiedName": "GetNotIntrusiveSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetNotIntrusiveSkipFn Provides a skip function for non‑intrusive tests\n\nThe returned closure checks whether the test environment is marked as\nintrusive. If it is not, the function signals that the test should be skipped\nby returning true along with an explanatory message. Otherwise, it indicates\nthe test should run normally.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:745", + "calls": [ + { + "name": "IsIntrusive", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for 
crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !env.IsIntrusive() {", + "\t\t\treturn true, \"not intrusive test\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetPodsWithoutAffinityRequiredLabelSkipFn", + "qualifiedName": "GetPodsWithoutAffinityRequiredLabelSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetPodsWithoutAffinityRequiredLabelSkipFn Creates a skip function for tests that require pods with an affinity label\n\nIt receives the test environment and returns a closure that checks whether\nany pods lack the 
required affinity label. If none are found, the closure\nsignals to skip the test with an explanatory message; otherwise it allows the\ntest to proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:793", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetPodsWithoutAffinityRequiredLabel", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High 
availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, 
timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetPodsWithoutAffinityRequiredLabelSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetPodsWithoutAffinityRequiredLabel()) == 0 {", + "\t\t\treturn true, \"no pods with required affinity label found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetSharedProcessNamespacePodsSkipFn", + "qualifiedName": "GetSharedProcessNamespacePodsSkipFn", + "exported": true, + "signature": "func(*provider.TestEnvironment)(func() (bool, string))", + "doc": "GetSharedProcessNamespacePodsSkipFn Determines whether to skip tests based on shared process namespace pod presence\n\nIt examines the test environment for pods that share a process namespace. If\nnone are present, it signals that the condition required for the test is not\nmet and returns true along with an explanatory message. 
Otherwise, it\nindicates the test should proceed.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:729", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetShareProcessNamespacePods", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetSharedProcessNamespacePodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetShareProcessNamespacePods()) == 0 {", + "\t\t\treturn true, \"Shared process namespace pods found.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "NewCatalogSourceReportObject", + "qualifiedName": "NewCatalogSourceReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewCatalogSourceReportObject Creates a report object for a catalog source\n\nThe function builds a new report object using the provided namespace, catalog\nsource name, reason, and compliance flag. 
It delegates creation to an\ninternal helper that sets the type and records whether the item is compliant.\nFinally, it adds namespace and name fields before returning the populated\nreport.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:405", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on 
channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package 
manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + 
"\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CatalogSourceType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aCatalogSourceName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewCertifiedContainerReportObject", + "qualifiedName": "NewCertifiedContainerReportObject", + "exported": true, + "signature": "func(provider.ContainerImageIdentifier, string, bool)(*ReportObject)", + "doc": "NewCertifiedContainerReportObject Creates a report object for a container image\n\nThis function receives an image identifier, a compliance reason string, and a\nflag indicating whether the image meets compliance requirements. It\nconstructs a new report object of type ContainerImageType, annotating it with\nthe provided reason as either compliant or non‑compliant. 
The resulting\nobject includes fields for digest, repository, tag, and registry derived from\nthe identifier.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:299", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": 
[ + "func NewCertifiedContainerReportObject(cii provider.ContainerImageIdentifier, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerImageType, isCompliant)", + "\tout.AddField(ImageDigest, cii.Digest)", + "\tout.AddField(ImageRepo, cii.Repository)", + "\tout.AddField(ImageTag, cii.Tag)", + "\tout.AddField(ImageRegistry, cii.Registry)", + "\treturn out", + "}" + ] + }, + { + "name": "NewClusterOperatorReportObject", + "qualifiedName": "NewClusterOperatorReportObject", + "exported": true, + "signature": "func(string, string, bool)(*ReportObject)", + "doc": "NewClusterOperatorReportObject Creates a report object for a cluster operator\n\nThis function builds a ReportObject by calling the generic constructor with a\nreason, type label, and compliance flag. It then adds the operator name as an\nadditional field before returning the populated object. The returned pointer\nrepresents a structured report entry that can be used in test results.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:392", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testClusterOperatorHealth", + "kind": 
"function", + "source": [ + "func testClusterOperatorHealth(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Checks the various ClusterOperator(s) to see if they are all in an 'Available' state.", + "\t// If they are not in an 'Available' state, the check will fail.", + "\t// Note: This check is only applicable to OCP clusters and is skipped for non-OCP clusters.", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the ClusterOperators and check their status.", + "\tfor i := range env.ClusterOperators {", + "\t\tcheck.LogInfo(\"Testing ClusterOperator %q to ensure it is in an 'Available' state.\", env.ClusterOperators[i].Name)", + "", + "\t\tif clusteroperator.IsClusterOperatorAvailable(\u0026env.ClusterOperators[i]) {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is in an 'Available' state\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is not in an 'Available' state\", false))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ClusterOperatorType, isCompliant)", + "\tout.AddField(Name, aClusterOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewClusterVersionReportObject", + "qualifiedName": "NewClusterVersionReportObject", + "exported": true, + "signature": "func(string, string, bool)(*ReportObject)", + "doc": "NewClusterVersionReportObject Creates a report object containing cluster version information\n\nThe function takes a version string, a 
reason for compliance or\nnon‑compliance, and a boolean indicating compliance status. It constructs a\nnew ReportObject with the provided reason and type, then adds the version as\nan additional field before returning the object.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:326", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testOCPStatus", + "kind": "function", + "source": [ + "func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tclusterIsInEOL := false", + "\tswitch env.OCPStatus {", + "\tcase compatibility.OCPStatusEOL:", + "\t\tcheck.LogError(\"OCP Version %q has been found to be in end of life\", env.OpenshiftVersion)", + "\t\tclusterIsInEOL = true", + "\tcase compatibility.OCPStatusMS:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in maintenance support\", env.OpenshiftVersion)", + "\tcase compatibility.OCPStatusGA:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in general availability\", env.OpenshiftVersion)", + "\tcase compatibility.OCPStatusPreGA:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in pre-general availability\", env.OpenshiftVersion)", + 
"\tdefault:", + "\t\tcheck.LogInfo(\"OCP Version %q was unable to be found in the lifecycle compatibility matrix\", env.OpenshiftVersion)", + "\t}", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tif clusterIsInEOL {", + "\t\tnonCompliantObjects = []*testhelper.ReportObject{testhelper.NewClusterVersionReportObject(env.OpenshiftVersion, \"Openshift Version is in End Of Life (EOL)\", false)}", + "\t} else {", + "\t\tcompliantObjects = []*testhelper.ReportObject{testhelper.NewClusterVersionReportObject(env.OpenshiftVersion, \"Openshift Version is not in End Of Life (EOL)\", true)}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OCPClusterType, isCompliant)", + "\tout.AddField(OCPClusterVersionType, version)", + "\treturn out", + "}" + ] + }, + { + "name": "NewContainerReportObject", + "qualifiedName": "NewContainerReportObject", + "exported": true, + "signature": "func(string, string, string, string, bool)(*ReportObject)", + "doc": "NewContainerReportObject Creates a report object for a container\n\nIt builds a ReportObject with type ContainerType, attaching the provided\nnamespace, pod name, container name, and compliance reason as fields. 
The\nfunction uses NewReportObject to set the compliance status and then adds\nadditional identifying fields before returning the pointer.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:283", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "ProcessPidsCPUScheduling", + "kind": "function", + "source": [ + "func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string, logger *log.Logger) (compliantContainerPids, nonCompliantContainerPids 
[]*testhelper.ReportObject) {", + "\thasCPUSchedulingConditionSuccess := false", + "\tfor _, process := range processes {", + "\t\tlogger.Debug(\"Testing process %q\", process)", + "\t\tschedulePolicy, schedulePriority, err := GetProcessCPUSchedulingFn(process.Pid, testContainer)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Unable to get the scheduling policy and priority : %v\", err)", + "\t\t\treturn compliantContainerPids, nonCompliantContainerPids", + "\t\t}", + "", + "\t\tswitch check {", + "\t\tcase SharedCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority == 0", + "\t\tcase ExclusiveCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority == 0 || (schedulePriority \u003c 10 \u0026\u0026 (schedulePolicy == SchedulingRoundRobin || schedulePolicy == SchedulingFirstInFirstOut))", + "\t\tcase IsolatedCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority \u003e= 10 \u0026\u0026 (schedulePolicy == SchedulingRoundRobin || schedulePolicy == SchedulingFirstInFirstOut)", + "\t\t}", + "", + "\t\tif !hasCPUSchedulingConditionSuccess {", + "\t\t\tlogger.Error(\"Process %q in Container %q with cpu scheduling policy=%s, priority=%d did not satisfy cpu scheduling requirements\", process, testContainer, schedulePolicy, schedulePriority)", + "\t\t\taPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, \"process does not satisfy: \"+schedulingRequirements[check], false).", + "\t\t\t\tSetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)", + "\t\t\tnonCompliantContainerPids = append(nonCompliantContainerPids, aPidOut)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlogger.Info(\"Process %q in Container %q with cpu scheduling policy=%s, priority=%d satisfies cpu scheduling requirements\", process, testContainer, schedulePolicy, schedulePriority)", + "\t\taPidOut := 
testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, \"process satisfies: \"+schedulingRequirements[check], true).", + "\t\t\tSetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)", + "\t\tcompliantContainerPids = append(compliantContainerPids, aPidOut)", + "\t}", + "\treturn compliantContainerPids, nonCompliantContainerPids", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "checkForbiddenCapability", + "kind": "function", + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. 
All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testContainerHostPort", + "kind": "function", + "source": [ + "func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\thostPortFound := false", + "\t\tfor _, aPort := range cut.Ports {", + "\t\t\tif aPort.HostPort != 0 {", + "\t\t\t\tcheck.LogError(\"Host port %d is configured in Container %q.\", aPort.HostPort, cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Host port is configured\", false).", + "\t\t\t\t\tSetType(testhelper.HostPortType).", + "\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(aPort.HostPort))))", + "\t\t\t\thostPortFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !hostPortFound {", + "\t\t\tcheck.LogInfo(\"Host port not configured in Container %q.\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Host port is not configured\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testContainerSCC", + "kind": "function", + "source": [ + "func 
testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\thighLevelCat := securitycontextcontainer.CategoryID1", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", pod)", + "\t\tlistCategory := securitycontextcontainer.CheckPod(pod)", + "\t\tfor _, cat := range listCategory {", + "\t\t\tif cat.Category \u003e securitycontextcontainer.CategoryID1NoUID0 {", + "\t\t\t\tcheck.LogError(\"Category %q is NOT category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is NOT category 1 or category NoUID0\", false).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, aContainerOut)", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Category %q is category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is category 1 or category NoUID0\", true).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, aContainerOut)", + "\t\t\t}", + "\t\t\tif cat.Category \u003e highLevelCat {", + "\t\t\t\thighLevelCat = cat.Category", + "\t\t\t}", + "\t\t}", + "\t}", + "\taCNFOut := testhelper.NewReportObject(\"Overall CNF category\", testhelper.CnfType, false).AddField(testhelper.Category, highLevelCat.String())", + "\tcompliantObjects = append(compliantObjects, aCNFOut)", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": 
"testOneProcessPerContainer", + "kind": "function", + "source": [ + "func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t// the Istio sidecar container \"istio-proxy\" launches two processes: \"pilot-agent\" and \"envoy\"", + "\t\tif cut.IsIstioProxy() {", + "\t\t\tcheck.LogInfo(\"Skipping \\\"istio-proxy\\\" container\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Debug pod not found for node %q\", cut.NodeName)", + "\t\t\treturn", + "\t\t}", + "\t\tocpContext := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tpid, err := crclient.GetPidFromContainer(cut, ocpContext)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get PID for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get number of processes for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif nbProcesses \u003e 1 {", + "\t\t\tcheck.LogError(\"Container %q has more than one process running\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has more than one 
process running\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has only one process running\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has only one process running\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodRequests", + "kind": "function", + "source": [ + "func testPodRequests(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t// Loop through the containers, looking for containers that are missing requests.", + "\t// These need to be defined in order to pass.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !resources.HasRequestsSet(cut, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Container %q is missing resource requests\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is missing resource requests\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has resource requests\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has resource requests\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSYSNiceRealtimeCapability", + "kind": "function", + "source": [ + "func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects 
[]*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the labeled containers and compare their security context capabilities and whether", + "\t// or not the node's kernel is realtime enabled.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tn := env.Nodes[cut.NodeName]", + "\t\tif !n.IsRTKernel() {", + "\t\t\tcheck.LogInfo(\"Container is not running on a realtime kernel enabled node\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not running on a realtime kernel enabled node\", true))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !isContainerCapabilitySet(cut.SecurityContext.Capabilities, \"SYS_NICE\") {", + "\t\t\tcheck.LogError(\"Container %q has been found running on a realtime kernel enabled node without SYS_NICE capability.\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node without SYS_NICE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container is running on a realtime kernel enabled node with the SYS_NICE capability\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node with the SYS_NICE capability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSecConPrivilegeEscalation", + "kind": "function", + "source": [ + "func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprivEscFound := false", + "\t\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.AllowPrivilegeEscalation != nil {", + "\t\t\tif *(cut.SecurityContext.AllowPrivilegeEscalation) {", + "\t\t\t\tcheck.LogError(\"AllowPrivilegeEscalation is set to true in Container %q.\", cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"AllowPrivilegeEscalation is set to true\", false))", + "\t\t\t\tprivEscFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !privEscFound {", + "\t\t\tcheck.LogInfo(\"AllowPrivilegeEscalation is set to false in Container %q.\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"AllowPrivilegeEscalation is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSecConRunAsNonRoot", + "kind": "function", + "source": [ + "func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing pod %s/%s\", put.Namespace, put.Name)", + "\t\tnonCompliantContainers, nonComplianceReason := put.GetRunAsNonRootFalseContainers(knownContainersToSkip)", + "\t\tif len(nonCompliantContainers) == 0 {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is configured with RunAsNonRoot=true or RunAsUser!=0 at pod or container level.\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects 
= append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"One or more containers of the pod are running with root user\", false))", + "\t\t\tfor index := range nonCompliantContainers {", + "\t\t\t\tcheck.LogError(\"Pod %s/%s, container %q is not compliant: %s\", put.Namespace, put.Name, nonCompliantContainers[index].Name, nonComplianceReason[index])", + "", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, nonCompliantContainers[index].Name,", + "\t\t\t\t\tnonComplianceReason[index], false))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testContainerCertificationStatusByDigest", + "kind": "function", + "source": [ + "func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, c := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", c)", + "\t\tswitch {", + "\t\tcase c.ContainerImageIdentifier.Digest == \"\":", + "\t\t\tcheck.LogError(\"Container %q is missing digest field, failing validation (repo=%q image=%q)\", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Missing digest field\", false).", + "\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tcase !testContainerCertification(c.ContainerImageIdentifier, 
validator):", + "\t\t\tcheck.LogError(\"Container %q digest not found in database, failing validation (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + "\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Digest not found in database\", false).", + "\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tdefault:", + "\t\t\tcheck.LogInfo(\"Container %q digest found in database, image certified (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + "\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Container is certified\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testContainersImagePolicy", + "kind": "function", + "source": [ + "func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.ImagePullPolicy != corev1.PullIfNotPresent {", + "\t\t\tcheck.LogError(\"Container %q is using %q as ImagePullPolicy (compliant containers must use %q)\", cut, cut.ImagePullPolicy, corev1.PullIfNotPresent)", + 
"\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not using IfNotPresent as ImagePullPolicy\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q is using %q as ImagePullPolicy\", cut, cut.ImagePullPolicy)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is using IfNotPresent as ImagePullPolicy\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testContainersLivenessProbe", + "kind": "function", + "source": [ + "func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.LivenessProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have LivenessProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have LivenessProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has LivenessProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has LivenessProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testContainersPostStart", + "kind": "function", + "source": [ + "func testContainersPostStart(check *checksdb.Check, env 
*provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\tif cut.Lifecycle == nil || (cut.Lifecycle != nil \u0026\u0026 cut.Lifecycle.PostStart == nil) {", + "\t\t\tcheck.LogError(\"Container %q does not have postStart defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have postStart defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has postStart defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has postStart defined.\"+", + "\t\t\t\t\"Attention: There is a known upstream bug where a pod with a still-running postStart lifecycle hook that is deleted may not be terminated even after \"+", + "\t\t\t\t\"the terminationGracePeriod k8s bug link: kubernetes/kubernetes#116032\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testContainersPreStop", + "kind": "function", + "source": [ + "func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\tif cut.Lifecycle == nil || (cut.Lifecycle != nil \u0026\u0026 cut.Lifecycle.PreStop == nil) {", + "\t\t\tcheck.LogError(\"Container %q does not have preStop defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, 
cut.Podname, cut.Name, \"Container does not have preStop defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has preStop defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has preStop defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testContainersReadinessProbe", + "kind": "function", + "source": [ + "func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.ReadinessProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have ReadinessProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have ReadinessProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has ReadinessProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has ReadinessProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testContainersStartupProbe", + "kind": "function", + "source": [ + "func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range 
env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.StartupProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have StartupProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have StartupProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has StartupProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has StartupProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "testContainerPortNameFormat", + "kind": "function", + "source": [ + "func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfor _, newProtocol := range env.ValidProtocolNames {", + "\t\tallowedProtocolNames[newProtocol] = true", + "\t}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogDebug(\"Testing Container %q\", cut)", + "\t\tfor _, port := range cut.Ports {", + "\t\t\tif !containerPortNameFormatCheck(port.Name) {", + "\t\t\t\tcheck.LogError(\"Container %q declares port %q that does not follow the partner naming conventions\", cut, port.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort does not follow the partner naming conventions\", false).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Container %q declares port %q that does follow the partner naming conventions\", cut, port.Name)", + 
"\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort follows the partner naming conventions\", true).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "testContainersImageTag", + "kind": "function", + "source": [ + "func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogDebug(\"Testing Container %q\", cut)", + "\t\tif cut.IsTagEmpty() {", + "\t\t\tcheck.LogError(\"Container %q is missing image tag(s)\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is missing image tag(s)\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q is tagged with %q\", cut, cut.ContainerImageIdentifier.Tag)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is tagged\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "RunNetworkingTests", + "kind": "function", + "source": [ + "func RunNetworkingTests( //nolint:funlen", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\tcount int,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) (report testhelper.FailureReasonOut, skip bool) {", + "\tlogger.Debug(\"%s\", netcommons.PrintNetTestContextMap(netsUnderTest))", + "\tskip = false", + "\tif 
len(netsUnderTest) == 0 {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t\treturn report, skip", + "\t}", + "\t// if no network can be tested, then we need to skip the test entirely.", + "\t// If at least one network can be tested (e.g. \u003e 2 IPs/ interfaces present), then we do not skip the test", + "\tatLeastOneNetworkTested := false", + "\tcompliantNets := map[string]int{}", + "\tnonCompliantNets := map[string]int{}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tcompliantNets[netName] = 0", + "\t\tnonCompliantNets[netName] = 0", + "\t\tif len(netUnderTest.DestTargets) == 0 {", + "\t\t\tlogger.Debug(\"There are no containers to ping for %q network %q. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test\", aIPVersion, netName)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneNetworkTested = true", + "\t\tlogger.Debug(\"%q Ping tests on network %q. Number of target IPs: %d\", aIPVersion, netName, len(netUnderTest.DestTargets))", + "", + "\t\tfor _, aDestIP := range netUnderTest.DestTargets {", + "\t\t\tlogger.Debug(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q )\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP)", + "\t\t\tresult, err := TestPing(netUnderTest.TesterSource.ContainerIdentifier, aDestIP, count)", + "\t\t\tlogger.Debug(\"Ping results: %q\", result)", + "\t\t\tlogger.Info(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q ) result: %q\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP, result)", + "\t\t\tif err != nil {", + "\t\t\t\tlogger.Debug(\"Ping failed, err=%v\", err)", + "\t\t\t}", + "\t\t\tif result.outcome != 
testhelper.SUCCESS {", + "\t\t\t\tlogger.Error(\"Ping from %q (srcip: %q) to %q (dstip: %q) failed\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tnonCompliantNets[netName]++", + "\t\t\t\tnonCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Failed\", false).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + "\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, nonCompliantObject)", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"Ping from %q (srcip: %q) to %q (dstip: %q) succeeded\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tcompliantNets[netName]++", + "\t\t\t\tCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by 
Namespace/Pod Name/Container Name) Succeeded\", true).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + "\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, CompliantObject)", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantNets[netName] != 0 {", + "\t\t\tlogger.Error(\"ICMP tests failed for %d IP source/destination in this network\", nonCompliantNets[netName])", + "\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP tests failed for %d IP source/destination in this network\", nonCompliantNets[netName]), testhelper.NetworkType, false).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t\tif compliantNets[netName] != 0 {", + "\t\t\tlogger.Info(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName])", + "\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName]), testhelper.NetworkType, true).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t}", + "\tif !atLeastOneNetworkTested {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t}", + "", + "\treturn report, skip", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": 
"findRogueContainersDeclaringPorts", + "kind": "function", + "source": [ + "func findRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tfor _, port := range cut.Ports {", + "\t\t\tif portsToTest[port.ContainerPort] {", + "\t\t\t\tlogger.Error(\"%q declares %s reserved port %d (%s)\", cut, portsOrigin, port.ContainerPort, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Container declares %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.DeclaredPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.ContainerPort))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, string(port.Protocol)))", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q does not declare any %s reserved port\", cut, portsOrigin)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Container does not declare %s reserved port in %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.DeclaredPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.ContainerPort))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, string(port.Protocol)))", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testContainersLogging", + "kind": "function", + "source": [ + "func testContainersLogging(check *checksdb.Check, env 
*provider.TestEnvironment) {", + "\t// Iterate through all the CUTs to get their log output. The TC checks that at least", + "\t// one log line is found.", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\thasLoggingOutput, err := containerHasLoggingOutput(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get %q log output, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not get log output\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif !hasLoggingOutput {", + "\t\t\tcheck.LogError(\"Container %q does not have any line of log to stderr/stdout\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No log line to stderr/stdout found\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has some logging output\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Found log line to stderr/stdout\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testTerminationMessagePolicy", + "kind": "function", + "source": [ + "func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.TerminationMessagePolicy != 
corev1.TerminationMessageFallbackToLogsOnError {", + "\t\t\tcheck.LogError(\"Container %q does not have a TerminationMessagePolicy: FallbackToLogsOnError (has %s)\", cut, cut.TerminationMessagePolicy)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"TerminationMessagePolicy is not FallbackToLogsOnError\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has a TerminationMessagePolicy: FallbackToLogsOnError\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"TerminationMessagePolicy is FallbackToLogsOnError\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "filterProbeProcesses", + "kind": "function", + "source": [ + "func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Container) (notExecProbeProcesses []*crclient.Process, compliantObjects []*testhelper.ReportObject) {", + "\texecProbeProcesses := []int{}", + "\texecProbesCmds := getExecProbesCmds(cut)", + "\t// find all exec probes by matching command line", + "\tfor _, p := range allProcesses {", + "\t\tif execProbesCmds[strings.Join(strings.Fields(p.Args), \"\")] {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process belongs to an exec probe (skipping verification)\", true).", + "\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\texecProbeProcesses = append(execProbeProcesses, p.Pid)", + "\t\t}", + "\t}", + "\t// remove all exec probes and their children from the process list", + "\tfor _, p := range allProcesses {", + "\t\tif 
slices.Contains(execProbeProcesses, p.Pid) || slices.Contains(execProbeProcesses, p.PPid) {", + "\t\t\t// this process is part of an exec probe (child or parent), continue", + "\t\t\tcontinue", + "\t\t}", + "\t\tnotExecProbeProcesses = append(notExecProbeProcesses, p)", + "\t}", + "\treturn notExecProbeProcesses, compliantObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testLimitedUseOfExecProbes", + "kind": "function", + "source": [ + "func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcounter := 0", + "\tfor _, put := range env.Pods {", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tif cut.LivenessProbe != nil \u0026\u0026 cut.LivenessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.LivenessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a LivenessProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a LivenessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec 
probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.StartupProbe != nil \u0026\u0026 cut.StartupProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.StartupProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a StartupProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a StartupProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.ReadinessProbe != nil \u0026\u0026 cut.ReadinessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.ReadinessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a ReadinessProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, 
fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a ReadinessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// If there \u003e=10 exec probes, mark the entire cluster as a failure", + "\tif counter \u003e= maxNumberOfExecProbes {", + "\t\tcheck.LogError(\"CNF has 10 or more exec probes (nb-exec-probes=%d)\", counter)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has 10 or more exec probes (%d exec probes)\", counter), testhelper.CnfType, false))", + "\t} else {", + "\t\tcheck.LogInfo(\"CNF has less than 10 exec probes (nb-exec-probes=%d)\", counter)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has less than 10 exec probes (%d exec probes)\", counter), testhelper.CnfType, true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testRtAppsNoExecProbes", + "kind": "function", + "source": [ + "func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcuts := env.GetNonGuaranteedPodContainersWithoutHostPID()", + "\tfor 
_, cut := range cuts {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !cut.HasExecProbes() {", + "\t\t\tcheck.LogInfo(\"Container %q does not define exec probes\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not define exec probes\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprocesses, err := crclient.GetContainerProcesses(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not determine the processes pids for container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the processes pids for container\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\tnotExecProbeProcesses, compliantObjectsProbes := filterProbeProcesses(processes, cut)", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsProbes...)", + "\t\tallProcessesCompliant := true", + "\t\tfor _, p := range notExecProbeProcesses {", + "\t\t\tcheck.LogInfo(\"Testing process %q\", p)", + "\t\t\tschedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)", + "\t\t\tif err != nil {", + "\t\t\t\t// If the process does not exist anymore it means that it has finished since the time the process list", + "\t\t\t\t// was retrieved. 
In this case, just ignore the error and continue processing the rest of the processes.", + "\t\t\t\tif strings.Contains(err.Error(), noProcessFoundErrMsg) {", + "\t\t\t\t\tcheck.LogWarn(\"Container process %q disappeared\", p)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process disappeared\", true).", + "\t\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogError(\"Could not determine the scheduling policy for container %q (pid=%d), err: %v\", cut, p.Pid, err)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the scheduling policy for container\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif scheduling.PolicyIsRT(schedPolicy) {", + "\t\t\t\tcheck.LogError(\"Container %q defines exec probes while having a RT scheduling policy for process %q\", cut, p)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes while having a RT scheduling policy\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif allProcessesCompliant {", + "\t\t\tcheck.LogInfo(\"Container %q defines exec probes but does not have a RT scheduling policy\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes but does not have a RT scheduling 
policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testSchedulingPolicyInCPUPool", + "kind": "function", + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for each processes running in that pid 
namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + "\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testContainersFsDiff", + "kind": "function", + "source": [ + "func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "", + "\t\t// If the probe pod is not found, we cannot run the test.", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Check whether or not a container is available to prevent a panic.", + "\t\tif len(probePod.Spec.Containers) == 0 {", + "\t\t\tcheck.LogError(\"Probe Pod %q has no containers\", probePod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod has no containers\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tctxt := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tfsDiffTester := cnffsdiff.NewFsDiffTester(check, clientsholder.GetClientsHolder(), ctxt, env.OpenshiftVersion)", + "\t\tfsDiffTester.RunTest(cut.UID)", + "\t\tswitch fsDiffTester.GetResults() {", + "\t\tcase testhelper.SUCCESS:", + 
"\t\t\tcheck.LogInfo(\"Container %q is not modified\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not modified\", true))", + "\t\t\tcontinue", + "\t\tcase testhelper.FAILURE:", + "\t\t\tcheck.LogError(\"Container %q modified (changed folders: %v, deleted folders: %v\", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is modified\", false).", + "\t\t\t\tAddField(\"ChangedFolders\", strings.Join(fsDiffTester.ChangedFolders, \",\")).", + "\t\t\t\tAddField(\"DeletedFolders\", strings.Join(fsDiffTester.DeletedFolders, \",\")))", + "", + "\t\tcase testhelper.ERROR:", + "\t\t\tcheck.LogError(\"Could not run fs-diff in Container %q, err: %v\", cut, fsDiffTester.Error)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Error while running fs-diff\", false).AddField(testhelper.Error, fsDiffTester.Error.Error()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsRedHatRelease", + "kind": "function", + "source": [ + "func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tbaseImageTester := isredhat.NewBaseImageTester(clientsholder.GetClientsHolder(), clientsholder.NewContext(cut.Namespace, cut.Podname, cut.Name))", + "", + "\t\tresult, err := baseImageTester.TestContainerIsRedHatRelease()", + "\t\tif err != nil {", + 
"\t\t\tcheck.LogError(\"Could not collect release information from Container %q, err=%v\", cut, err)", + "\t\t}", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Container %q has failed the RHEL release check\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Failed the RHEL release check\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has passed the RHEL release check\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Passed the RHEL release check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightContainerCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects 
[]*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t\t\tfor _, cut := range containers {", + "\t\t\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Container %q has passed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has failed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has errored Preflight test %q, err: %v\", cut, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Container has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn 
out", + "}" + ] + }, + { + "name": "NewCrdReportObject", + "qualifiedName": "NewCrdReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewCrdReportObject Creates a report object for a custom resource definition\n\nThis function takes the name, version, reason, and compliance status of a\nCRD. It builds a ReportObject by delegating to NewReportObject, then adds\nfields for the CRD's name and version before returning the constructed\nobject.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:445", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testScaleCrd", + "kind": "function", + "source": [ + "func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects 
[]*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.ScaleCrUnderTest {", + "\t\tgroupResourceSchema := env.ScaleCrUnderTest[i].GroupResourceSchema", + "\t\tscaleCr := env.ScaleCrUnderTest[i].Scale", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, scaleCr.Name, scaleCr.Namespace, scaleCr.Kind); hpa != nil {", + "\t\t\tif !scaling.TestScaleHPACrd(\u0026scaleCr, hpa, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"CR has failed the scaling test: %s\", scaleCr.GetName())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"cr has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !scaling.TestScaleCrd(\u0026scaleCr, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"CR has failed the non-HPA scale test: %s\", scaleCr.GetName())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"CR is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR is scalable\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorSingleCrdOwner", + "kind": "function", + "source": [ + "func testOperatorSingleCrdOwner(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Map each CRD to a list of operators that own it", + "\tcrdOwners := map[string][]string{}", + "\tfor i := range env.Operators {", + "\t\toperator := 
env.Operators[i]", + "\t\townedCrds := operator.Csv.Spec.CustomResourceDefinitions.Owned", + "", + "\t\t// Helper map to filter out different versions of the same CRD name.", + "\t\tuniqueOwnedCrds := map[string]struct{}{}", + "\t\tfor j := range ownedCrds {", + "\t\t\tuniqueOwnedCrds[ownedCrds[j].Name] = struct{}{}", + "\t\t}", + "", + "\t\t// Now we can append the operator as CRD owner", + "\t\tfor crdName := range uniqueOwnedCrds {", + "\t\t\tcrdOwners[crdName] = append(crdOwners[crdName], operator.Name)", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"CRDs owned by operator %s: %+v\", operator.Name, uniqueOwnedCrds)", + "\t}", + "", + "\t// Flag those that are owned by more than one operator", + "\tfor crd, opList := range crdOwners {", + "\t\tif len(opList) \u003e 1 {", + "\t\t\tcheck.LogError(\"CRD %q is owned by more than one operator (owners: %v)\", crd, opList)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewCrdReportObject(crd, \"\", \"CRD is owned by more than one operator\", false).", + "\t\t\t\t\tAddField(testhelper.OperatorList, strings.Join(opList, \", \")))", + "\t\t} else {", + "\t\t\tcheck.LogDebug(\"CRD %q is owned by a single operator (%v)\", crd, opList[0])", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewCrdReportObject(crd, \"\", \"CRD is owned by a single operator\", true).", + "\t\t\t\t\tAddField(testhelper.OperatorName, opList[0]))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "name": "NewDeploymentReportObject", + 
"qualifiedName": "NewDeploymentReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewDeploymentReportObject Creates a deployment report object with namespace, name, reason, and compliance status\n\nThis function builds a new ReportObject by first invoking the generic\nconstructor with the provided reason, type identifier for deployments, and\ncompliance flag. It then adds fields for the namespace and deployment name to\nthe object's key/value store. The resulting pointer is returned for further\nuse or inspection.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:419", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout 
time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + 
"\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testHighAvailability", + "kind": "function", + "source": [ + "func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, dp := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", dp.ToString())", + "\t\tif dp.Spec.Replicas == nil || *(dp.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (number of replicas must be greater than 1)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any 
AffinityRequired pods", + "\t\t//nolint:goconst", + "\t\tif dp.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping Deployment %q with affinity required\", dp.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif dp.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tdp.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (PodAntiAffinity must be defined)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q has valid high availability\", dp.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment has valid high availability\", true))", + "\t\t}", + "\t}", + "\tfor _, st := range env.StatefulSets {", + "\t\tif st.Spec.Replicas == nil || *(st.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (number of replicas must be greater than 1)\", st.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any AffinityRequired pods", + "\t\tif st.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping StatefulSet %q with affinity required\", st.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif st.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tst.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (PodAntiAffinity must be defined)\", st.ToString())", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q has valid high availability\", st.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet has valid high availability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. 
The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewHelmChartReportObject", + "qualifiedName": "NewHelmChartReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewHelmChartReportObject Creates a report object for a Helm chart\n\nIt constructs a new report object with the provided namespace, chart name,\nreason, and compliance status. The function first creates a base report\nobject using the supplied reason and compliance flag, then adds fields for\nthe namespace and chart name to that object. 
The completed report object is\nreturned for use in testing or reporting.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:366", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testHelmCertified", + "kind": "function", + "source": [ + "func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\thelmchartsReleases := env.HelmChartReleases", + "", + "\t// Collect all of the failed helm charts", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, helm := range helmchartsReleases {", + "\t\tcheck.LogInfo(\"Testing Helm Chart Release %q\", helm.Name)", + "\t\tif !validator.IsHelmChartCertified(helm, env.K8sVersion) {", + "\t\t\tcheck.LogError(\"Helm Chart %q version %q is 
not certified.\", helm.Name, helm.Chart.Metadata.Version)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart is not certified\", false).", + "\t\t\t\tSetType(testhelper.HelmVersionType).", + "\t\t\t\tAddField(testhelper.Version, helm.Chart.Metadata.Version))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Helm Chart %q version %q is certified.\", helm.Name, helm.Chart.Metadata.Version)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart is certified\", true).", + "\t\t\t\tSetType(testhelper.HelmVersionType).", + "\t\t\t\tAddField(testhelper.Version, helm.Chart.Metadata.Version))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testHelmVersion", + "kind": "function", + "source": [ + "func testHelmVersion(check *checksdb.Check) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tclients := clientsholder.GetClientsHolder()", + "\t// Get the Tiller pod in the specified namespace", + "\tpodList, err := clients.K8sClient.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{", + "\t\tLabelSelector: \"app=helm,name=tiller\",", + "\t})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Could not get Tiller pod, err=%v\", err)", + "\t}", + "", + "\tif len(podList.Items) == 0 {", + "\t\tcheck.LogInfo(\"Tiller pod not found in any namespaces. 
Helm version is v3.\")", + "\t\tfor _, helm := range env.HelmChartReleases {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart was installed with helm v3\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.LogError(\"Tiller pod found, Helm version is v2 but v3 required\")", + "\tfor i := range podList.Items {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(podList.Items[i].Namespace, podList.Items[i].Name,", + "\t\t\t\"This pod is a Tiller pod. Helm Chart version is v2 but needs to be v3 due to the security risks associated with Tiller\", false))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, HelmType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aHelmChartName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewNamespacedNamedReportObject", + "qualifiedName": "NewNamespacedNamedReportObject", + "exported": true, + "signature": "func(string, string, bool, string, string)(*ReportObject)", + "doc": "NewNamespacedNamedReportObject Creates a report object with namespace and name fields\n\nIt builds a new ReportObject using the reason, type, and compliance flag,\nthen appends the specified namespace and name as additional fields. 
The\nresulting pointer is returned for further use.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:496", + "calls": [ + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testCrdRoles", + "kind": "function", + "source": [ + "func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcrdResources := rbac.GetCrdResources(env.Crds)", + "\tfor roleIndex := range env.Roles {", + "\t\tif !stringhelper.StringInSlice[string](env.Namespaces, env.Roles[roleIndex].Namespace, false) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tallRules := rbac.GetAllRules(\u0026env.Roles[roleIndex])", + "", + "\t\tmatchingRules, nonMatchingRules := rbac.FilterRulesNonMatchingResources(allRules, crdResources)", + "\t\tif 
len(matchingRules) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor _, aRule := range matchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) applies to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"This applies to CRDs under test\", testhelper.RoleRuleType, true, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "\t\tfor _, aRule := range nonMatchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) does not apply to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"This rule does not apply to CRDs under test\", testhelper.RoleRuleType, false, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "", + "\t\tif len(nonMatchingRules) == 0 {", + "\t\t\tcheck.LogInfo(\"Role %q rules only apply to CRDs under test\", env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules only apply to CRDs under test\",", + "\t\t\t\ttesthelper.RoleType, true, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Role %q rules apply 
to a mix of CRDs under test and others.\", env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules apply to a mix of CRDs under test and others. See non compliant role rule objects.\",", + "\t\t\t\ttesthelper.RoleType, false, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace).AddField(Name, aName)", + "}" + ] + }, + { + "name": "NewNamespacedReportObject", + "qualifiedName": "NewNamespacedReportObject", + "exported": true, + "signature": "func(string, string, bool, string)(*ReportObject)", + "doc": "NewNamespacedReportObject Creates a ReportObject that includes namespace information\n\nThe function constructs a new report object with the provided reason, type,\nand compliance status, then appends an additional field for the namespace. It\nreturns the resulting report object. 
This allows callers to generate reports\nthat are scoped to a specific Kubernetes namespace.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:487", + "calls": [ + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testCrdRoles", + "kind": "function", + "source": [ + "func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcrdResources := rbac.GetCrdResources(env.Crds)", + "\tfor roleIndex := range env.Roles {", + "\t\tif !stringhelper.StringInSlice[string](env.Namespaces, env.Roles[roleIndex].Namespace, false) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tallRules := rbac.GetAllRules(\u0026env.Roles[roleIndex])", + "", + "\t\tmatchingRules, nonMatchingRules := rbac.FilterRulesNonMatchingResources(allRules, crdResources)", + "\t\tif len(matchingRules) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor _, aRule := range matchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) applies to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, 
aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"This applies to CRDs under test\", testhelper.RoleRuleType, true, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "\t\tfor _, aRule := range nonMatchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) does not apply to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"This rule does not apply to CRDs under test\", testhelper.RoleRuleType, false, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "", + "\t\tif len(nonMatchingRules) == 0 {", + "\t\t\tcheck.LogInfo(\"Role %q rules only apply to CRDs under test\", env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules only apply to CRDs under test\",", + "\t\t\t\ttesthelper.RoleType, true, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Role %q rules apply to a mix of CRDs under test and others.\", env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules apply to a mix of CRDs under test and others. 
See non compliant role rule objects.\",", + "\t\t\t\ttesthelper.RoleType, false, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNamespace", + "kind": "function", + "source": [ + "func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, namespace := range env.Namespaces {", + "\t\tcheck.LogInfo(\"Testing namespace %q\", namespace)", + "\t\tnamespaceCompliant := true", + "\t\tfor _, invalidPrefix := range invalidNamespacePrefixes {", + "\t\t\tif strings.HasPrefix(namespace, invalidPrefix) {", + "\t\t\t\tcheck.LogError(\"Namespace %q has invalid prefix %q\", namespace, invalidPrefix)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has invalid prefix\", testhelper.Namespace, false, namespace))", + "\t\t\t\tnamespaceCompliant = false", + "\t\t\t\tbreak // Break out of the loop if we find an invalid prefix", + "\t\t\t}", + "\t\t}", + "\t\tif namespaceCompliant {", + "\t\t\tcheck.LogInfo(\"Namespace %q has valid prefix\", namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has valid prefix\", testhelper.Namespace, true, namespace))", + "\t\t}", + "\t}", + "\tif failedNamespacesNum := len(nonCompliantObjects); failedNamespacesNum \u003e 0 {", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "", + "\tinvalidCrs, err := namespace.TestCrsNamespaces(env.Crds, env.Namespaces, check.GetLogger())", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error while testing CRs namespaces, err=%v\", err)", + "\t\treturn", + "\t}", + "", + "\tinvalidCrsNum := 
namespace.GetInvalidCRsNum(invalidCrs, check.GetLogger())", + "\tif invalidCrsNum \u003e 0 {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"CRs are not in the configured namespaces\", testhelper.Namespace, false))", + "\t} else {", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"CRs are in the configured namespaces\", testhelper.Namespace, true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces", + "kind": "function", + "source": [ + "func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOnlySingleNamespacedOperatorsAllowedInTenantNamespaces\")", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\toperatorNamespaces := make(map[string]bool)", + "\tfor _, operator := range env.Operators {", + "\t\toperatorNamespace := operator.Csv.Annotations[\"olm.operatorNamespace\"]", + "\t\tfor _, namespace := range env.Namespaces {", + "\t\t\tif namespace == operatorNamespace {", + "\t\t\t\toperatorNamespaces[operatorNamespace] = true", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tfor operatorNamespace := range operatorNamespaces { // operator installed namespace", + "\t\tcheck.LogInfo(\"Checking if namespace %s contains only valid single/ multi namespaced operators\", operatorNamespace)", + "", + "\t\tisDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators,", + "\t\t\tcsvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err := checkValidOperatorInstallation(operatorNamespace)", + "", + "\t\tcheck.LogInfo(\"isDedicatedOperatorNamespace=%t, 
singleOrMultiNamespaceOperators=%s, nonSingleOrMultiNamespaceOperators=%s, csvsTargetingNamespace=%s, operatorsFoundButNotUnderTest=%s, podsNotBelongingToOperators=%s\", isDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators) //nolint:lll", + "", + "\t\tif err != nil {", + "\t\t\tmsg := fmt.Sprintf(\"Operator namespace %s check got error %v\", operatorNamespace, err)", + "\t\t\tcheck.LogError(\"%s\", msg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isDedicatedOperatorNamespace {", + "\t\t\tvar msg string", + "\t\t\tif len(singleOrMultiNamespaceOperators) == 0 {", + "\t\t\t\tmsg = \"Namespace contains no installed single/multi namespace operators\"", + "\t\t\t} else {", + "\t\t\t\tmsg = fmt.Sprintf(\"Namespace is dedicated to single/multi namespace operators (%s) \", strings.Join(singleOrMultiNamespaceOperators, \", \"))", + "\t\t\t}", + "", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, true, operatorNamespace))", + "\t\t} else {", + "\t\t\tmsg := \"Operator namespace is not dedicated to single/multi operators because \"", + "", + "\t\t\tif len(nonSingleOrMultiNamespaceOperators) != 0 {", + "\t\t\t\tmsg += \"- operators are installed with an install mode different from single/multi (\" + strings.Join(nonSingleOrMultiNamespaceOperators, \", \") + \")\\n\"", + "\t\t\t}", + "", + "\t\t\tif len(csvsTargetingNamespace) != 0 {", + "\t\t\t\tmsg += \"- this namespace is the target namespace of other operators (\" + strings.Join(csvsTargetingNamespace, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(operatorsFoundButNotUnderTest) != 0 {", + "\t\t\t\tmsg += \"- operators not under test found (\" + 
strings.Join(operatorsFoundButNotUnderTest, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(podsNotBelongingToOperators) != 0 {", + "\t\t\t\tmsg += \"- invalid non operator pods found (\" + strings.Join(podsNotBelongingToOperators, \", \") + \")\"", + "\t\t\t}", + "", + "\t\t\tnonCompliantNs := testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace)", + "", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantNs)", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "NewNodeReportObject", + "qualifiedName": "NewNodeReportObject", + "exported": true, + "signature": "func(string, string, bool)(*ReportObject)", + "doc": "NewNodeReportObject Creates a node-specific report object\n\nThe function builds a ReportObject for a node by calling the generic\nconstructor with the provided reason, type identifier, and compliance flag.\nIt then attaches the node name as an additional field before returning the\nfully populated object.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:314", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + 
"\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. 
The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testHugepages", + "kind": "function", + "source": [ + "func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Nodes {", + "\t\tnode := env.Nodes[i]", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tif !node.IsWorkerNode() {", + "\t\t\tcheck.LogInfo(\"Node %q is not a worker node\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Not a worker node\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprobePod, exist := env.ProbePods[nodeName]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Could not find a Probe Pod in node %q.\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"tnf probe pod 
not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\thpTester, err := hugepages.NewTester(\u0026node, probePod, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get node hugepages tester for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Unable to get node hugepages tester\", false))", + "\t\t}", + "", + "\t\tif err := hpTester.Run(); err != nil {", + "\t\t\tcheck.LogError(\"Hugepages check failed for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the hugepages check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the hugepages check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testHyperThreadingEnabled", + "kind": "function", + "source": [ + "func testHyperThreadingEnabled(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tbaremetalNodes := env.GetBaremetalNodes()", + "\tfor _, node := range baremetalNodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tenable, err := node.IsHyperThreadNode(env)", + "\t\t//nolint:gocritic", + "\t\tif enable {", + "\t\t\tcheck.LogInfo(\"Node %q has hyperthreading enabled\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has hyperthreading enabled\", true))", + "\t\t} else if err != nil {", + "\t\t\tcheck.LogError(\"Hyperthreading check fail 
for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Error with executing the check for hyperthreading: \"+err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Node %q has hyperthreading disabled\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has hyperthreading disabled \", false))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsSELinuxEnforcing", + "kind": "function", + "source": [ + "func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst (", + "\t\tgetenforceCommand = `chroot /host getenforce`", + "\t\tenforcingString = \"Enforcing\\n\"", + "\t)", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\to := clientsholder.GetClientsHolder()", + "\tnodesFailed := 0", + "\tnodesError := 0", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, getenforceCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tcheck.LogError(\"Could not execute command %q in Probe Pod %q, errStr: %q, err: %v\", getenforceCommand, probePod, errStr, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(probePod.Namespace, probePod.Name, \"Failed to execute command\", false))", + "\t\t\tnodesError++", + "\t\t\tcontinue", + "\t\t}", + "\t\tif outStr != enforcingString {", + "\t\t\tcheck.LogError(\"Node %q is not running SELinux, %s command returned: %s\", probePod.Spec.NodeName, getenforceCommand, outStr)", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is not enforced\", false))", + "\t\t\tnodesFailed++", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q is running SELinux\", probePod.Spec.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is enforced\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testNodeOperatingSystemStatus", + "kind": "function", + "source": [ + "func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfailedControlPlaneNodes := []string{}", + "\tfailedWorkerNodes := []string{}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, node := range env.Nodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\t// Get the OSImage which should tell us what version of operating system the node is running.", + "\t\tcheck.LogInfo(\"Node %q is running operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "", + "\t\t// Control plane nodes must be RHCOS (also CentOS Stream starting in OCP 4.13)", + "\t\t// Per the release notes from OCP documentation:", + "\t\t// \"You must use RHCOS machines for the control plane, and you can use either RHCOS or RHEL for compute machines.\"", + "\t\tif node.IsControlPlaneNode() \u0026\u0026 !node.IsRHCOS() \u0026\u0026 !node.IsCSCOS() {", + "\t\t\tcheck.LogError(\"Control plane node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\tfailedControlPlaneNodes = append(failedControlPlaneNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewNodeReportObject(nodeName, \"Control plane node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Worker nodes can either be RHEL or RHCOS", + "\t\tif node.IsWorkerNode() {", + "\t\t\t//nolint:gocritic", + "\t\t\tif node.IsRHCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHCOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHCOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tif shortVersion == operatingsystem.NotFoundStr {", + "\t\t\t\t\tcheck.LogInfo(\"Node %q has an RHCOS operating system that is not found in our internal database. 
Skipping as to not cause failures due to database mismatch.\", nodeName)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHCOS version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHCOS shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHCOSCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).", + "\t\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).", + "\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t} else if node.IsCSCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetCSCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather CentOS Stream CoreOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather CentOS Stream CoreOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// Warning: CentOS Stream CoreOS has not been released yet in any", + "\t\t\t\t// OCP RC/GA 
versions, so for the moment, we cannot compare the", + "\t\t\t\t// version with the OCP one, or retrieve it on the internal database", + "\t\t\t\tmsg := `", + "\t\t\t\t\tNode %s is using CentOS Stream CoreOS %s, which is not being used yet in any", + "\t\t\t\t\tOCP RC/GA version. Relaxing the conditions to check the OS as a result.", + "\t\t\t\t\t`", + "\t\t\t\tcheck.LogDebug(msg, nodeName, shortVersion)", + "\t\t\t} else if node.IsRHEL() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHELVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHEL version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHEL version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHEL version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHEL shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHELCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to 
be running a compatible OS\", true).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tif n := len(failedControlPlaneNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of control plane nodes running non-RHCOS based operating systems: %d\", n)", + "\t}", + "", + "\tif n := len(failedWorkerNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of worker nodes running non-RHCOS or non-RHEL based operating systems: %d\", n)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testSysctlConfigs", + "kind": "function", + "source": [ + "func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"tnf probe pod not 
found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsysctlSettings, err := sysctlconfig.GetSysctlSettings(env, cut.NodeName)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get sysctl settings for node %q, error: %v\", cut.NodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Could not get sysctl settings\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tmcKernelArgumentsMap := bootparams.GetMcKernelArguments(env, cut.NodeName)", + "\t\tvalidSettings := true", + "\t\tfor key, sysctlConfigVal := range sysctlSettings {", + "\t\t\tif mcVal, ok := mcKernelArgumentsMap[key]; ok {", + "\t\t\t\tif mcVal != sysctlConfigVal {", + "\t\t\t\t\tcheck.LogError(\"Kernel config mismatch in node %q for %q (sysctl value: %q, machine config value: %q)\",", + "\t\t\t\t\t\tcut.NodeName, key, sysctlConfigVal, mcVal)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, fmt.Sprintf(\"Kernel config mismatch for %s\", key), false))", + "\t\t\t\t\tvalidSettings = false", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif validSettings {", + "\t\t\tcheck.LogInfo(\"Node %q passed the sysctl config check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the sysctl config check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype 
badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. 
Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testUnalteredBootParams", + "kind": "function", + "source": [ + "func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcheck.LogInfo(\"Skipping node %q: already checked.\", cut.NodeName)", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "", + "\t\terr := bootparams.TestBootParamsHelper(env, cut, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Node %q failed the boot params check\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Failed the boot params check\", false).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the boot params check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the boot params check\", true).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = 
NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewOperatorReportObject", + "qualifiedName": "NewOperatorReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewOperatorReportObject Creates a report object for an operator\n\nThe function builds a new ReportObject using the provided namespace, operator\nname, reason, and compliance flag. It initializes the base object with type\ninformation, then adds fields for namespace and operator name before\nreturning it.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:379", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testAllOperatorCertified", + "kind": "function", + "source": [ + "func testAllOperatorCertified(check 
*checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\toperatorsUnderTest := env.Operators", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tocpMinorVersion := \"\"", + "\tif provider.IsOCPCluster() {", + "\t\t// Converts\tmajor.minor.patch version format to major.minor", + "\t\tconst majorMinorPatchCount = 3", + "\t\tsplitVersion := strings.SplitN(env.OpenshiftVersion, \".\", majorMinorPatchCount)", + "\t\tocpMinorVersion = splitVersion[0] + \".\" + splitVersion[1]", + "\t}", + "\tfor _, operator := range operatorsUnderTest {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "\t\tisCertified := validator.IsOperatorCertified(operator.Name, ocpMinorVersion)", + "\t\tif !isCertified {", + "\t\t\tcheck.LogError(\"Operator %q (channel %q) failed to be certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator failed to be certified for OpenShift\", false).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator %q (channel %q) is certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator certified OK\", true).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testMultipleSameOperators", + "kind": "function", + "source": [ + 
"func testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Ensure the CSV name is unique and not installed more than once.", + "\t// CSV Names are unique and OLM installs them with name.version format.", + "\t// So, we can check if the CSV name is installed more than once.", + "", + "\tcheck.LogInfo(\"Checking if the operator is installed more than once\")", + "", + "\tfor _, op := range env.AllOperators {", + "\t\tcheck.LogDebug(\"Checking operator %q\", op.Name)", + "\t\tcheck.LogDebug(\"Number of operators to check %s against: %d\", op.Name, len(env.AllOperators))", + "\t\tfor _, op2 := range env.AllOperators {", + "\t\t\t// Check if the operator is installed more than once.", + "\t\t\tif OperatorInstalledMoreThanOnce(op, op2) {", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(", + "\t\t\t\t\top.Namespace, op.Name, \"Operator is installed more than once\", false))", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(", + "\t\t\top.Namespace, op.Name, \"Operator is installed only once\", true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCrdOpenAPISpec", + "kind": "function", + "source": [ + "func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdOpenAPISpec\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tif openapi.IsCRDDefinedWithOpenAPI3Schema(crd) {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is defined with OpenAPIV3 
schema \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is defined with OpenAPIV3 schema \", true).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is not defined with OpenAPIV3 schema \", crd.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is not defined with OpenAPIV3 schema \", false).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCrdVersioning", + "kind": "function", + "source": [ + "func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tdoesUseK8sVersioning := true", + "\t\tnonCompliantVersion := \"\"", + "", + "\t\tfor _, crdVersion := range crd.Spec.Versions {", + "\t\t\tversionName := crdVersion.Name", + "\t\t\tcheck.LogDebug(\"Checking for Operator CRD %s with version %s\", crd.Name, versionName)", + "", + "\t\t\tif !versions.IsValidK8sVersion(versionName) {", + "\t\t\t\tdoesUseK8sVersioning = false", + "\t\t\t\tnonCompliantVersion = versionName", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif doesUseK8sVersioning {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s has valid K8s versioning \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has valid K8s versioning \", true).AddField(testhelper.CrdVersion, 
crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator CRD %s has invalid K8s versioning %s \", crd.Name, nonCompliantVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has invalid K8s versioning \", false).AddField(testhelper.CrdVersion, crd.Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorInstallationAccessToSCC", + "kind": "function", + "source": [ + "func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcsv := operator.Csv", + "\t\tcheck.LogDebug(\"Checking operator %s\", operator)", + "\t\tclusterPermissions := csv.Spec.InstallStrategy.StrategySpec.ClusterPermissions", + "\t\tif len(clusterPermissions) == 0 {", + "\t\t\tcheck.LogInfo(\"No clusterPermissions found in %s's CSV\", operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"No RBAC rules for Security Context Constraints found in CSV (no clusterPermissions found)\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Fails in case any cluster permission has a rule that refers to securitycontextconstraints.", + "\t\tif access.PermissionsHaveBadRule(clusterPermissions) {", + "\t\t\tcheck.LogInfo(\"Operator %s has a rule for a service account to access cluster SCCs\",", + "\t\t\t\toperator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"One or more RBAC rules for Security Context Constraints found in CSV\", 
false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"No RBAC rules for Security Context Constraints found in CSV\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorInstallationPhaseSucceeded", + "kind": "function", + "source": [ + "func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, op := range env.Operators {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", op)", + "\t\tif phasecheck.WaitOperatorReady(op.Csv) {", + "\t\t\tcheck.LogInfo(\"Operator %q is in Succeeded phase\", op)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator on Succeeded state \", true).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator %q is not in Succeeded phase (phase=%q)\", op, op.Csv.Status.Phase)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator not in Succeeded state \", false).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorOlmSkipRange", + "kind": "function", + "source": [ + "func testOperatorOlmSkipRange(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + 
"", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "", + "\t\tif operator.Csv.Annotations[\"olm.skipRange\"] == \"\" {", + "\t\t\tcheck.LogError(\"OLM skipRange not found for Operator %q\", operator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM skipRange not found for operator\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"OLM skipRange %q found for Operator %q\", operator.Csv.Annotations[\"olm.skipRange\"], operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM skipRange found for operator\", true).", + "\t\t\t\tAddField(\"olm.SkipRange\", operator.Csv.Annotations[\"olm.skipRange\"]))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorOlmSubscription", + "kind": "function", + "source": [ + "func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "\t\tif operator.SubscriptionName == \"\" {", + "\t\t\tcheck.LogError(\"OLM subscription not found for Operator %q\", operator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM subscription not found for operator, so it is not installed via OLM\", false).", + "\t\t\t\tAddField(testhelper.SubscriptionName, operator.SubscriptionName))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"OLM 
subscription %q found for Operator %q\", operator.SubscriptionName, operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"install-status-no-privilege (subscription found)\", true).", + "\t\t\t\tAddField(testhelper.SubscriptionName, operator.SubscriptionName))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorSemanticVersioning", + "kind": "function", + "source": [ + "func testOperatorSemanticVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorSemanticVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, operator := range env.Operators {", + "\t\toperatorVersion := operator.Version", + "\t\tcheck.LogInfo(\"Testing Operator %q for version %s\", operator, operatorVersion)", + "", + "\t\tif versions.IsValidSemanticVersion(operatorVersion) {", + "\t\t\tcheck.LogInfo(\"Operator %q has a valid semantic version %s\", operator, operatorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has a valid semantic version \", true).AddField(testhelper.Version, operatorVersion))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator %q has an invalid semantic version %s\", operator, operatorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has an invalid semantic version \", false).AddField(testhelper.Version, operatorVersion))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightOperatorCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t\t\tfor _, op := range operators {", + "\t\t\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Operator %q has passed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has failed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has errored Preflight test %q, err: %v\", op, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, fmt.Sprintf(\"Operator has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewPodReportObject", + "qualifiedName": "NewPodReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewPodReportObject Creates a report object for a pod\n\nThe function builds a ReportObject by calling NewReportObject with the given\nreason, type set to PodType, and compliance flag. It then attaches the\nnamespace and pod name as fields on the resulting object. 
Finally, it returns\na pointer to this populated ReportObject.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:352", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "test1337UIDs", + "kind": "function", + "source": [ + "func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Note this test is only ran as part of the 'extended' test suite.", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tconst leetNum = 1337", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.IsRunAsUserID(leetNum) {", + "\t\t\tcheck.LogError(\"Pod %q is using securityContext RunAsUser 1337\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using securityContext RunAsUser 1337\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not using securityContext RunAsUser 1337\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not using securityContext RunAsUser 1337\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testAutomountServiceToken", + "kind": "function", + "source": [ + "func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q uses the default service account name.\", put.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found with default service account name\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Evaluate the pod's automount service tokens and any attached service accounts", + "\t\tclient := clientsholder.GetClientsHolder()", + "\t\tpodPassed, newMsg := rbac.EvaluateAutomountTokens(client.K8sClient.CoreV1(), put)", + "\t\tif !podPassed {", + "\t\t\tcheck.LogError(\"%s\", newMsg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, newMsg, false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q does not have automount service tokens set to true\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have automount 
service tokens set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNamespaceResourceQuota", + "kind": "function", + "source": [ + "func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\t// Look through all of the pods and compare their namespace to any potential", + "\t\t// resource quotas", + "\t\tfoundPodNamespaceRQ := false", + "\t\tfor index := range env.ResourceQuotas {", + "\t\t\t// We are just checking for the existence of the resource quota as of right now.", + "\t\t\t// Read more about the resource quota object here:", + "\t\t\t// https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "\t\t\tif put.Namespace == env.ResourceQuotas[index].Namespace {", + "\t\t\t\tfoundPodNamespaceRQ = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !foundPodNamespaceRQ {", + "\t\t\tcheck.LogError(\"Pod %q is running in a namespace that does not have a ResourceQuota applied.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running in a namespace that does not have a ResourceQuota applied\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is running in a namespace that has a ResourceQuota applied.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running in a namespace that has a ResourceQuota applied\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNoSSHDaemonsAllowed", + "kind": "function", + "source": [ + "func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcut := put.Containers[0]", + "", + "\t\t// 1. Find SSH port", + "\t\tport, err := netutil.GetSSHDaemonPort(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get ssh daemon port on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the ssh port for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif port == \"\" {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsshServicePortNumber, err := strconv.ParseInt(port, 10, 32)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not convert port %q from string to integer on Container %q\", port, cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// 2. 
Check if SSH port is listening", + "\t\tsshPortInfo := netutil.PortInfo{PortNumber: int32(sshServicePortNumber), Protocol: sshServicePortProtocol}", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get the listening ports for Pod %q, err: %v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif _, ok := listeningPorts[sshPortInfo]; ok {", + "\t\t\tcheck.LogError(\"Pod %q is running an SSH daemon\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running an SSH daemon\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodClusterRoleBindings", + "kind": "function", + "source": [ + "func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult, roleRefName, err := put.IsUsingClusterRoleBinding(env.ClusterRoleBindings, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to determine if Pod %q is using a cluster role binding, err=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, 
fmt.Sprintf(\"failed to determine if pod is using a cluster role binding: %v\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\ttopOwners, err := put.GetTopOwner()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get top owners of Pod %q, err=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Error getting top owners of this pod, err=%s\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcsvNamespace, csvName, isOwnedByClusterWideOperator := ownedByClusterWideOperator(topOwners, env)", + "\t\t// Pod is using a cluster role binding but is owned by a cluster wide operator, so it is ok", + "\t\tif isOwnedByClusterWideOperator \u0026\u0026 result {", + "\t\t\tcheck.LogInfo(\"Pod %q is using a cluster role binding but is owned by a cluster-wide operator (Csv %q, namespace %q)\", put, csvName, csvNamespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding but owned by a cluster-wide operator\", true))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif result {", + "\t\t\t// Pod was found to be using a cluster role binding. 
This is not allowed.", + "\t\t\t// Flagging this pod as a failed pod.", + "\t\t\tcheck.LogError(\"Pod %q is using a cluster role binding (roleRefName=%q)\", put, roleRefName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding\", false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogInfo(\"Pod %q is not using a cluster role binding\", put)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not using a cluster role binding\", true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodHostIPC", + "kind": "function", + "source": [ + "func testPodHostIPC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostIPC {", + "\t\t\tcheck.LogError(\"HostIpc is set in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostIpc is set to true\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"HostIpc not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostIpc is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodHostNetwork", + "kind": "function", + "source": [ + "func testPodHostNetwork(check *checksdb.Check, env 
*provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostNetwork {", + "\t\t\tcheck.LogError(\"Host network is set to true in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Host network is set to true\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Host network is set to false in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Host network is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodHostPID", + "kind": "function", + "source": [ + "func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostPID {", + "\t\t\tcheck.LogError(\"HostPid is set in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostPid is set to true\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"HostPid not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostPid is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodHostPath", + "kind": 
"function", + "source": [ + "func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tfor idx := range put.Spec.Volumes {", + "\t\t\tvol := \u0026put.Spec.Volumes[idx]", + "\t\t\tif vol.HostPath != nil \u0026\u0026 vol.HostPath.Path != \"\" {", + "\t\t\t\tcheck.LogError(\"Hostpath path: %q is set in Pod %q.\", vol.HostPath.Path, put)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Hostpath path is set\", false).", + "\t\t\t\t\tSetType(testhelper.HostPathType).", + "\t\t\t\t\tAddField(testhelper.Path, vol.HostPath.Path))", + "\t\t\t\tpodIsCompliant = false", + "\t\t\t}", + "\t\t}", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogError(\"Hostpath path not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Hostpath path is not set\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodRoleBindings", + "kind": "function", + "source": [ + "func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q has an empty or default serviceAccountName\", put)", + "\t\t\t// Add the pod to the non-compliant list", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\"The serviceAccountName is either empty or default\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has a serviceAccountName %q, checking role bindings.\", put, put.Spec.ServiceAccountName)", + "\t\t\t// Loop through the rolebindings and check if they are from another namespace", + "\t\t\tfor rbIndex := range env.RoleBindings {", + "\t\t\t\t// Short circuit if the role binding and the pod are in the same namespace.", + "\t\t\t\tif env.RoleBindings[rbIndex].Namespace == put.Namespace {", + "\t\t\t\t\tcheck.LogInfo(\"Pod %q and the role binding are in the same namespace\", put)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\t// If we make it to this point, the role binding and the pod are in different namespaces.", + "\t\t\t\t// We must check if the pod's service account is in the role binding's subjects.", + "\t\t\t\tfound := false", + "\t\t\t\tfor _, subject := range env.RoleBindings[rbIndex].Subjects {", + "\t\t\t\t\t// If the subject is a service account and the service account is in the same namespace as one of the CNF's namespaces, then continue, this is allowed", + "\t\t\t\t\tif subject.Kind == rbacv1.ServiceAccountKind \u0026\u0026", + "\t\t\t\t\t\tsubject.Namespace == put.Namespace \u0026\u0026", + "\t\t\t\t\t\tsubject.Name == put.Spec.ServiceAccountName \u0026\u0026", + "\t\t\t\t\t\tstringhelper.StringInSlice[string](env.Namespaces, env.RoleBindings[rbIndex].Namespace, false) {", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\t// Finally, if the subject is a service account and the service account is in the same namespace as the pod, then we have a failure", + "\t\t\t\t\tif subject.Kind == rbacv1.ServiceAccountKind \u0026\u0026", + "\t\t\t\t\t\tsubject.Namespace == put.Namespace \u0026\u0026", + "\t\t\t\t\t\tsubject.Name == put.Spec.ServiceAccountName {", + 
"\t\t\t\t\t\tcheck.LogError(\"Pod %q has the following role bindings that do not live in one of the CNF namespaces: %q\", put, env.RoleBindings[rbIndex].Name)", + "", + "\t\t\t\t\t\t// Add the pod to the non-compliant list", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\t\t\"The role bindings used by this pod do not live in one of the CNF namespaces\", false).", + "\t\t\t\t\t\t\t\tAddField(testhelper.RoleBindingName, env.RoleBindings[rbIndex].Name).", + "\t\t\t\t\t\t\t\tAddField(testhelper.RoleBindingNamespace, env.RoleBindings[rbIndex].Namespace).", + "\t\t\t\t\t\t\t\tAddField(testhelper.ServiceAccountName, put.Spec.ServiceAccountName).", + "\t\t\t\t\t\t\t\tSetType(testhelper.PodRoleBinding))", + "\t\t\t\t\t\tfound = true", + "\t\t\t\t\t\tpodIsCompliant = false", + "\t\t\t\t\t\tbreak", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\t// Break of out the loop if we found a role binding that is out of namespace", + "\t\t\t\tif found {", + "\t\t\t\t\tbreak", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// Add pod to the compliant object list", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"All the role bindings used by Pod %q (applied by the service accounts) live in one of the CNF namespaces\", put)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"All the role bindings used by this pod (applied by the service accounts) live in one of the CNF namespaces\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodServiceAccount", + "kind": "function", + "source": [ + "func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q does not have a valid service account name (uses the default service account instead).\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have a valid service account name\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has a valid service account name\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a service account name\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSecConReadOnlyFilesystem", + "kind": "function", + "source": [ + "func testSecConReadOnlyFilesystem(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q in namespace %q\", pod.Name, pod.Namespace)", + "\t\tfor _, cut := range pod.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q in Pod %q\", cut.Name, pod.Name)", + "\t\t\tif cut.IsReadOnlyRootFilesystem(check.GetLogger()) {", + "\t\t\t\tcheck.LogInfo(\"Container %q in Pod %q has a read-only root filesystem.\", cut.Name, pod.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Container has a read-only root filesystem\", true))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Container %q in Pod %q does not have a read-only root filesystem.\", cut.Name, pod.Name)", + "\t\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Container does not have a read-only root filesystem\", false))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSecConRunAsNonRoot", + "kind": "function", + "source": [ + "func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing pod %s/%s\", put.Namespace, put.Name)", + "\t\tnonCompliantContainers, nonComplianceReason := put.GetRunAsNonRootFalseContainers(knownContainersToSkip)", + "\t\tif len(nonCompliantContainers) == 0 {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is configured with RunAsNonRoot=true or RunAsUser!=0 at pod or container level.\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"One or more containers of the pod are running with root user\", false))", + "\t\t\tfor index := range nonCompliantContainers {", + "\t\t\t\tcheck.LogError(\"Pod %s/%s, container %q is not compliant: %s\", put.Namespace, put.Name, nonCompliantContainers[index].Name, nonComplianceReason[index])", + "", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, nonCompliantContainers[index].Name,", + "\t\t\t\t\tnonComplianceReason[index], false))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSysPtraceCapability", + 
"kind": "function", + "source": [ + "func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetShareProcessNamespacePods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tsysPtraceEnabled := false", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif cut.SecurityContext == nil ||", + "\t\t\t\tcut.SecurityContext.Capabilities == nil ||", + "\t\t\t\tlen(cut.SecurityContext.Capabilities.Add) == 0 {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif stringhelper.StringInSlice(cut.SecurityContext.Capabilities.Add, \"SYS_PTRACE\", false) {", + "\t\t\t\tcheck.LogInfo(\"Container %q defines the SYS_PTRACE capability\", cut)", + "\t\t\t\tsysPtraceEnabled = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !sysPtraceEnabled {", + "\t\t\tcheck.LogError(\"Pod %q has process namespace sharing enabled but no container allowing the SYS_PTRACE capability.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled but no container allowing the SYS_PTRACE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testHelmVersion", + "kind": "function", + "source": [ + "func testHelmVersion(check *checksdb.Check) {", + "\tvar compliantObjects 
[]*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tclients := clientsholder.GetClientsHolder()", + "\t// Get the Tiller pod in the specified namespace", + "\tpodList, err := clients.K8sClient.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{", + "\t\tLabelSelector: \"app=helm,name=tiller\",", + "\t})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Could not get Tiller pod, err=%v\", err)", + "\t}", + "", + "\tif len(podList.Items) == 0 {", + "\t\tcheck.LogInfo(\"Tiller pod not found in any namespaces. Helm version is v3.\")", + "\t\tfor _, helm := range env.HelmChartReleases {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart was installed with helm v3\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.LogError(\"Tiller pod found, Helm version is v2 but v3 required\")", + "\tfor i := range podList.Items {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(podList.Items[i].Namespace, podList.Items[i].Name,", + "\t\t\t\"This pod is a Tiller pod. 
Helm Chart version is v2 but needs to be v3 due to the security risks associated with Tiller\", false))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testAffinityRequiredPods", + "kind": "function", + "source": [ + "func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetAffinityRequiredPods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\t// Check if the pod is Affinity compliant.", + "\t\tresult, err := put.IsAffinityCompliant()", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Pod %q is not Affinity compliant, reason=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not Affinity compliant\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is Affinity compliant\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is Affinity compliant\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testCPUIsolation", + "kind": "function", + "source": [ + "func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Individual requirements we are looking for:", + "\t// - CPU Requests and Limits must be in the form of whole units", + "\t// - Resource Requests and Limits must be provided and identical", + "", + "\t// Additional checks if the above pass", + "\t// - 'runtimeClassName' must be specified", + "\t// - Annotations must be provided disabling CPU and IRQ load-balancing.", + "", + "\tvar 
compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.GetGuaranteedPodsWithExclusiveCPUs() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif !put.IsCPUIsolationCompliant() {", + "\t\t\tcheck.LogError(\"Pod %q is not CPU isolated\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not CPU isolated\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is CPU isolated\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is CPU isolated\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodNodeSelectorAndAffinityBestPractices", + "kind": "function", + "source": [ + "func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check *checksdb.Check) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range testPods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcompliantPod := true", + "\t\tif put.HasNodeSelector() {", + "\t\t\tcheck.LogError(\"Pod %q has a node selector. Node selector: %v\", put, put.Spec.NodeSelector)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has node selector\", false))", + "\t\t\tcompliantPod = false", + "\t\t}", + "\t\tif put.Spec.Affinity != nil \u0026\u0026 put.Spec.Affinity.NodeAffinity != nil {", + "\t\t\tcheck.LogError(\"Pod %q has a node affinity clause. 
Node affinity: %v\", put, put.Spec.Affinity.NodeAffinity)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has node affinity\", false))", + "\t\t\tcompliantPod = false", + "\t\t}", + "", + "\t\tif compliantPod {", + "\t\t\tcheck.LogInfo(\"Pod %q has no node selector or affinity\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has no node selector or affinity\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodPersistentVolumeReclaimPolicy", + "kind": "function", + "source": [ + "func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Look through all of the pods, matching their persistent volumes to the list of overall cluster PVs and checking their reclaim status.", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcompliantPod := true", + "\t\t// Loop through all of the volumes attached to the pod.", + "\t\tfor pvIndex := range put.Spec.Volumes {", + "\t\t\t// Skip any volumes that do not have a PVC. 
No need to test them.", + "\t\t\tif put.Spec.Volumes[pvIndex].PersistentVolumeClaim == nil {", + "\t\t\t\tcheck.LogInfo(\"Pod %q does not have a PVC\", put)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// If the Pod Volume is not tied back to a PVC and corresponding PV that has a reclaim policy of DELETE.", + "\t\t\tif !volumes.IsPodVolumeReclaimPolicyDelete(\u0026put.Spec.Volumes[pvIndex], env.PersistentVolumes, env.PersistentVolumeClaims) {", + "\t\t\t\tcheck.LogError(\"Pod %q with volume %q has been found without a reclaim policy of DELETE.\", put, put.Spec.Volumes[pvIndex].Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod contains volume without a reclaim policy of DELETE\", false).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeName, put.Spec.Volumes[pvIndex].Name).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\tcompliantPod = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif compliantPod {", + "\t\t\tcheck.LogInfo(\"Pod %q complies with volume reclaim policy rules\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod complies with volume reclaim policy rules\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodTolerationBypass", + "kind": "function", + "source": [ + "func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tfor _, t := range put.Spec.Tolerations {", 
+ "\t\t\t// Check if the tolerations fall outside the 'default' and are modified versions", + "\t\t\t// Take also into account the qosClass applied to the pod", + "\t\t\tif tolerations.IsTolerationModified(t, put.Status.QOSClass) {", + "\t\t\t\tcheck.LogError(\"Pod %q has been found with non-default toleration %s/%s which is not allowed.\", put, t.Key, t.Effect)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-default toleration\", false).", + "\t\t\t\t\tAddField(testhelper.TolerationKey, t.Key).", + "\t\t\t\t\tAddField(testhelper.TolerationEffect, string(t.Effect)))", + "\t\t\t\tpodIsCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has default toleration\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has default toleration\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsOwnerReference", + "kind": "function", + "source": [ + "func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\to := ownerreference.NewOwnerReference(put.Pod)", + "\t\to.RunTest(check.GetLogger())", + "\t\tif o.GetResults() != testhelper.SUCCESS {", + "\t\t\tcheck.LogError(\"Pod %q found with non-compliant owner reference\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-compliant owner reference\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has compliant owner reference\", 
put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has compliant owner reference\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. 
The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStorageProvisioner", + "kind": "function", + "source": [ + "func testStorageProvisioner(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst localStorageProvisioner = \"kubernetes.io/no-provisioner\"", + "\tconst lvmProvisioner = \"topolvm.io\"", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tvar StorageClasses = env.StorageClassList", + "\tvar Pvc = env.PersistentVolumeClaims", + "\tsnoSingleLocalStorageProvisionner := \"\"", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tusesPvcAndStorageClass := false", + "\t\tfor pvIndex := range put.Spec.Volumes {", + "\t\t\t// Skip any nil persistentClaims.", + "\t\t\tvolume := put.Spec.Volumes[pvIndex]", + "\t\t\tif volume.PersistentVolumeClaim == nil {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\t// We have the list of 
pods/volumes/claims.", + "\t\t\t// Look through the storageClass list for a match.", + "\t\t\tfor i := range Pvc {", + "\t\t\t\tif Pvc[i].Name == put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName \u0026\u0026 Pvc[i].Namespace == put.Namespace {", + "\t\t\t\t\tfor j := range StorageClasses {", + "\t\t\t\t\t\tif Pvc[i].Spec.StorageClassName != nil \u0026\u0026 StorageClasses[j].Name == *Pvc[i].Spec.StorageClassName {", + "\t\t\t\t\t\t\tusesPvcAndStorageClass = true", + "\t\t\t\t\t\t\tcheck.LogDebug(\"Pod %q pvc_name: %s, storageclass_name: %s, provisioner_name: %s\", put, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName,", + "\t\t\t\t\t\t\t\tStorageClasses[j].Name, StorageClasses[j].Provisioner)", + "", + "\t\t\t\t\t\t\tif env.IsSNO() {", + "\t\t\t\t\t\t\t\t// For SNO, only one local storage provisionner is allowed. The first local storage provisioner for this pod is assumed to be the only local storage provisioner allowed in the cluster.", + "\t\t\t\t\t\t\t\tif snoSingleLocalStorageProvisionner == \"\" \u0026\u0026", + "\t\t\t\t\t\t\t\t\t(StorageClasses[j].Provisioner == localStorageProvisioner ||", + "\t\t\t\t\t\t\t\t\t\tStorageClasses[j].Provisioner == lvmProvisioner) {", + "\t\t\t\t\t\t\t\t\tsnoSingleLocalStorageProvisionner = StorageClasses[j].Provisioner", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == snoSingleLocalStorageProvisionner {", + "\t\t\t\t\t\t\t\t\tcheck.LogInfo(\"Pod %q: Local storage (no provisioner or lvms) is recommended for SNO clusters.\", put)", + "\t\t\t\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Local storage (no provisioner or lvms) is recommended for SNO clusters.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + 
"\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == localStorageProvisioner || StorageClasses[j].Provisioner == lvmProvisioner {", + "\t\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: A single type of local storage cluster is recommended for single node clusters. Use lvms or kubernetes noprovisioner, but not both.\", put)", + "\t\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\t\t\t\t\"A single type of local storage cluster is recommended for single node clusters. Use lvms or kubernetes noprovisioner, but not both.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: Non local storage not recommended in single node clusters.\", put)", + "\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Non local storage not recommended in single node clusters.\", false).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t} else {", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == localStorageProvisioner || StorageClasses[j].Provisioner == lvmProvisioner {", + 
"\t\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: Local storage provisioner (no provisioner or lvms) not recommended in multinode clusters.\", put)", + "\t\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Local storage provisioner (no provisioner or lvms) not recommended in multinode clusters.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tcheck.LogInfo(\"Pod %q: Non local storage provisioner recommended in multinode clusters.\", put)", + "\t\t\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Non local storage provisioner recommended in multinode clusters.\", false).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\t// Save as compliant pod in case it's not using any of the existing PVC/StorageClasses of the cluster.", + "\t\t\t// Otherwise, in this cases the check will be marked as skipped.", + "\t\t\t// ToDo: improve this function.", + "\t\t\tif !usesPvcAndStorageClass {", + "\t\t\t\tcheck.LogInfo(\"Pod %q not configured to use local storage\", put)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod not configured to use local storage.\", true))", + "\t\t\t}", 
+ "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testExecProbDenyAtCPUPinning", + "kind": "function", + "source": [ + "func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cpuPinnedPod := range dpdkPods {", + "\t\texecProbeFound := false", + "\t\tfor _, cut := range cpuPinnedPod.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tif cut.HasExecProbes() {", + "\t\t\t\tcheck.LogError(\"Container %q defines an exec probe\", cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, \"Exec prob is not allowed\", false))", + "\t\t\t\texecProbeFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !execProbeFound {", + "\t\t\tcheck.LogInfo(\"Pod %q does not define any exec probe\", cpuPinnedPod)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, \"Exec prob is allowed\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testNetworkAttachmentDefinitionSRIOVUsingMTU", + "kind": "function", + "source": [ + "func testNetworkAttachmentDefinitionSRIOVUsingMTU(check *checksdb.Check, sriovPods []*provider.Pod) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, pod := range sriovPods {", + "\t\tresult, err := pod.IsUsingSRIOVWithMTU()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to check if pod %q uses SRIOV with MTU, err: %v\", pod, err)", 
+ "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Failed to check if pod uses SRIOV with MTU\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif result {", + "\t\t\tcheck.LogInfo(\"Pod %q uses SRIOV with MTU\", pod)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod uses SRIOV with MTU\", true))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the MTU is not set explicitly\", pod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod uses SRIOV but the MTU is not set explicitly\", false))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testNetworkPolicyDenyAll", + "kind": "function", + "source": [ + "func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the pods, looking for corresponding entries within a deny-all network policy (both ingress and egress).", + "\t// This ensures that each pod is accounted for that we are tasked with testing and excludes any pods that are not marked", + "\t// for testing (via the labels).", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tdenyAllEgressFound := false", + "\t\tdenyAllIngressFound := false", + "", + "\t\t// Look through all of the network policies for a matching namespace.", + "\t\tfor index := range env.NetworkPolicies {", + "\t\t\tnetworkPolicy := env.NetworkPolicies[index]", + "\t\t\tcheck.LogInfo(\"Testing Network policy %q against pod %q\", networkPolicy.Name, put)", + "", + "\t\t\t// Skip any network policies that don't match 
the namespace of the pod we are testing.", + "\t\t\tif networkPolicy.Namespace != put.Namespace {", + "\t\t\t\tcheck.LogInfo(\"Skipping Network policy %q (namespace %q does not match Pod namespace %q)\", networkPolicy.Name, networkPolicy.Namespace, put.Namespace)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Match the pod namespace with the network policy namespace.", + "\t\t\tif policies.LabelsMatch(networkPolicy.Spec.PodSelector, put.Labels) {", + "\t\t\t\tvar reason string", + "\t\t\t\tif !denyAllEgressFound {", + "\t\t\t\t\tdenyAllEgressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeEgress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tif !denyAllIngressFound {", + "\t\t\t\t\tdenyAllIngressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeIngress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Network policy has not been found that contains a deny-all rule for both ingress and egress.", + "\t\tpodIsCompliant := true", + "\t\tif !denyAllIngressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default ingress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default ingress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif !denyAllEgressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default egress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, 
\"Pod was found to not have a default egress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has a default ingress/egress deny-all network policy\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a default ingress/egress deny-all network policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testRestartOnRebootLabelOnPodsUsingSriov", + "kind": "function", + "source": [ + "func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods []*provider.Pod) {", + "\tconst (", + "\t\trestartOnRebootLabel = \"restart-on-reboot\"", + "\t)", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, pod := range sriovPods {", + "\t\tcheck.LogInfo(\"Testing SRIOV Pod %q\", pod)", + "", + "\t\tlabelValue, exist := pod.GetLabels()[restartOnRebootLabel]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the label %q was not found.\", pod, restartOnRebootLabel)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV but the label %s was not found\", restartOnRebootLabel), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif labelValue != \"true\" {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the %q label value is not true.\", pod, restartOnRebootLabel)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV but the label %s is not set to true\", restartOnRebootLabel), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Pod %q uses 
SRIOV and the %q label is set to true\", pod, restartOnRebootLabel)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV and the label %s is set to true\", restartOnRebootLabel), true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testUndeclaredContainerPortsUsage", + "kind": "function", + "source": [ + "func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tvar portInfo netutil.PortInfo", + "\tfor _, put := range env.Pods {", + "\t\t// First get the ports declared in the Pod's containers spec", + "\t\tdeclaredPorts := make(map[netutil.PortInfo]bool)", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tfor _, port := range cut.Ports {", + "\t\t\t\tportInfo.PortNumber = port.ContainerPort", + "\t\t\t\tportInfo.Protocol = string(port.Protocol)", + "\t\t\t\tdeclaredPorts[portInfo] = true", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Then check the actual ports that the containers are listening on", + "\t\tfirstPodContainer := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(firstPodContainer)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get container %q listening ports, err: %v\", firstPodContainer, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Failed to get the container's listening ports, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(listeningPorts) == 0 {", + "\t\t\tcheck.LogInfo(\"None of the containers of %q have any listening port.\", put)", + 
"\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"None of the containers have any listening ports\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Verify that all the listening ports have been declared in the container spec", + "\t\tfailedPod := false", + "\t\tfor listeningPort := range listeningPorts {", + "\t\t\tif put.ContainsIstioProxy() \u0026\u0026 netcommons.ReservedIstioPorts[listeningPort.PortNumber] {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on port %d protocol %q, but the pod also contains istio-proxy. Ignoring.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif ok := declaredPorts[listeningPort]; !ok {", + "\t\t\t\tcheck.LogError(\"%q is listening on port %d protocol %q, but that port was not declared in any container spec.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tfailedPod = true", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in no container spec\", false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on declared port %d protocol %q\", put, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in container spec\", true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + 
"\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif failedPod {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"At least one port was listening but not declared in any container specs\", false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"All listening were declared in containers specs\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "findRoguePodsListeningToPorts", + "kind": "function", + "source": [ + "func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRogueContainersDeclaringPorts(put.Containers, portsToTest, portsOrigin, logger)", + "\t\tnonCompliantPortFound := len(nonCompliantObjectsEntries) \u003e 0", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "\t\tcut := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get the listening ports on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Failed to get the listening ports on pod, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor port := range listeningPorts {", + 
"\t\t\tif ok := portsToTest[port.PortNumber]; ok {", + "\t\t\t\t// If pod contains an \"istio-proxy\" container, we need to make sure that the ports returned", + "\t\t\t\t// overlap with the known istio ports", + "\t\t\t\tif put.ContainsIstioProxy() \u0026\u0026 ReservedIstioPorts[port.PortNumber] {", + "\t\t\t\t\tlogger.Info(\"%q was found to be listening to port %d due to istio-proxy being present. Ignoring.\", put, port.PortNumber)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlogger.Error(\"%q has one container (%q) listening on port %d (%s) that has been reserved\", put, cut.Name, port.PortNumber, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t\tnonCompliantPortFound = true", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q listens in %s unreserved port %d (%s)\", put, portsOrigin, port.PortNumber, port.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to port not in %s reserved port %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantPortFound {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Pod listens to or its containers declares some %s reserved 
port in %v\", portsOrigin, portsToTest), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcompliantObjects = append(compliantObjects,", + "\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\tfmt.Sprintf(\"Pod does not listen to or declare any %s reserved port in %v\", portsOrigin, portsToTest), true))", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorPodsNoHugepages", + "kind": "function", + "source": [ + "func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor csv, pods := range env.CSVToPodListMap {", + "\t\tCsvResult := SplitCsv(csv)", + "\t\tcheck.LogInfo(\"Name of csv: %q in namespaces: %q\", CsvResult.NameCsv, CsvResult.Namespace)", + "\t\tfor _, pod := range pods {", + "\t\t\tcheck.LogInfo(\"Testing Pod %q in namespace %q\", pod.Name, pod.Namespace)", + "\t\t\tif pod.HasHugepages() {", + "\t\t\t\tcheck.LogError(\"Pod %q in namespace %q has hugepages\", pod.Name, pod.Namespace)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has hugepages\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Pod %q in namespace %q has no hugepages\", pod.Name, pod.Namespace)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has no hugepages\", true))", + "\t\t\t}", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testExclusiveCPUPool", + "kind": "function", + "source": [ + "func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar 
compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tnBExclusiveCPUPoolContainers := 0", + "\t\tnBSharedCPUPoolContainers := 0", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif resources.HasExclusiveCPUsAssigned(cut, check.GetLogger()) {", + "\t\t\t\tnBExclusiveCPUPoolContainers++", + "\t\t\t} else {", + "\t\t\t\tnBSharedCPUPoolContainers++", + "\t\t\t}", + "\t\t}", + "", + "\t\tif nBExclusiveCPUPoolContainers \u003e 0 \u0026\u0026 nBSharedCPUPoolContainers \u003e 0 {", + "\t\t\texclusiveStr := strconv.Itoa(nBExclusiveCPUPoolContainers)", + "\t\t\tsharedStr := strconv.Itoa(nBSharedCPUPoolContainers)", + "", + "\t\t\tcheck.LogError(\"Pod %q has containers whose CPUs belong to different pools. Containers in the shared cpu pool: %d \"+", + "\t\t\t\t\"Containers in the exclusive cpu pool: %d\", put, nBSharedCPUPoolContainers, nBExclusiveCPUPoolContainers)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has containers whose CPUs belong to different pools\", false).", + "\t\t\t\tAddField(\"SharedCPUPoolContainers\", sharedStr).", + "\t\t\t\tAddField(\"ExclusiveCPUPoolContainers\", exclusiveStr))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has no containers whose CPUs belong to different pools\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has no containers whose CPUs belong to different pools\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsSELinuxEnforcing", + "kind": "function", + "source": [ + "func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst (", + "\t\tgetenforceCommand = `chroot /host 
getenforce`", + "\t\tenforcingString = \"Enforcing\\n\"", + "\t)", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\to := clientsholder.GetClientsHolder()", + "\tnodesFailed := 0", + "\tnodesError := 0", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, getenforceCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tcheck.LogError(\"Could not execute command %q in Probe Pod %q, errStr: %q, err: %v\", getenforceCommand, probePod, errStr, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(probePod.Namespace, probePod.Name, \"Failed to execute command\", false))", + "\t\t\tnodesError++", + "\t\t\tcontinue", + "\t\t}", + "\t\tif outStr != enforcingString {", + "\t\t\tcheck.LogError(\"Node %q is not running SELinux, %s command returned: %s\", probePod.Spec.NodeName, getenforceCommand, outStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is not enforced\", false))", + "\t\t\tnodesFailed++", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q is running SELinux\", probePod.Spec.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is enforced\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testPodHugePagesSize", + "kind": "function", + "source": [ + "func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, size string) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := 
range env.GetHugepagesPods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult := put.CheckResourceHugePagesSize(size)", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Pod %q has been found to be running with an incorrect hugepages size (expected size %q)\", put, size)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with an incorrect hugepages size\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has been found to be running with a correct hugepages size %q\", put, size)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with a correct hugepages size\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testServiceMesh", + "kind": "function", + "source": [ + "func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tistioProxyFound := false", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif cut.IsIstioProxy() {", + "\t\t\t\tcheck.LogInfo(\"Istio proxy container found on Pod %q (Container %q)\", put, cut)", + "\t\t\t\tistioProxyFound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !istioProxyFound {", + "\t\t\tcheck.LogError(\"Pod %q found without service mesh\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod found without service mesh container\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q found with service mesh\", put)", + "\t\t\tcompliantObjects = 
append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod found with service mesh container\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewReportObject", + "qualifiedName": "NewReportObject", + "exported": true, + "signature": "func(string, string, bool)(*ReportObject)", + "doc": "NewReportObject Creates a report object with reason and type\n\nThis function initializes an empty ReportObject, sets its type field, and\nadds the provided reason as either a compliance or non‑compliance note\ndepending on the boolean flag. The resulting pointer is returned for further\naugmentation by caller functions.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:458", + "calls": [ + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCatalogSourceReportObject", + "kind": "function", + "source": [ + 
"func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CatalogSourceType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aCatalogSourceName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCertifiedContainerReportObject", + "kind": "function", + "source": [ + "func NewCertifiedContainerReportObject(cii provider.ContainerImageIdentifier, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerImageType, isCompliant)", + "\tout.AddField(ImageDigest, cii.Digest)", + "\tout.AddField(ImageRepo, cii.Repository)", + "\tout.AddField(ImageTag, cii.Tag)", + "\tout.AddField(ImageRegistry, cii.Registry)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterOperatorReportObject", + "kind": "function", + "source": [ + "func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ClusterOperatorType, isCompliant)", + "\tout.AddField(Name, aClusterOperatorName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterVersionReportObject", + "kind": "function", + "source": [ + "func NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OCPClusterType, isCompliant)", + "\tout.AddField(OCPClusterVersionType, version)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, 
aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCrdReportObject", + "kind": "function", + "source": [ + "func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewHelmChartReportObject", + "kind": "function", + "source": [ + "func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, HelmType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aHelmChartName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedNamedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (out 
*ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace).AddField(Name, aName)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func 
NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewTaintReportObject", + "kind": "function", + "source": [ + "func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, TaintType, isCompliant)", + "\tout.AddField(NodeType, nodeName)", + "\tout.AddField(TaintBit, taintBit)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testContainerSCC", + "kind": "function", + "source": [ + "func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\thighLevelCat := securitycontextcontainer.CategoryID1", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", pod)", + "\t\tlistCategory := securitycontextcontainer.CheckPod(pod)", + "\t\tfor _, cat := range listCategory {", + "\t\t\tif cat.Category \u003e securitycontextcontainer.CategoryID1NoUID0 {", + "\t\t\t\tcheck.LogError(\"Category %q is NOT category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is NOT category 1 or category NoUID0\", false).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, aContainerOut)", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Category %q is category 1 or category 
NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is category 1 or category NoUID0\", true).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, aContainerOut)", + "\t\t\t}", + "\t\t\tif cat.Category \u003e highLevelCat {", + "\t\t\t\thighLevelCat = cat.Category", + "\t\t\t}", + "\t\t}", + "\t}", + "\taCNFOut := testhelper.NewReportObject(\"Overall CNF category\", testhelper.CnfType, false).AddField(testhelper.Category, highLevelCat.String())", + "\tcompliantObjects = append(compliantObjects, aCNFOut)", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNamespace", + "kind": "function", + "source": [ + "func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, namespace := range env.Namespaces {", + "\t\tcheck.LogInfo(\"Testing namespace %q\", namespace)", + "\t\tnamespaceCompliant := true", + "\t\tfor _, invalidPrefix := range invalidNamespacePrefixes {", + "\t\t\tif strings.HasPrefix(namespace, invalidPrefix) {", + "\t\t\t\tcheck.LogError(\"Namespace %q has invalid prefix %q\", namespace, invalidPrefix)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has invalid prefix\", testhelper.Namespace, false, namespace))", + "\t\t\t\tnamespaceCompliant = false", + "\t\t\t\tbreak // Break out of the loop if we find an invalid prefix", + "\t\t\t}", + "\t\t}", + "\t\tif namespaceCompliant {", + "\t\t\tcheck.LogInfo(\"Namespace %q has valid prefix\", namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, 
testhelper.NewNamespacedReportObject(\"Namespace has valid prefix\", testhelper.Namespace, true, namespace))", + "\t\t}", + "\t}", + "\tif failedNamespacesNum := len(nonCompliantObjects); failedNamespacesNum \u003e 0 {", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "", + "\tinvalidCrs, err := namespace.TestCrsNamespaces(env.Crds, env.Namespaces, check.GetLogger())", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error while testing CRs namespaces, err=%v\", err)", + "\t\treturn", + "\t}", + "", + "\tinvalidCrsNum := namespace.GetInvalidCRsNum(invalidCrs, check.GetLogger())", + "\tif invalidCrsNum \u003e 0 {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"CRs are not in the configured namespaces\", testhelper.Namespace, false))", + "\t} else {", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"CRs are in the configured namespaces\", testhelper.Namespace, true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNodePort", + "kind": "function", + "source": [ + "func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing %q\", services.ToString(s))", + "", + "\t\tif s.Spec.Type == nodePort {", + "\t\t\tcheck.LogError(\"Service %q (ns %q) type is nodePort\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service is type NodePort\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t} else {", + 
"\t\t\tcheck.LogInfo(\"Service %q (ns %q) type is not nodePort (type=%q)\", s.Name, s.Namespace, s.Spec.Type)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service is not type NodePort\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testDualStackServices", + "kind": "function", + "source": [ + "func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing Service %q\", s.Name)", + "\t\tserviceIPVersion, err := services.GetServiceIPVersion(s)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get IP version from Service %q, err=%v\", s.Name, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Could not get IP Version from service\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name))", + "\t\t}", + "\t\tif serviceIPVersion == netcommons.Undefined || serviceIPVersion == netcommons.IPv4 {", + "\t\t\tcheck.LogError(\"Service %q (ns: %q) only supports IPv4\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service supports only IPv4\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + 
"\t\t} else {", + "\t\t\tcheck.LogInfo(\"Service %q (ns: %q) supports IPv6 or is dual stack\", s.Name, s.Namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service supports IPv6 or is dual stack\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "RunNetworkingTests", + "kind": "function", + "source": [ + "func RunNetworkingTests( //nolint:funlen", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\tcount int,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) (report testhelper.FailureReasonOut, skip bool) {", + "\tlogger.Debug(\"%s\", netcommons.PrintNetTestContextMap(netsUnderTest))", + "\tskip = false", + "\tif len(netsUnderTest) == 0 {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t\treturn report, skip", + "\t}", + "\t// if no network can be tested, then we need to skip the test entirely.", + "\t// If at least one network can be tested (e.g. \u003e 2 IPs/ interfaces present), then we do not skip the test", + "\tatLeastOneNetworkTested := false", + "\tcompliantNets := map[string]int{}", + "\tnonCompliantNets := map[string]int{}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tcompliantNets[netName] = 0", + "\t\tnonCompliantNets[netName] = 0", + "\t\tif len(netUnderTest.DestTargets) == 0 {", + "\t\t\tlogger.Debug(\"There are no containers to ping for %q network %q. 
A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test\", aIPVersion, netName)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneNetworkTested = true", + "\t\tlogger.Debug(\"%q Ping tests on network %q. Number of target IPs: %d\", aIPVersion, netName, len(netUnderTest.DestTargets))", + "", + "\t\tfor _, aDestIP := range netUnderTest.DestTargets {", + "\t\t\tlogger.Debug(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q )\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP)", + "\t\t\tresult, err := TestPing(netUnderTest.TesterSource.ContainerIdentifier, aDestIP, count)", + "\t\t\tlogger.Debug(\"Ping results: %q\", result)", + "\t\t\tlogger.Info(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q ) result: %q\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP, result)", + "\t\t\tif err != nil {", + "\t\t\t\tlogger.Debug(\"Ping failed, err=%v\", err)", + "\t\t\t}", + "\t\t\tif result.outcome != testhelper.SUCCESS {", + "\t\t\t\tlogger.Error(\"Ping from %q (srcip: %q) to %q (dstip: %q) failed\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tnonCompliantNets[netName]++", + "\t\t\t\tnonCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Failed\", false).", + 
"\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + "\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, nonCompliantObject)", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"Ping from %q (srcip: %q) to %q (dstip: %q) succeeded\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tcompliantNets[netName]++", + "\t\t\t\tCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Succeeded\", true).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + "\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, CompliantObject)", + "\t\t\t}", + "\t\t}", + 
"\t\tif nonCompliantNets[netName] != 0 {", + "\t\t\tlogger.Error(\"ICMP tests failed for %d IP source/destination in this network\", nonCompliantNets[netName])", + "\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP tests failed for %d IP source/destination in this network\", nonCompliantNets[netName]), testhelper.NetworkType, false).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t\tif compliantNets[netName] != 0 {", + "\t\t\tlogger.Info(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName])", + "\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName]), testhelper.NetworkType, true).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t}", + "\tif !atLeastOneNetworkTested {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t}", + "", + "\treturn report, skip", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "evaluateAPICompliance", + "kind": "function", + "source": [ + "func evaluateAPICompliance(", + "\tserviceAccountToDeprecatedAPIs map[string]map[string]string,", + "\tkubernetesVersion string,", + "\tworkloadServiceAccountNames map[string]struct{}) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tversion, err := semver.NewVersion(kubernetesVersion)", + "\tif err != nil {", + "\t\tfmt.Printf(\"Failed to parse Kubernetes version %q: %v\", kubernetesVersion, err)", + "\t\treturn nil, nil", + "\t}", + "", + "\t// Increment the version to represent the next release for comparison", + "\tnextK8sVersion := version.IncMinor()", + "", + "\t// Iterate over each service account and its deprecated 
APIs", + "\tfor saName, deprecatedAPIs := range serviceAccountToDeprecatedAPIs {", + "\t\tfor apiName, removedInRelease := range deprecatedAPIs {", + "\t\t\tremovedVersion, err := semver.NewVersion(removedInRelease)", + "\t\t\tif err != nil {", + "\t\t\t\tfmt.Printf(\"Failed to parse Kubernetes version from APIRequestCount.status.removedInRelease: %s\\n\", err)", + "\t\t\t\t// Skip this API if the version parsing fails", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tisCompliantWithNextK8sVersion := removedVersion.Minor() \u003e nextK8sVersion.Minor()", + "", + "\t\t\t// Define reasons with version information", + "\t\t\tnonCompliantReason := fmt.Sprintf(\"API %s used by service account %s is NOT compliant with Kubernetes version %s, it will be removed in release %s\", apiName, saName, nextK8sVersion.String(), removedInRelease)", + "\t\t\tcompliantReason := fmt.Sprintf(\"API %s used by service account %s is compliant with Kubernetes version %s, it will be removed in release %s\", apiName, saName, nextK8sVersion.String(), removedInRelease)", + "", + "\t\t\tvar reportObject *testhelper.ReportObject", + "\t\t\tif isCompliantWithNextK8sVersion {", + "\t\t\t\treportObject = testhelper.NewReportObject(compliantReason, \"API\", true)", + "\t\t\t\treportObject.AddField(\"ActiveInRelease\", nextK8sVersion.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, reportObject)", + "\t\t\t} else {", + "\t\t\t\treportObject = testhelper.NewReportObject(nonCompliantReason, \"API\", false)", + "\t\t\t\treportObject.AddField(\"RemovedInRelease\", removedInRelease)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, reportObject)", + "\t\t\t}", + "", + "\t\t\treportObject.AddField(\"APIName\", apiName)", + "\t\t\treportObject.AddField(\"ServiceAccount\", saName)", + "\t\t}", + "\t}", + "", + "\t// Force the test to pass if both lists are empty", + "\tif len(compliantObjects) == 0 \u0026\u0026 len(nonCompliantObjects) == 0 {", + "\t\tfor saName := range 
workloadServiceAccountNames {", + "\t\t\treportObject := testhelper.NewReportObject(\"SA does not use any removed API\", \"ServiceAccount\", true).", + "\t\t\t\tAddField(\"Name\", saName)", + "\t\t\tcompliantObjects = append(compliantObjects, reportObject)", + "\t\t}", + "\t}", + "", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testCrds", + "kind": "function", + "source": [ + "func testCrds(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, crd := range env.Crds {", + "\t\tcheck.LogInfo(\"Testing CRD: %s\", crd.Name)", + "\t\tfor _, ver := range crd.Spec.Versions {", + "\t\t\tif _, ok := ver.Schema.OpenAPIV3Schema.Properties[\"status\"]; !ok {", + "\t\t\t\tcheck.LogError(\"CRD: %s, version: %s does not have a status subresource\", crd.Name, ver.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewReportObject(\"Crd does not have a status sub resource set\", testhelper.CustomResourceDefinitionType, false).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionName, crd.Name).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionVersion, ver.Name))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"CRD: %s, version: %s has a status subresource\", crd.Name, ver.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewReportObject(\"Crd has a status sub resource set\", testhelper.CustomResourceDefinitionType, true).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionName, crd.Name).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionVersion, ver.Name))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testPodDisruptionBudgets", + "kind": "function", + "source": [ + "func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the of Deployments and StatefulSets and check if the PDBs are valid", + "\tfor _, d := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", d.ToString())", + "\t\tdeploymentSelector := labels.Set(d.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != d.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(deploymentSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, d.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for Deployment %q, err: %v\", pdb.Name, d.Name, err)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.DeploymentType, false).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for Deployment: %q\", pdb.Name, d.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Deployment: references 
PodDisruptionBudget\", testhelper.DeploymentType, true).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"Deployment %q is missing a corresponding PodDisruptionBudget\", d.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Deployment is missing a corresponding PodDisruptionBudget\", testhelper.DeploymentType, false).", + "\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\tAddField(testhelper.Namespace, d.Namespace))", + "\t\t}", + "\t}", + "", + "\tfor _, s := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", s.ToString())", + "\t\tstatefulSetSelector := labels.Set(s.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != s.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(statefulSetSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, s.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for StatefulSet %q, err: %v\", pdb.Name, s.Name, err)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.StatefulSetType, false).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, 
s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for StatefulSet: %q\", pdb.Name, s.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"StatefulSet: references PodDisruptionBudget\", testhelper.StatefulSetType, true).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"StatefulSet %q is missing a corresponding PodDisruptionBudget\", s.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"StatefulSet is missing a corresponding PodDisruptionBudget\", testhelper.StatefulSetType, false).", + "\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testLimitedUseOfExecProbes", + "kind": "function", + "source": [ + "func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcounter := 0", + "\tfor _, put := range env.Pods {", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tif cut.LivenessProbe != nil \u0026\u0026 cut.LivenessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.LivenessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a LivenessProbe with PeriodSeconds greater than %d (%d seconds)\",", + 
"\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a LivenessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.StartupProbe != nil \u0026\u0026 cut.StartupProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.StartupProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a StartupProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a StartupProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + 
"\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.ReadinessProbe != nil \u0026\u0026 cut.ReadinessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.ReadinessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a ReadinessProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a ReadinessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// If there \u003e=10 exec probes, mark the entire cluster as a failure", + "\tif counter \u003e= maxNumberOfExecProbes {", + "\t\tcheck.LogError(\"CNF has 10 or more exec probes (nb-exec-probes=%d)\", counter)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has 10 or more exec probes (%d exec probes)\", counter), 
testhelper.CnfType, false))", + "\t} else {", + "\t\tcheck.LogInfo(\"CNF has less than 10 exec probes (nb-exec-probes=%d)\", counter)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has less than 10 exec probes (%d exec probes)\", counter), testhelper.CnfType, true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "NewStatefulSetReportObject", + "qualifiedName": "NewStatefulSetReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewStatefulSetReportObject Creates a report object for a StatefulSet\n\nIt builds a ReportObject with the type set to a constant representing\nStatefulSet, attaches compliance or non‑compliance reason, then adds\nnamespace and name fields. 
The function returns the fully populated\nReportObject for use in tests.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:432", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testHighAvailability", + "kind": "function", + "source": [ + "func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, dp := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", dp.ToString())", + "\t\tif dp.Spec.Replicas == nil || *(dp.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (number of replicas must be greater than 1)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any AffinityRequired pods", + "\t\t//nolint:goconst", + "\t\tif dp.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping Deployment %q with affinity required\", dp.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif dp.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tdp.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (PodAntiAffinity must be defined)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q has valid high availability\", dp.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment has valid high availability\", true))", + "\t\t}", + "\t}", + "\tfor _, st := range env.StatefulSets {", + "\t\tif st.Spec.Replicas == nil || *(st.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (number of replicas must be greater than 1)\", st.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any AffinityRequired pods", + "\t\tif st.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping StatefulSet %q with affinity required\", st.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif st.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tst.Spec.Template.Spec.Affinity.PodAntiAffinity == nil 
{", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (PodAntiAffinity must be defined)\", st.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q has valid high availability\", st.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet has valid high availability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. 
The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has 
OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, name := statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + 
"\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "NewTaintReportObject", + "qualifiedName": "NewTaintReportObject", + "exported": true, + "signature": "func(string, string, string, bool)(*ReportObject)", + "doc": "NewTaintReportObject Creates a taint report object with node details\n\nThis function builds a ReportObject that records a specific taint bit on a\ngiven node. It initializes the object with the reason for compliance or\nnon‑compliance, sets its type to a predefined taint category, and then adds\nfields for the node name and the taint bit value. 
The resulting pointer is\nreturned for further use in testing or reporting.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:339", + "calls": [ + { + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint 
bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. 
Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. 
All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, TaintType, isCompliant)", + "\tout.AddField(NodeType, nodeName)", + "\tout.AddField(TaintBit, taintBit)", + "\treturn out", + "}" + ] + }, + { + "name": "AddField", + "qualifiedName": "ReportObject.AddField", + "exported": true, + "receiver": "ReportObject", + "signature": "func(string, string)(*ReportObject)", + "doc": "ReportObject.AddField Adds a key-value pair to the report\n\nThe method appends the supplied key to an internal slice of keys and the\ncorresponding value to a parallel slice of values, maintaining order. It\nreturns the same ReportObject pointer so calls can be chained. 
This enables\nconstructing structured reports by sequentially adding fields.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:475", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCatalogSourceReportObject", + "kind": "function", + "source": [ + "func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CatalogSourceType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aCatalogSourceName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCertifiedContainerReportObject", + "kind": "function", + "source": [ + "func NewCertifiedContainerReportObject(cii provider.ContainerImageIdentifier, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerImageType, isCompliant)", + "\tout.AddField(ImageDigest, cii.Digest)", + "\tout.AddField(ImageRepo, cii.Repository)", + "\tout.AddField(ImageTag, cii.Tag)", + "\tout.AddField(ImageRegistry, cii.Registry)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterOperatorReportObject", + "kind": "function", + "source": [ + "func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ClusterOperatorType, isCompliant)", + "\tout.AddField(Name, aClusterOperatorName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterVersionReportObject", + "kind": "function", + "source": [ + "func 
NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OCPClusterType, isCompliant)", + "\tout.AddField(OCPClusterVersionType, version)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCrdReportObject", + "kind": "function", + "source": [ + "func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewHelmChartReportObject", + "kind": "function", + "source": [ + "func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (out 
*ReportObject) {", + "\tout = NewReportObject(aReason, HelmType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aHelmChartName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedNamedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace).AddField(Name, aName)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": 
[ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewTaintReportObject", + "kind": "function", + "source": [ + "func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, TaintType, isCompliant)", + "\tout.AddField(NodeType, nodeName)", + "\tout.AddField(TaintBit, taintBit)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "ReportObject.SetContainerProcessValues", + "kind": "function", + "source": [ + "func (obj *ReportObject) SetContainerProcessValues(aPolicy, aPriority, aCommandLine string) *ReportObject {", + 
"\tobj.AddField(ProcessCommandLine, aCommandLine)", + "\tobj.AddField(SchedulingPolicy, aPolicy)", + "\tobj.AddField(SchedulingPriority, aPriority)", + "\tobj.ObjectType = ContainerProcessType", + "\treturn obj", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "SetContainerProcessValues", + "qualifiedName": "ReportObject.SetContainerProcessValues", + "exported": true, + "receiver": "ReportObject", + "signature": "func(string, string, string)(*ReportObject)", + "doc": "ReportObject.SetContainerProcessValues Stores container process details in the report object\n\nIt records the command line, scheduling policy, and priority of a container\nprocess by adding these fields to the report. The function also tags the\nreport with a type indicating it represents a container process. 
The updated\nreport object is returned for further chaining.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:269", + "calls": [ + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObject.AddField", + "kind": "function", + "source": [ + "func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) {", + "\tobj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey)", + "\tobj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue)", + "\treturn obj", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (obj *ReportObject) SetContainerProcessValues(aPolicy, aPriority, aCommandLine string) *ReportObject {", + "\tobj.AddField(ProcessCommandLine, aCommandLine)", + "\tobj.AddField(SchedulingPolicy, aPolicy)", + "\tobj.AddField(SchedulingPriority, aPriority)", + "\tobj.ObjectType = ContainerProcessType", + "\treturn obj", + "}" + ] + }, + { + "name": "SetType", + "qualifiedName": "ReportObject.SetType", + "exported": true, + "receiver": "ReportObject", + "signature": "func(string)(*ReportObject)", + "doc": "ReportObject.SetType Assigns a new type to the report object\n\nThe method receives a string that represents the desired type and stores it\nin the ObjectType field of the ReportObject instance. 
It then returns the\nsame instance, allowing callers to chain further configuration calls.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:505", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (obj *ReportObject) SetType(aType string) (out *ReportObject) {", + "\tobj.ObjectType = aType", + "\treturn obj", + "}" + ] + }, + { + "name": "ReportObjectTestString", + "qualifiedName": "ReportObjectTestString", + "exported": true, + "signature": "func([]*ReportObject)(string)", + "doc": "ReportObjectTestString Creates a formatted string of ReportObject values\n\nThe function takes a slice of pointers to ReportObject and builds a single\nstring that lists each element in the same order as the input. Each object is\nrendered with the %#v format specifier, appended with a comma, and the entire\nlist is wrapped in brackets prefixed by \"[]testhelper.ReportObject\". The\nresulting string is returned for use in test output or debugging.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:121", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ReportObjectTestString(p []*ReportObject) (out string) {", + "\tout = \"[]testhelper.ReportObject{\"", + "\tfor _, p := range p {", + "\t\tout += fmt.Sprintf(\"%#v,\", *p)", + "\t}", + "\tout += \"}\"", + "\treturn out", + "}" + ] + }, + { + "name": "ReportObjectTestStringPointer", + "qualifiedName": "ReportObjectTestStringPointer", + "exported": true, + "signature": "func([]*ReportObject)(string)", + "doc": "ReportObjectTestStringPointer Formats a slice of ReportObject pointers into a readable string\n\nIt receives a list of pointers to ReportObject, iterates over each element,\nand appends a formatted representation of the dereferenced object to an\noutput string. 
The resulting string starts with \"[]*testhelper.ReportObject\"\nand ends with \"\", enclosing all items separated by commas. This string is\nused primarily for debugging or test failure messages.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:105", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "FailureReasonOutTestString", + "kind": "function", + "source": [ + "func FailureReasonOutTestString(p FailureReasonOut) (out string) {", + "\tout = \"testhelper.FailureReasonOut{\"", + "\tout += fmt.Sprintf(\"CompliantObjectsOut: %s,\", ReportObjectTestStringPointer(p.CompliantObjectsOut))", + "\tout += fmt.Sprintf(\"NonCompliantObjectsOut: %s,\", ReportObjectTestStringPointer(p.NonCompliantObjectsOut))", + "\tout += \"}\"", + "\treturn out", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ReportObjectTestStringPointer(p []*ReportObject) (out string) {", + "\tout = \"[]*testhelper.ReportObject{\"", + "\tfor _, p := range p {", + "\t\tout += fmt.Sprintf(\"\u0026%#v,\", *p)", + "\t}", + "\tout += \"}\"", + "\treturn out", + "}" + ] + }, + { + "name": "ResultObjectsToString", + "qualifiedName": "ResultObjectsToString", + "exported": true, + "signature": "func([]*ReportObject, []*ReportObject)(string, error)", + "doc": "ResultObjectsToString Serializes compliant and non‑compliant report objects into a JSON string\n\nThe function receives two slices of ReportObject values, one for compliant\nitems and another for non‑compliant ones. It constructs a FailureReasonOut\nstructure containing these slices, marshals the structure to JSON, and\nreturns the resulting string. 
If the marshalling fails, an error is returned\nwith context.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:997", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "Check.SetResult", + "kind": "function", + "source": [ + "func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcheck.mutex.Lock()", + "\tdefer check.mutex.Unlock()", + "", + "\tif check.Result == CheckResultAborted {", + "\t\treturn", + "\t}", + "", + "\tresultObjectsStr, err := testhelper.ResultObjectsToString(compliantObjects, nonCompliantObjects)", + "\tif err != nil {", + "\t\tcheck.LogError(\"Failed to get result objects string for check %s: %v\", check.ID, err)", + "\t}", + "", + "\tcheck.details = resultObjectsStr", + "", + "\t// If an error/panic happened before, do not change the result.", + "\tif check.Result == CheckResultError {", + "\t\treturn", + "\t}", + "", + "\tif len(nonCompliantObjects) \u003e 0 {", + "\t\tcheck.Result = CheckResultFailed", + "\t\tcheck.skipReason = \"\"", + "\t} else if len(compliantObjects) == 0 {", + "\t\t// Mark this check as skipped.", + "\t\tcheck.LogWarn(\"Check %s marked as skipped as both compliant and non-compliant objects lists are empty.\", check.ID)", + "\t\tcheck.skipReason = \"compliant and non-compliant objects lists are empty\"", + "\t\tcheck.Result = CheckResultSkipped", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ResultObjectsToString(compliantObject, nonCompliantObject []*ReportObject) (string, error) {", + "\treason := FailureReasonOut{", + "\t\tCompliantObjectsOut: compliantObject,", + "\t\tNonCompliantObjectsOut: nonCompliantObject,", + "\t}", 
+ "", + "\tbytes, err := json.Marshal(reason)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"could not marshall FailureReasonOut object: %v\", err)", + "\t}", + "", + "\treturn string(bytes), nil", + "}" + ] + }, + { + "name": "ResultToString", + "qualifiedName": "ResultToString", + "exported": true, + "signature": "func(int)(string)", + "doc": "ResultToString Translates a result code into its textual form\n\nThe function receives an integer representing a status code and returns the\nmatching string: \"SUCCESS\", \"FAILURE\" or \"ERROR\". If the input does not match\nany known code, it yields an empty string.", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:515", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "PingResults.String", + "kind": "function", + "source": [ + "func (results PingResults) String() string {", + "\treturn fmt.Sprintf(\"outcome: %s transmitted: %d received: %d errors: %d\", testhelper.ResultToString(results.outcome), results.transmitted, results.received, results.errors)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ResultToString(result int) (str string) {", + "\tswitch result {", + "\tcase SUCCESS:", + "\t\treturn \"SUCCESS\"", + "\tcase FAILURE:", + "\t\treturn \"FAILURE\"", + "\tcase ERROR:", + "\t\treturn \"ERROR\"", + "\t}", + "\treturn \"\"", + "}" + ] + } + ], + "globals": [ + { + "name": "AbortTrigger", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:1011" + } + ], + "consts": [ + { + "name": "CatalogSourceType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:232" + }, + { + "name": "Category", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:154" + }, + { + "name": "ChangedFolders", + "exported": true, + 
"position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:183" + }, + { + "name": "ClusterOperatorType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:230" + }, + { + "name": "ClusterRoleName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:156" + }, + { + "name": "CnfType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:226" + }, + { + "name": "ContainerCategory", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:238" + }, + { + "name": "ContainerImageType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:233" + }, + { + "name": "ContainerName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:147" + }, + { + "name": "ContainerPort", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:249" + }, + { + "name": "ContainerProcessType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:237" + }, + { + "name": "ContainerType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:231" + }, + { + "name": "CrdVersion", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:173" + }, + { + "name": "CustomResourceDefinitionName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:165" + }, + { + "name": "CustomResourceDefinitionType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:244" + }, + { + "name": "CustomResourceDefinitionVersion", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:166" + }, + { + "name": "DeclaredPortType", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:248" + }, + { + "name": "DeletedFolders", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:184" + }, + { + "name": "DeploymentName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:162" + }, + { + "name": "DeploymentType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:240" + }, + { + "name": "DestinationContainerName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:199" + }, + { + "name": "DestinationIP", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:200" + }, + { + "name": "DestinationNamespace", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:197" + }, + { + "name": "DestinationPodName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:198" + }, + { + "name": "ERROR", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:30" + }, + { + "name": "Error", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:253" + }, + { + "name": "FAILURE", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:29" + }, + { + "name": "Group", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:205" + }, + { + "name": "HelmType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:228" + }, + { + "name": "HelmVersionType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:252" + }, + { + "name": "HostPathType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:251" + }, + { + "name": "HostPortType", + "exported": true, 
+ "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:250" + }, + { + "name": "ICMPResultType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:242" + }, + { + "name": "ImageDigest", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:256" + }, + { + "name": "ImageName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:170" + }, + { + "name": "ImageRegistry", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:259" + }, + { + "name": "ImageRepo", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:257" + }, + { + "name": "ImageTag", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:258" + }, + { + "name": "ListeningPortType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:247" + }, + { + "name": "ModuleName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:188" + }, + { + "name": "Name", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:145" + }, + { + "name": "Namespace", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:144" + }, + { + "name": "NetworkName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:196" + }, + { + "name": "NetworkType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:243" + }, + { + "name": "NodeSelector", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:176" + }, + { + "name": "NodeType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:234" + }, + { + "name": "OCPChannel", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:175" + }, + { + "name": "OCPClusterType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:235" + }, + { + "name": "OCPClusterVersionType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:236" + }, + { + "name": "OCPVersion", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:174" + }, + { + "name": "OSImage", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:192" + }, + { + "name": "OpenAPIV3Schema", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:172" + }, + { + "name": "OperatorList", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:219" + }, + { + "name": "OperatorName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:216" + }, + { + "name": "OperatorPermission", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:254" + }, + { + "name": "OperatorPhase", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:215" + }, + { + "name": "OperatorType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:229" + }, + { + "name": "Path", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:168" + }, + { + "name": "PersistentVolumeClaimName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:178" + }, + { + "name": "PersistentVolumeName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:177" + }, + { + "name": "PodDisruptionBudgetReference", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:164" + }, + { + 
"name": "PodName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:146" + }, + { + "name": "PodRoleBinding", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:260" + }, + { + "name": "PodType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:227" + }, + { + "name": "PortNumber", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:210" + }, + { + "name": "PortProtocol", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:211" + }, + { + "name": "ProbePodName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:193" + }, + { + "name": "ProcessCommandLine", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:149" + }, + { + "name": "ProcessID", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:148" + }, + { + "name": "ReasonForCompliance", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:153" + }, + { + "name": "ReasonForNonCompliance", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:152" + }, + { + "name": "Repository", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:169" + }, + { + "name": "ResourceName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:206" + }, + { + "name": "RoleBindingName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:155" + }, + { + "name": "RoleBindingNamespace", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:157" + }, + { + "name": "RoleName", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:204" + }, + { + "name": "RoleRuleType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:245" + }, + { + "name": "RoleType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:246" + }, + { + "name": "SCCCapability", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:167" + }, + { + "name": "SUCCESS", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:28" + }, + { + "name": "SchedulingPolicy", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:150" + }, + { + "name": "SchedulingPriority", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:151" + }, + { + "name": "ServiceAccountName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:158" + }, + { + "name": "ServiceIPVersion", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:161" + }, + { + "name": "ServiceMode", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:159" + }, + { + "name": "ServiceName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:160" + }, + { + "name": "ServiceType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:239" + }, + { + "name": "SourceIP", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:201" + }, + { + "name": "StatefulSetName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:163" + }, + { + "name": "StatefulSetType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:241" + }, + { + "name": "StorageClassName", + 
"exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:181" + }, + { + "name": "StorageClassProvisioner", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:182" + }, + { + "name": "SubscriptionName", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:214" + }, + { + "name": "SysctlKey", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:190" + }, + { + "name": "SysctlValue", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:191" + }, + { + "name": "TaintBit", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:185" + }, + { + "name": "TaintBitDescription", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:186" + }, + { + "name": "TaintMask", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:187" + }, + { + "name": "TaintType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:255" + }, + { + "name": "Taints", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:189" + }, + { + "name": "TolerationEffect", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:180" + }, + { + "name": "TolerationKey", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:179" + }, + { + "name": "UndefinedType", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:225" + }, + { + "name": "Verb", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:207" + }, + { + "name": "Version", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/pkg/testhelper/testhelper.go:171" + } + ] + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "name": "versions", + "files": 1, + "imports": [ + "github.com/Masterminds/semver/v3", + "regexp" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GitVersion", + "qualifiedName": "GitVersion", + "exported": true, + "signature": "func()(string)", + "doc": "GitVersion provides the current build’s git display version\n\nThe function checks if a release tag is defined; if not it falls back to an\nunreleased build label combined with the previous release information. It\nthen appends the short commit hash in parentheses and returns the resulting\nstring, which is used throughout the application to report the running\nversion.", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:33", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/version", + "name": "showVersion", + "kind": "function", + "source": [ + "func showVersion(cmd *cobra.Command, _ []string) error {", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Startup", + "kind": "function", + "source": [ + "func Startup() {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\t// Create an evaluator to filter test cases with labels", + "\tif err := checksdb.InitLabelsExprEvaluator(testParams.LabelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(testParams.OutputDir, testParams.LogLevel); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\t// Diagnostic functions 
will run when no labels are provided.", + "\tif testParams.LabelsFilter == noLabelsFilterExpr {", + "\t\tlog.Warn(\"The Best Practices Test Suite will run in diagnostic mode so no test case will be launched\")", + "\t}", + "", + "\t// Set clientsholder singleton with the filenames from the env vars.", + "\t_ = clientsholder.GetClientsHolder(getK8sClientsConfigFileNames()...)", + "\tLoadChecksDB(testParams.LabelsFilter)", + "", + "\tlog.Info(\"Certsuite Version: %v\", versions.GitVersion())", + "\tlog.Info(\"Claim Format Version: %s\", versions.ClaimFormatVersion)", + "\tlog.Info(\"Labels filter: %v\", testParams.LabelsFilter)", + "\tlog.Info(\"Log level: %s\", strings.ToUpper(testParams.LogLevel))", + "", + "\tcli.PrintBanner()", + "", + "\tfmt.Printf(\"Certsuite version: %s\\n\", versions.GitVersion())", + "\tfmt.Printf(\"Claim file version: %s\\n\", versions.ClaimFormatVersion)", + "\tfmt.Printf(\"Checks filter: %s\\n\", testParams.LabelsFilter)", + "\tfmt.Printf(\"Output folder: %s\\n\", testParams.OutputDir)", + "\tfmt.Printf(\"Log file: %s (level=%s)\\n\", log.LogFileName, testParams.LogLevel)", + "\tfmt.Printf(\"\\n\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GitVersion() string {", + "\tif GitRelease == \"\" {", + "\t\tGitDisplayRelease = \"Unreleased build post \" + GitPreviousRelease", + "\t} else {", + "\t\tGitDisplayRelease = GitRelease", + "\t}", + "", + "\treturn GitDisplayRelease + \" (\" + GitCommit + \")\"", + "}" + ] + }, + { + "name": "IsValidK8sVersion", + "qualifiedName": "IsValidK8sVersion", + "exported": true, + "signature": "func(string)(bool)", + "doc": "IsValidK8sVersion Checks if a string matches Kubernetes version naming conventions\n\nThe function compiles a regular expression that enforces the pattern for\nKubernetes versions, allowing optional pre-release identifiers such as alpha\nor beta with numeric suffixes. 
It returns true when the input string conforms\nto this format and false otherwise.", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:59", + "calls": [ + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "MatchString", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCrdVersioning", + "kind": "function", + "source": [ + "func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tdoesUseK8sVersioning := true", + "\t\tnonCompliantVersion := \"\"", + "", + "\t\tfor _, crdVersion := range crd.Spec.Versions {", + "\t\t\tversionName := crdVersion.Name", + "\t\t\tcheck.LogDebug(\"Checking for Operator CRD %s with version %s\", crd.Name, versionName)", + "", + "\t\t\tif !versions.IsValidK8sVersion(versionName) {", + "\t\t\t\tdoesUseK8sVersioning = false", + "\t\t\t\tnonCompliantVersion = versionName", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif doesUseK8sVersioning {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s has valid K8s versioning \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has valid K8s versioning \", true).AddField(testhelper.CrdVersion, crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator CRD %s has invalid K8s versioning %s \", crd.Name, nonCompliantVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has invalid K8s versioning \", false).AddField(testhelper.CrdVersion, crd.Name))", + "\t\t}", + "\t}", + 
"\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsValidK8sVersion(version string) bool {", + "\tr := regexp.MustCompile(`^(v)([1-9]\\d*)+((alpha|beta)([1-9]\\d*)+){0,2}$`)", + "\treturn r.MatchString(version)", + "}" + ] + }, + { + "name": "IsValidSemanticVersion", + "qualifiedName": "IsValidSemanticVersion", + "exported": true, + "signature": "func(string)(bool)", + "doc": "IsValidSemanticVersion Validates that a string is a proper semantic version\n\nThe function attempts to parse the input using a semantic version parser. If\nparsing succeeds without error, it returns true, indicating a valid semantic\nversion; otherwise, it returns false.", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:48", + "calls": [ + { + "name": "NewVersion", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorSemanticVersioning", + "kind": "function", + "source": [ + "func testOperatorSemanticVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorSemanticVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, operator := range env.Operators {", + "\t\toperatorVersion := operator.Version", + "\t\tcheck.LogInfo(\"Testing Operator %q for version %s\", operator, operatorVersion)", + "", + "\t\tif versions.IsValidSemanticVersion(operatorVersion) {", + "\t\t\tcheck.LogInfo(\"Operator %q has a valid semantic version %s\", operator, operatorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has a valid semantic version \", true).AddField(testhelper.Version, operatorVersion))", + "\t\t} else {", + 
"\t\t\tcheck.LogError(\"Operator %q has an invalid semantic version %s\", operator, operatorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has an invalid semantic version \", false).AddField(testhelper.Version, operatorVersion))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsValidSemanticVersion(version string) bool {", + "\t_, err := semver.NewVersion(version)", + "\treturn err == nil", + "}" + ] + } + ], + "globals": [ + { + "name": "ClaimFormatVersion", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:23" + }, + { + "name": "GitCommit", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:11" + }, + { + "name": "GitDisplayRelease", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:20" + }, + { + "name": "GitPreviousRelease", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:17" + }, + { + "name": "GitRelease", + "exported": true, + "type": "string", + "position": "/Users/deliedit/dev/certsuite/pkg/versions/versions.go:14" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests", + "name": "suite", + "files": 1, + "imports": null, + "structs": null, + "interfaces": null, + "functions": null, + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "accesscontrol", + "files": 3, + "imports": [ + "fmt", + "github.com/operator-framework/api/pkg/operators/v1alpha1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/resources", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "k8s.io/api/core/v1", + "k8s.io/api/rbac/v1", + "strconv", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Initialises all access control checks for the test suite\n\nThis function registers a group of security checks under the access‑control\nkey, attaching pre‑execution logic and a series of check functions that\nvalidate container capabilities, pod configurations, namespace policies, and\nmore. 
Each check is created with identifiers derived from test metadata, may\nbe skipped based on environmental conditions, and logs its progress through a\nshared logger.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:80", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainerSCC", + "kind": "function", + "source": [ + "func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\thighLevelCat := securitycontextcontainer.CategoryID1", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", pod)", + "\t\tlistCategory := securitycontextcontainer.CheckPod(pod)", + "\t\tfor _, cat := range listCategory {", + "\t\t\tif cat.Category \u003e securitycontextcontainer.CategoryID1NoUID0 {", + "\t\t\t\tcheck.LogError(\"Category %q is NOT category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is NOT category 1 or category NoUID0\", false).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, aContainerOut)", + "\t\t\t} else {", + 
"\t\t\t\tcheck.LogInfo(\"Category %q is category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is category 1 or category NoUID0\", true).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, aContainerOut)", + "\t\t\t}", + "\t\t\tif cat.Category \u003e highLevelCat {", + "\t\t\t\thighLevelCat = cat.Category", + "\t\t\t}", + "\t\t}", + "\t}", + "\taCNFOut := testhelper.NewReportObject(\"Overall CNF category\", testhelper.CnfType, false).AddField(testhelper.Category, highLevelCat.String())", + "\tcompliantObjects = append(compliantObjects, aCNFOut)", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + 
"}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testSysAdminCapability", + "kind": "function", + "source": [ + "func testSysAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"SYS_ADMIN\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNetAdminCapability", + "kind": "function", + "source": [ + "func testNetAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"NET_ADMIN\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNetRawCapability", + "kind": "function", + "source": [ + "func testNetRawCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"NET_RAW\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testIpcLockCapability", + "kind": "function", + "source": [ + "func testIpcLockCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"IPC_LOCK\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testBpfCapability", + "kind": "function", + "source": [ + "func testBpfCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"BPF\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testSecConRunAsNonRoot", + "kind": "function", + "source": [ + "func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing pod %s/%s\", put.Namespace, put.Name)", + "\t\tnonCompliantContainers, nonComplianceReason := put.GetRunAsNonRootFalseContainers(knownContainersToSkip)", + "\t\tif len(nonCompliantContainers) == 0 {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is configured with RunAsNonRoot=true or RunAsUser!=0 at pod or container level.\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"One or more containers of the pod are running with root user\", false))", + "\t\t\tfor index := range nonCompliantContainers {", + "\t\t\t\tcheck.LogError(\"Pod %s/%s, container %q is not compliant: %s\", put.Namespace, put.Name, nonCompliantContainers[index].Name, nonComplianceReason[index])", + "", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, nonCompliantContainers[index].Name,", + "\t\t\t\t\tnonComplianceReason[index], false))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + 
"kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testSecConPrivilegeEscalation", + "kind": "function", + "source": [ + "func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprivEscFound := false", 
+ "\t\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.AllowPrivilegeEscalation != nil {", + "\t\t\tif *(cut.SecurityContext.AllowPrivilegeEscalation) {", + "\t\t\t\tcheck.LogError(\"AllowPrivilegeEscalation is set to true in Container %q.\", cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"AllowPrivilegeEscalation is set to true\", false))", + "\t\t\t\tprivEscFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !privEscFound {", + "\t\t\tcheck.LogInfo(\"AllowPrivilegeEscalation is set to false in Container %q.\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"AllowPrivilegeEscalation is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] 
= identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testSecConReadOnlyFilesystem", + "kind": "function", + "source": [ + "func testSecConReadOnlyFilesystem(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q in namespace %q\", pod.Name, pod.Namespace)", + "\t\tfor _, cut := range pod.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q in Pod %q\", cut.Name, pod.Name)", + "\t\t\tif cut.IsReadOnlyRootFilesystem(check.GetLogger()) {", + "\t\t\t\tcheck.LogInfo(\"Container %q in Pod %q has a read-only root filesystem.\", cut.Name, pod.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Container has a read-only root filesystem\", true))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Container %q in Pod %q does not have a read-only root filesystem.\", cut.Name, pod.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Container does not have a read-only root filesystem\", false))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" 
+ }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainerHostPort", + "kind": "function", + "source": [ + "func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\thostPortFound := false", + "\t\tfor _, aPort := range cut.Ports {", + "\t\t\tif aPort.HostPort != 0 {", + "\t\t\t\tcheck.LogError(\"Host port %d is configured in Container 
%q.\", aPort.HostPort, cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Host port is configured\", false).", + "\t\t\t\t\tSetType(testhelper.HostPortType).", + "\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(aPort.HostPort))))", + "\t\t\t\thostPortFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !hostPortFound {", + "\t\t\tcheck.LogInfo(\"Host port not configured in Container %q.\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Host port is not configured\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodHostNetwork", + "kind": "function", + "source": [ + "func testPodHostNetwork(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostNetwork {", + "\t\t\tcheck.LogError(\"Host network is set to true in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Host network is set to true\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Host network is set to false in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Host network is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, 
cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodHostPath", + "kind": "function", + "source": [ + "func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tfor idx := range put.Spec.Volumes {", + "\t\t\tvol := \u0026put.Spec.Volumes[idx]", + "\t\t\tif vol.HostPath != nil \u0026\u0026 vol.HostPath.Path != \"\" {", + "\t\t\t\tcheck.LogError(\"Hostpath path: %q is set in Pod %q.\", vol.HostPath.Path, put)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Hostpath path is set\", false).", + "\t\t\t\t\tSetType(testhelper.HostPathType).", + "\t\t\t\t\tAddField(testhelper.Path, vol.HostPath.Path))", + "\t\t\t\tpodIsCompliant = false", + "\t\t\t}", + "\t\t}", + "\t\tif podIsCompliant {", + 
"\t\t\tcheck.LogError(\"Hostpath path not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Hostpath path is not set\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodHostIPC", + "kind": "function", + "source": [ + "func testPodHostIPC(check 
*checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostIPC {", + "\t\t\tcheck.LogError(\"HostIpc is set in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostIpc is set to true\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"HostIpc not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostIpc is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodHostPID", + "kind": "function", + "source": [ + "func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostPID {", + "\t\t\tcheck.LogError(\"HostPid is set in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostPid is set to true\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"HostPid not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostPid is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + 
"\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoNamespacesSkipFn", + "kind": "function", + "source": [ + "func GetNoNamespacesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Namespaces) == 0 {", + "\t\t\treturn true, \"There are no namespaces to check. Please check config.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNamespace", + "kind": "function", + "source": [ + "func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, namespace := range env.Namespaces {", + "\t\tcheck.LogInfo(\"Testing namespace %q\", namespace)", + "\t\tnamespaceCompliant := true", + "\t\tfor _, invalidPrefix := range invalidNamespacePrefixes {", + "\t\t\tif strings.HasPrefix(namespace, invalidPrefix) {", + "\t\t\t\tcheck.LogError(\"Namespace %q has invalid prefix %q\", namespace, invalidPrefix)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has invalid prefix\", testhelper.Namespace, false, namespace))", + "\t\t\t\tnamespaceCompliant = false", + "\t\t\t\tbreak // Break out of the loop if we find an invalid prefix", + "\t\t\t}", + "\t\t}", + "\t\tif namespaceCompliant {", + "\t\t\tcheck.LogInfo(\"Namespace %q has valid prefix\", 
namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has valid prefix\", testhelper.Namespace, true, namespace))", + "\t\t}", + "\t}", + "\tif failedNamespacesNum := len(nonCompliantObjects); failedNamespacesNum \u003e 0 {", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "", + "\tinvalidCrs, err := namespace.TestCrsNamespaces(env.Crds, env.Namespaces, check.GetLogger())", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error while testing CRs namespaces, err=%v\", err)", + "\t\treturn", + "\t}", + "", + "\tinvalidCrsNum := namespace.GetInvalidCRsNum(invalidCrs, check.GetLogger())", + "\tif invalidCrsNum \u003e 0 {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"CRs are not in the configured namespaces\", testhelper.Namespace, false))", + "\t} else {", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"CRs are in the configured namespaces\", testhelper.Namespace, true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier 
claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodServiceAccount", + "kind": "function", + "source": [ + "func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q does not have a valid service account name (uses the default service account instead).\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have a valid service account name\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has a valid service account name\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a service account name\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodRoleBindings", + "kind": "function", + "source": [ + "func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q has an empty or default serviceAccountName\", put)", + "\t\t\t// Add the pod to the non-compliant 
list", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\"The serviceAccountName is either empty or default\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has a serviceAccountName %q, checking role bindings.\", put, put.Spec.ServiceAccountName)", + "\t\t\t// Loop through the rolebindings and check if they are from another namespace", + "\t\t\tfor rbIndex := range env.RoleBindings {", + "\t\t\t\t// Short circuit if the role binding and the pod are in the same namespace.", + "\t\t\t\tif env.RoleBindings[rbIndex].Namespace == put.Namespace {", + "\t\t\t\t\tcheck.LogInfo(\"Pod %q and the role binding are in the same namespace\", put)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\t// If we make it to this point, the role binding and the pod are in different namespaces.", + "\t\t\t\t// We must check if the pod's service account is in the role binding's subjects.", + "\t\t\t\tfound := false", + "\t\t\t\tfor _, subject := range env.RoleBindings[rbIndex].Subjects {", + "\t\t\t\t\t// If the subject is a service account and the service account is in the same namespace as one of the CNF's namespaces, then continue, this is allowed", + "\t\t\t\t\tif subject.Kind == rbacv1.ServiceAccountKind \u0026\u0026", + "\t\t\t\t\t\tsubject.Namespace == put.Namespace \u0026\u0026", + "\t\t\t\t\t\tsubject.Name == put.Spec.ServiceAccountName \u0026\u0026", + "\t\t\t\t\t\tstringhelper.StringInSlice[string](env.Namespaces, env.RoleBindings[rbIndex].Namespace, false) {", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\t// Finally, if the subject is a service account and the service account is in the same namespace as the pod, then we have a failure", + "\t\t\t\t\tif subject.Kind == rbacv1.ServiceAccountKind \u0026\u0026", + "\t\t\t\t\t\tsubject.Namespace == put.Namespace \u0026\u0026", + "\t\t\t\t\t\tsubject.Name == 
put.Spec.ServiceAccountName {", + "\t\t\t\t\t\tcheck.LogError(\"Pod %q has the following role bindings that do not live in one of the CNF namespaces: %q\", put, env.RoleBindings[rbIndex].Name)", + "", + "\t\t\t\t\t\t// Add the pod to the non-compliant list", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\t\t\"The role bindings used by this pod do not live in one of the CNF namespaces\", false).", + "\t\t\t\t\t\t\t\tAddField(testhelper.RoleBindingName, env.RoleBindings[rbIndex].Name).", + "\t\t\t\t\t\t\t\tAddField(testhelper.RoleBindingNamespace, env.RoleBindings[rbIndex].Namespace).", + "\t\t\t\t\t\t\t\tAddField(testhelper.ServiceAccountName, put.Spec.ServiceAccountName).", + "\t\t\t\t\t\t\t\tSetType(testhelper.PodRoleBinding))", + "\t\t\t\t\t\tfound = true", + "\t\t\t\t\t\tpodIsCompliant = false", + "\t\t\t\t\t\tbreak", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\t// Break of out the loop if we found a role binding that is out of namespace", + "\t\t\t\tif found {", + "\t\t\t\t\tbreak", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// Add pod to the compliant object list", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"All the role bindings used by Pod %q (applied by the service accounts) live in one of the CNF namespaces\", put)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"All the role bindings used by this pod (applied by the service accounts) live in one of the CNF namespaces\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + 
"source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodClusterRoleBindings", + "kind": "function", + "source": [ + "func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult, roleRefName, err := put.IsUsingClusterRoleBinding(env.ClusterRoleBindings, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to determine if Pod %q is using a cluster role binding, err=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"failed to determine if pod is using a cluster role binding: %v\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\ttopOwners, err := put.GetTopOwner()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get top owners of Pod %q, err=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Error getting top owners of this pod, err=%s\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcsvNamespace, csvName, isOwnedByClusterWideOperator := ownedByClusterWideOperator(topOwners, env)", + "\t\t// Pod is using a cluster role binding but is owned by a cluster wide operator, so it is ok", + "\t\tif isOwnedByClusterWideOperator \u0026\u0026 result {", + "\t\t\tcheck.LogInfo(\"Pod %q is using a cluster role binding but is owned by a cluster-wide operator (Csv %q, namespace %q)\", put, csvName, csvNamespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding but owned by a cluster-wide operator\", true))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif result {", + "\t\t\t// Pod was found to be using a cluster role binding. 
This is not allowed.", + "\t\t\t// Flagging this pod as a failed pod.", + "\t\t\tcheck.LogError(\"Pod %q is using a cluster role binding (roleRefName=%q)\", put, roleRefName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding\", false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogInfo(\"Pod %q is not using a cluster role binding\", put)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not using a cluster role binding\", true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": 
"GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testAutomountServiceToken", + "kind": "function", + "source": [ + "func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q uses the default service account name.\", put.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found with default service account name\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Evaluate the pod's automount service tokens and any attached service accounts", + "\t\tclient := clientsholder.GetClientsHolder()", + "\t\tpodPassed, newMsg := rbac.EvaluateAutomountTokens(client.K8sClient.CoreV1(), put)", + "\t\tif !podPassed {", + "\t\t\tcheck.LogError(\"%s\", newMsg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, newMsg, false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q does not have automount service tokens set to true\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have automount service tokens set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": 
"function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOneProcessPerContainer", + "kind": "function", + "source": [ + "func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t// the Istio sidecar container \"istio-proxy\" launches two processes: \"pilot-agent\" and \"envoy\"", + "\t\tif cut.IsIstioProxy() {", + "\t\t\tcheck.LogInfo(\"Skipping \\\"istio-proxy\\\" container\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Debug pod not found for node %q\", cut.NodeName)", + "\t\t\treturn", + "\t\t}", + "\t\tocpContext := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tpid, err := crclient.GetPidFromContainer(cut, ocpContext)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get PID for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get number of processes for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif nbProcesses \u003e 1 {", + "\t\t\tcheck.LogError(\"Container %q has more than one process running\", cut)", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has more than one process running\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has only one process running\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has only one process running\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + 
"\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoNodesWithRealtimeKernelSkipFn", + "kind": "function", + "source": [ + "func GetNoNodesWithRealtimeKernelSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tfor i := range env.Nodes {", + "\t\t\tnode := env.Nodes[i]", + "", + "\t\t\tif node.IsRTKernel() {", + "\t\t\t\treturn false, \"\"", + "\t\t\t}", + "\t\t}", + "", + "\t\treturn true, \"no nodes with realtime kernel type found\"", + "\t}", + "}" + ] + }, + { + "name": "testSYSNiceRealtimeCapability", + "kind": "function", + "source": [ + "func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the labeled containers and compare their security context capabilities and whether", + "\t// or not the node's kernel is realtime enabled.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tn := env.Nodes[cut.NodeName]", + "\t\tif !n.IsRTKernel() {", + "\t\t\tcheck.LogInfo(\"Container is not running on a realtime kernel enabled node\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not running on a realtime kernel enabled node\", true))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !isContainerCapabilitySet(cut.SecurityContext.Capabilities, \"SYS_NICE\") {", + "\t\t\tcheck.LogError(\"Container %q has been found running on a realtime kernel enabled node without SYS_NICE capability.\", cut)", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node without SYS_NICE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container is running on a realtime kernel enabled node with the SYS_NICE capability\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node with the SYS_NICE capability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetSharedProcessNamespacePodsSkipFn", + "kind": "function", + "source": [ + "func 
GetSharedProcessNamespacePodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetShareProcessNamespacePods()) == 0 {", + "\t\t\treturn true, \"Shared process namespace pods found.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testSysPtraceCapability", + "kind": "function", + "source": [ + "func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetShareProcessNamespacePods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tsysPtraceEnabled := false", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif cut.SecurityContext == nil ||", + "\t\t\t\tcut.SecurityContext.Capabilities == nil ||", + "\t\t\t\tlen(cut.SecurityContext.Capabilities.Add) == 0 {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif stringhelper.StringInSlice(cut.SecurityContext.Capabilities.Add, \"SYS_PTRACE\", false) {", + "\t\t\t\tcheck.LogInfo(\"Container %q defines the SYS_PTRACE capability\", cut)", + "\t\t\t\tsysPtraceEnabled = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !sysPtraceEnabled {", + "\t\t\tcheck.LogError(\"Pod %q has process namespace sharing enabled but no container allowing the SYS_PTRACE capability.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled but no container allowing the SYS_PTRACE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled and at least one container allowing 
the SYS_PTRACE capability\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNamespaceResourceQuota", + "kind": "function", + "source": [ + "func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", 
+ "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\t// Look through all of the pods and compare their namespace to any potential", + "\t\t// resource quotas", + "\t\tfoundPodNamespaceRQ := false", + "\t\tfor index := range env.ResourceQuotas {", + "\t\t\t// We are just checking for the existence of the resource quota as of right now.", + "\t\t\t// Read more about the resource quota object here:", + "\t\t\t// https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "\t\t\tif put.Namespace == env.ResourceQuotas[index].Namespace {", + "\t\t\t\tfoundPodNamespaceRQ = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !foundPodNamespaceRQ {", + "\t\t\tcheck.LogError(\"Pod %q is running in a namespace that does not have a ResourceQuota applied.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running in a namespace that does not have a ResourceQuota applied\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is running in a namespace that has a ResourceQuota applied.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running in a namespace that has a ResourceQuota applied\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = 
log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNoSSHDaemonsAllowed", + "kind": "function", + "source": [ + "func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcut := put.Containers[0]", + "", + "\t\t// 1. 
Find SSH port", + "\t\tport, err := netutil.GetSSHDaemonPort(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get ssh daemon port on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the ssh port for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif port == \"\" {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsshServicePortNumber, err := strconv.ParseInt(port, 10, 32)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not convert port %q from string to integer on Container %q\", port, cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// 2. 
Check if SSH port is listening", + "\t\tsshPortInfo := netutil.PortInfo{PortNumber: int32(sshServicePortNumber), Protocol: sshServicePortProtocol}", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get the listening ports for Pod %q, err: %v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif _, ok := listeningPorts[sshPortInfo]; ok {", + "\t\t\tcheck.LogError(\"Pod %q is running an SSH daemon\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running an SSH daemon\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": 
[ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodRequests", + "kind": "function", + "source": [ + "func testPodRequests(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t// Loop through the containers, looking for containers that are missing requests.", + "\t// These need to be defined in order to pass.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !resources.HasRequestsSet(cut, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Container %q is missing resource requests\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is missing resource requests\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has resource requests\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has resource requests\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + 
"name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "test1337UIDs", + "kind": "function", + "source": [ + "func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Note this test is only ran as part of the 'extended' test suite.", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tconst leetNum = 1337", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif 
put.IsRunAsUserID(leetNum) {", + "\t\t\tcheck.LogError(\"Pod %q is using securityContext RunAsUser 1337\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using securityContext RunAsUser 1337\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not using securityContext RunAsUser 1337\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not using securityContext RunAsUser 1337\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoServicesUnderTestSkipFn", + "kind": "function", + "source": [ + "func 
GetNoServicesUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Services) == 0 {", + "\t\t\treturn true, \"no services to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNodePort", + "kind": "function", + "source": [ + "func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing %q\", services.ToString(s))", + "", + "\t\tif s.Spec.Type == nodePort {", + "\t\t\tcheck.LogError(\"Service %q (ns %q) type is nodePort\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service is type NodePort\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Service %q (ns %q) type is not nodePort (type=%q)\", s.Name, s.Namespace, s.Spec.Type)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service is not type NodePort\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, 
labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoCrdsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Crds) == 0 {", + "\t\t\treturn true, \"no roles to check\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoNamespacesSkipFn", + "kind": "function", + "source": [ + "func GetNoNamespacesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Namespaces) == 0 {", + "\t\t\treturn true, \"There are no namespaces to check. 
Please check config.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoRolesSkipFn", + "kind": "function", + "source": [ + "func GetNoRolesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Roles) == 0 {", + "\t\t\treturn true, \"There are no roles to check. Please check config.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testCrdRoles", + "kind": "function", + "source": [ + "func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcrdResources := rbac.GetCrdResources(env.Crds)", + "\tfor roleIndex := range env.Roles {", + "\t\tif !stringhelper.StringInSlice[string](env.Namespaces, env.Roles[roleIndex].Namespace, false) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tallRules := rbac.GetAllRules(\u0026env.Roles[roleIndex])", + "", + "\t\tmatchingRules, nonMatchingRules := rbac.FilterRulesNonMatchingResources(allRules, crdResources)", + "\t\tif len(matchingRules) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor _, aRule := range matchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) applies to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"This applies to CRDs under test\", testhelper.RoleRuleType, true, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, 
aRule.Verb))", + "\t\t}", + "\t\tfor _, aRule := range nonMatchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) does not apply to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"This rule does not apply to CRDs under test\", testhelper.RoleRuleType, false, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "", + "\t\tif len(nonMatchingRules) == 0 {", + "\t\t\tcheck.LogInfo(\"Role %q rules only apply to CRDs under test\", env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules only apply to CRDs under test\",", + "\t\t\t\ttesthelper.RoleType, true, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Role %q rules apply to a mix of CRDs under test and others.\", env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules apply to a mix of CRDs under test and others. 
See non compliant role rule objects.\",", + "\t\t\t\ttesthelper.RoleType, false, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "checkForbiddenCapability", + "qualifiedName": "checkForbiddenCapability", + "exported": false, + "signature": "func([]*provider.Container, string, *log.Logger)([]*testhelper.ReportObject)", + "doc": "checkForbiddenCapability determines if containers avoid a specific capability\n\nThe function iterates over each container, checking whether the specified\ncapability is present in its security context. Containers lacking the\ncapability are recorded as compliant; those with it are flagged\nnon‑compliant and an error logged. 
The results are returned as two slices\nof report objects for further processing.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:315", + "calls": [ + { + "name": "Info", + "kind": "function" + }, + { + "name": "isContainerCapabilitySet", + "kind": "function", + "source": [ + "func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool {", + "\tif containerCapabilities == nil {", + "\t\treturn false", + "\t}", + "", + "\tif len(containerCapabilities.Add) == 0 {", + "\t\treturn false", + "\t}", + "", + "\tif stringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(\"ALL\"), true) ||", + "\t\tstringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(capability), true) {", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, 
aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testBpfCapability", + "kind": "function", + "source": [ + "func testBpfCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"BPF\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testIpcLockCapability", + "kind": "function", + "source": [ + "func testIpcLockCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"IPC_LOCK\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNetAdminCapability", + "kind": "function", + "source": [ + "func testNetAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"NET_ADMIN\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNetRawCapability", + "kind": "function", + "source": [ + "func testNetRawCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"NET_RAW\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSysAdminCapability", + "kind": "function", + "source": [ + "func testSysAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"SYS_ADMIN\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. 
All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "getNbOfProcessesInPidNamespace", + "qualifiedName": "getNbOfProcessesInPidNamespace", + "exported": false, + "signature": "func(clientsholder.Context, int, clientsholder.Command)(int, error)", + "doc": "getNbOfProcessesInPidNamespace determines the number of processes in a PID namespace\n\nThe function runs a container command that lists the PID namespace for a\ngiven process ID, then parses the output to count how many processes belong\nto that namespace. It returns the count as an integer and propagates any\nerrors from executing the command or parsing its result.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/pidshelper.go:35", + "calls": [ + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testOneProcessPerContainer", + "kind": "function", + "source": [ + "func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t// the Istio sidecar container \"istio-proxy\" launches two processes: \"pilot-agent\" and \"envoy\"", + "\t\tif cut.IsIstioProxy() {", + "\t\t\tcheck.LogInfo(\"Skipping \\\"istio-proxy\\\" container\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Debug pod not found for node %q\", cut.NodeName)", + "\t\t\treturn", + "\t\t}", + "\t\tocpContext := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tpid, err := crclient.GetPidFromContainer(cut, ocpContext)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get PID for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get number of processes for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif nbProcesses \u003e 1 {", + "\t\t\tcheck.LogError(\"Container %q has more than one process running\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has more than one process running\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has only one process running\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, 
testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has only one process running\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getNbOfProcessesInPidNamespace(ctx clientsholder.Context, targetPid int, ch clientsholder.Command) (int, error) {", + "\tcmd := \"lsns -p \" + strconv.Itoa(targetPid) + \" -t pid -n\"", + "", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, cmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"can not execute command: \\\" %s \\\", err:%s\", cmd, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" returned %s\", cmd, errStr)", + "\t}", + "", + "\tretValues := strings.Fields(outStr)", + "\tif len(retValues) \u003c= nbProcessesIndex {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" returned an invalid value %s\", cmd, outStr)", + "\t}", + "\treturn strconv.Atoi(retValues[nbProcessesIndex])", + "}" + ] + }, + { + "name": "isCSVAndClusterWide", + "qualifiedName": "isCSVAndClusterWide", + "exported": false, + "signature": "func(string, string, *provider.TestEnvironment)(bool)", + "doc": "isCSVAndClusterWide determines if a CSV belongs to a cluster‑wide operator\n\nThe function inspects all operators in the test environment, checking whether\nany have a CSV matching the given namespace and name. 
If the CSV is found and\nits operator is marked as cluster wide or supports all namespaces, it returns\ntrue; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:832", + "calls": [ + { + "name": "isInstallModeMultiNamespace", + "kind": "function", + "source": [ + "func isInstallModeMultiNamespace(installModes []v1alpha1.InstallMode) bool {", + "\tfor i := 0; i \u003c len(installModes); i++ {", + "\t\tif installModes[i].Type == v1alpha1.InstallModeTypeAllNamespaces {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "ownedByClusterWideOperator", + "kind": "function", + "source": [ + "func ownedByClusterWideOperator(topOwners map[string]podhelper.TopOwner, env *provider.TestEnvironment) (aNamespace, name string, found bool) {", + "\tfor _, owner := range topOwners {", + "\t\tif isCSVAndClusterWide(owner.Namespace, owner.Name, env) {", + "\t\t\treturn owner.Namespace, owner.Name, true", + "\t\t}", + "\t}", + "\treturn \"\", \"\", false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) bool {", + "\tfor _, op := range env.Operators {", + "\t\tif op.Csv != nil \u0026\u0026", + "\t\t\top.Csv.Namespace == aNamespace \u0026\u0026", + "\t\t\top.Csv.Name == name \u0026\u0026", + "\t\t\t(op.IsClusterWide || isInstallModeMultiNamespace(op.Csv.Spec.InstallModes)) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "isContainerCapabilitySet", + "qualifiedName": "isContainerCapabilitySet", + "exported": false, + "signature": "func(*corev1.Capabilities, string)(bool)", + "doc": "isContainerCapabilitySet checks if a capability is explicitly added to a container\n\nThe function receives a pointer to a capabilities structure and a 
capability\nname. It returns true when the Add list contains either the specified\ncapability or the special ALL value, indicating that the capability has been\ngranted. If the capabilities object is nil or its Add list is empty, it\nreturns false.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:291", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "k8s.io/api/core/v1", + "name": "Capability", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "k8s.io/api/core/v1", + "name": "Capability", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "checkForbiddenCapability", + "kind": "function", + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) 
(compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testSYSNiceRealtimeCapability", + "kind": "function", + "source": [ + "func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the labeled containers and compare their security context capabilities and whether", + "\t// or not the node's kernel is realtime enabled.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tn := env.Nodes[cut.NodeName]", + "\t\tif !n.IsRTKernel() 
{", + "\t\t\tcheck.LogInfo(\"Container is not running on a realtime kernel enabled node\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not running on a realtime kernel enabled node\", true))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !isContainerCapabilitySet(cut.SecurityContext.Capabilities, \"SYS_NICE\") {", + "\t\t\tcheck.LogError(\"Container %q has been found running on a realtime kernel enabled node without SYS_NICE capability.\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node without SYS_NICE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container is running on a realtime kernel enabled node with the SYS_NICE capability\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node with the SYS_NICE capability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool {", + "\tif containerCapabilities == nil {", + "\t\treturn false", + "\t}", + "", + "\tif len(containerCapabilities.Add) == 0 {", + "\t\treturn false", + "\t}", + "", + "\tif stringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(\"ALL\"), true) ||", + "\t\tstringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(capability), true) {", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "isInstallModeMultiNamespace", + "qualifiedName": "isInstallModeMultiNamespace", + "exported": false, + "signature": 
"func([]v1alpha1.InstallMode)(bool)", + "doc": "isInstallModeMultiNamespace determines whether a CSV install mode includes all namespaces\n\nThe function iterates over the provided slice of install modes and returns\ntrue if any entry indicates an all‑namespace deployment. If none match, it\nreturns false. This result is used to identify cluster‑wide operators in\nsubsequent logic.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:850", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "isCSVAndClusterWide", + "kind": "function", + "source": [ + "func isCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) bool {", + "\tfor _, op := range env.Operators {", + "\t\tif op.Csv != nil \u0026\u0026", + "\t\t\top.Csv.Namespace == aNamespace \u0026\u0026", + "\t\t\top.Csv.Name == name \u0026\u0026", + "\t\t\t(op.IsClusterWide || isInstallModeMultiNamespace(op.Csv.Spec.InstallModes)) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isInstallModeMultiNamespace(installModes []v1alpha1.InstallMode) bool {", + "\tfor i := 0; i \u003c len(installModes); i++ {", + "\t\tif installModes[i].Type == v1alpha1.InstallModeTypeAllNamespaces {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "ownedByClusterWideOperator", + "qualifiedName": "ownedByClusterWideOperator", + "exported": false, + "signature": "func(map[string]podhelper.TopOwner, *provider.TestEnvironment)(string, bool)", + "doc": "ownedByClusterWideOperator Determines if any top owner is a cluster‑wide CSV\n\nThe function examines each top owner of an object, checking whether the owner\nis a ClusterServiceVersion that is installed by a cluster‑wide operator. 
It\nreturns the namespace and name of the matching CSV along with a boolean flag\nindicating a match. If no such owner exists, empty strings and false are\nreturned.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:866", + "calls": [ + { + "name": "isCSVAndClusterWide", + "kind": "function", + "source": [ + "func isCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) bool {", + "\tfor _, op := range env.Operators {", + "\t\tif op.Csv != nil \u0026\u0026", + "\t\t\top.Csv.Namespace == aNamespace \u0026\u0026", + "\t\t\top.Csv.Name == name \u0026\u0026", + "\t\t\t(op.IsClusterWide || isInstallModeMultiNamespace(op.Csv.Spec.InstallModes)) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodClusterRoleBindings", + "kind": "function", + "source": [ + "func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult, roleRefName, err := put.IsUsingClusterRoleBinding(env.ClusterRoleBindings, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to determine if Pod %q is using a cluster role binding, err=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"failed to determine if pod is using a cluster role binding: %v\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\ttopOwners, err := put.GetTopOwner()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get top owners of Pod %q, err=%v\", put, err)", + "\t\t\tnonCompliantObjects 
= append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Error getting top owners of this pod, err=%s\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcsvNamespace, csvName, isOwnedByClusterWideOperator := ownedByClusterWideOperator(topOwners, env)", + "\t\t// Pod is using a cluster role binding but is owned by a cluster wide operator, so it is ok", + "\t\tif isOwnedByClusterWideOperator \u0026\u0026 result {", + "\t\t\tcheck.LogInfo(\"Pod %q is using a cluster role binding but is owned by a cluster-wide operator (Csv %q, namespace %q)\", put, csvName, csvNamespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding but owned by a cluster-wide operator\", true))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif result {", + "\t\t\t// Pod was found to be using a cluster role binding. 
This is not allowed.", + "\t\t\t// Flagging this pod as a failed pod.", + "\t\t\tcheck.LogError(\"Pod %q is using a cluster role binding (roleRefName=%q)\", put, roleRefName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding\", false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogInfo(\"Pod %q is not using a cluster role binding\", put)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not using a cluster role binding\", true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ownedByClusterWideOperator(topOwners map[string]podhelper.TopOwner, env *provider.TestEnvironment) (aNamespace, name string, found bool) {", + "\tfor _, owner := range topOwners {", + "\t\tif isCSVAndClusterWide(owner.Namespace, owner.Name, env) {", + "\t\t\treturn owner.Namespace, owner.Name, true", + "\t\t}", + "\t}", + "\treturn \"\", \"\", false", + "}" + ] + }, + { + "name": "test1337UIDs", + "qualifiedName": "test1337UIDs", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "test1337UIDs Checks whether pods run with UID 1337\n\nThe function iterates over all pods in the test environment, logging each\npod’s status. 
It records a non‑compliant report object for any pod whose\nsecurityContext RunAsUser is set to 1337 and a compliant one otherwise.\nFinally, it sets the check result using these lists.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1163", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsRunAsUserID", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Note this test is only ran as part of the 'extended' test suite.", + 
"\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tconst leetNum = 1337", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.IsRunAsUserID(leetNum) {", + "\t\t\tcheck.LogError(\"Pod %q is using securityContext RunAsUser 1337\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using securityContext RunAsUser 1337\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not using securityContext RunAsUser 1337\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not using securityContext RunAsUser 1337\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testAutomountServiceToken", + "qualifiedName": "testAutomountServiceToken", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testAutomountServiceToken Verifies pod service account usage and automount token settings\n\nThe function iterates over all pods in the test environment, checking whether\na pod uses the default service account name and evaluating its\nautomountServiceAccountToken configuration via an external helper. It records\nnon-compliant objects when defaults are used or tokens are set to true, and\ncompliant objects otherwise. 
Results are stored in the check’s report for\nlater reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:883", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "name": "EvaluateAutomountTokens", + "kind": "function", + "source": [ + "func EvaluateAutomountTokens(client corev1typed.CoreV1Interface, put *provider.Pod) (bool, string) {", + "\t// The token can be specified in the pod directly", + "\t// or it can be specified in the service account of the pod", + "\t// if no service account is configured, then the pod will use the configuration", + "\t// of the default service account in that namespace", + "\t// the token defined in the pod has takes precedence", + "\t// the test would pass iif token is explicitly set to false", + "\t// if the token is set to true in the pod, the test would fail right 
away", + "\tif put.Spec.AutomountServiceAccountToken != nil \u0026\u0026 *put.Spec.AutomountServiceAccountToken {", + "\t\treturn false, fmt.Sprintf(\"Pod %s:%s is configured with automountServiceAccountToken set to true\", put.Namespace, put.Name)", + "\t}", + "", + "\t// Collect information about the service account attached to the pod.", + "\tsaAutomountServiceAccountToken, err := put.IsAutomountServiceAccountSetOnSA()", + "\tif err != nil {", + "\t\treturn false, err.Error()", + "\t}", + "", + "\t// The pod token is false means the pod is configured properly", + "\t// The pod is not configured and the service account is configured with false means", + "\t// the pod will inherit the behavior `false` and the test would pass", + "\tif (put.Spec.AutomountServiceAccountToken != nil \u0026\u0026 !*put.Spec.AutomountServiceAccountToken) || (saAutomountServiceAccountToken != nil \u0026\u0026 !*saAutomountServiceAccountToken) {", + "\t\treturn true, \"\"", + "\t}", + "", + "\t// the service account is configured with true means all the pods", + "\t// using this service account are not configured properly, register the error", + "\t// message and fail", + "\tif saAutomountServiceAccountToken != nil \u0026\u0026 *saAutomountServiceAccountToken {", + "\t\treturn false, fmt.Sprintf(\"serviceaccount %s:%s is configured with automountServiceAccountToken set to true, impacting pod %s\", put.Namespace, put.Spec.ServiceAccountName, put.Name)", + "\t}", + "", + "\t// the token should be set explicitly to false, otherwise, it's a failure", + "\t// register the error message and check the next pod", + "\tif saAutomountServiceAccountToken == nil {", + "\t\treturn false, fmt.Sprintf(\"serviceaccount %s:%s is not configured with automountServiceAccountToken set to false, impacting pod %s\", put.Namespace, put.Spec.ServiceAccountName, put.Name)", + "\t}", + "", + "\treturn true, \"\" // Pod has passed all checks", + "}" + ] + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + 
"name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q uses the default service account name.\", put.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, 
put.Name, \"Pod has been found with default service account name\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Evaluate the pod's automount service tokens and any attached service accounts", + "\t\tclient := clientsholder.GetClientsHolder()", + "\t\tpodPassed, newMsg := rbac.EvaluateAutomountTokens(client.K8sClient.CoreV1(), put)", + "\t\tif !podPassed {", + "\t\t\tcheck.LogError(\"%s\", newMsg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, newMsg, false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q does not have automount service tokens set to true\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have automount service tokens set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testBpfCapability", + "qualifiedName": "testBpfCapability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testBpfCapability Verifies that containers do not use the BPF kernel capability\n\nThe function iterates over all test containers, checking whether the \"BPF\"\ncapability is present in their security context. It records compliant\ncontainers where the capability is absent and non‑compliant ones where it\nappears, attaching appropriate log messages. 
Finally, it stores the results\nwithin the provided check object for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:393", + "calls": [ + { + "name": "checkForbiddenCapability", + "kind": "function", + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. 
All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testBpfCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"BPF\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainerHostPort", + "qualifiedName": "testContainerHostPort", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainerHostPort Verifies that containers do not expose host ports\n\nThe function iterates over all containers in the test environment, checking\neach declared port for a non‑zero HostPort value. If such a port is found,\nit records a non‑compliant report object with details of the offending\ncontainer and port number; otherwise it logs compliance and records a\ncompliant object. 
Finally, it sets the check result with the collected\nreports.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:496", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", 
+ "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", 
+ "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + 
"\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\thostPortFound := false", + "\t\tfor _, aPort := range cut.Ports {", + "\t\t\tif aPort.HostPort != 0 {", + "\t\t\t\tcheck.LogError(\"Host port %d is configured in Container %q.\", aPort.HostPort, cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Host port is configured\", false).", + "\t\t\t\t\tSetType(testhelper.HostPortType).", + "\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(aPort.HostPort))))", + "\t\t\t\thostPortFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !hostPortFound {", + "\t\t\tcheck.LogInfo(\"Host port not configured in Container %q.\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Host port is not configured\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainerSCC", + "qualifiedName": "testContainerSCC", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainerSCC Determines container privilege levels based on SCC analysis\n\nThe function iterates over all pods in the test environment, applying a\nsecurity context check to each pod's containers. Containers are classified\ninto categories, with only those in the lowest privileged category considered\ncompliant. 
It records both compliant and non‑compliant containers, tracks\nthe highest privilege level found, and reports an overall CNF category before\nsetting the test result.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1190", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "CheckPod", + "kind": "function", + "source": [ + "func CheckPod(pod *provider.Pod) []PodListCategory {", + "\tvar containerSCC ContainerSCC", + "\tcontainerSCC.HostIPC = NOK", + "\tif pod.Spec.HostIPC {", + "\t\tcontainerSCC.HostIPC = OK", + "\t}", + "\tcontainerSCC.HostNetwork = NOK", + "\tif pod.Spec.HostNetwork {", + "\t\tcontainerSCC.HostNetwork = OK", + "\t}", + "\tcontainerSCC.HostPID = NOK", + "\tif pod.Spec.HostPID {", + "\t\tcontainerSCC.HostPID = OK", + "\t}", + "\tcontainerSCC.SeLinuxContextPresent = NOK", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.SELinuxOptions != nil {", + "\t\tcontainerSCC.SeLinuxContextPresent = OK", + "\t}", + "\tcontainerSCC.AllVolumeAllowed, containerSCC.HostDirVolumePluginPresent = AllVolumeAllowed(pod.Spec.Volumes)", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.RunAsUser != nil {", + "\t\tcontainerSCC.RunAsUserPresent = OK", + "\t} else {", + "\t\tcontainerSCC.RunAsUserPresent = NOK", + "\t}", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.FSGroup != nil {", + "\t\tcontainerSCC.FsGroupPresent = OK", + "\t} else {", + "\t\tcontainerSCC.FsGroupPresent = NOK", + "\t}", + "\treturn checkContainerCategory(pod.Spec.Containers, containerSCC, pod.Name, pod.Namespace)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "append", + 
"kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\thighLevelCat := securitycontextcontainer.CategoryID1", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", pod)", + "\t\tlistCategory := securitycontextcontainer.CheckPod(pod)", + "\t\tfor _, cat := range listCategory {", + "\t\t\tif cat.Category \u003e securitycontextcontainer.CategoryID1NoUID0 {", + "\t\t\t\tcheck.LogError(\"Category %q is NOT category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is NOT category 1 or category NoUID0\", false).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, aContainerOut)", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Category %q is category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is category 1 or category NoUID0\", true).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, aContainerOut)", + "\t\t\t}", + "\t\t\tif cat.Category \u003e highLevelCat {", + "\t\t\t\thighLevelCat = cat.Category", + "\t\t\t}", + "\t\t}", + "\t}", + "\taCNFOut := 
testhelper.NewReportObject(\"Overall CNF category\", testhelper.CnfType, false).AddField(testhelper.Category, highLevelCat.String())", + "\tcompliantObjects = append(compliantObjects, aCNFOut)", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testCrdRoles", + "qualifiedName": "testCrdRoles", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testCrdRoles Evaluates whether role rules target only CRDs under test\n\nThe function iterates over all roles in the environment, filtering by\nnamespace, then extracts each role's API rules. It compares these rules\nagainst the list of CRD resources under test to separate matching and\nnon‑matching rules. For each rule it records a report object indicating\ncompliance, and if any role contains mixed rule types it logs an error and\nmarks the role as non‑compliant.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1258", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "name": "GetCrdResources", + "kind": "function", + "source": [ + "func GetCrdResources(crds []*apiextv1.CustomResourceDefinition) (resourceList []CrdResource) {", + "\tfor _, crd := range crds {", + "\t\tvar aResource CrdResource", + "\t\taResource.Group = crd.Spec.Group", + "\t\taResource.SingularName = crd.Spec.Names.Singular", + "\t\taResource.PluralName = crd.Spec.Names.Plural", + "\t\taResource.ShortNames = crd.Spec.Names.ShortNames", + "\t\tresourceList = append(resourceList, aResource)", + "\t}", + "\treturn resourceList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "name": "GetAllRules", + "kind": "function", + "source": [ + "func GetAllRules(aRole *rbacv1.Role) (ruleList []RoleRule) {", + "\tfor _, aRule := range aRole.Rules {", + "\t\tfor _, aGroup := range aRule.APIGroups {", + "\t\t\tfor _, aResource := range 
aRule.Resources {", + "\t\t\t\tfor _, aVerb := range aRule.Verbs {", + "\t\t\t\t\tvar aRoleRule RoleRule", + "\t\t\t\t\taRoleRule.Resource.Group = aGroup", + "\t\t\t\t\taRoleRule.Resource.Name = aResource", + "\t\t\t\t\taRoleRule.Verb = aVerb", + "\t\t\t\t\truleList = append(ruleList, aRoleRule)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn ruleList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "name": "FilterRulesNonMatchingResources", + "kind": "function", + "source": [ + "func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource) (matching, nonMatching []RoleRule) {", + "\tfor _, aRule := range ruleList {", + "\t\tfor _, aResource := range resourceList {", + "\t\t\tif isResourceInRoleRule(aResource, aRule) {", + "\t\t\t\tmatching = append(matching, aRule)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tnonMatching = SliceDifference(ruleList, matching)", + "\treturn matching, nonMatching", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": 
"function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedNamedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace).AddField(Name, aName)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedNamedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace).AddField(Name, aName)", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + 
"", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "\tcrdResources := rbac.GetCrdResources(env.Crds)", + "\tfor roleIndex := range env.Roles {", + "\t\tif !stringhelper.StringInSlice[string](env.Namespaces, env.Roles[roleIndex].Namespace, false) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tallRules := rbac.GetAllRules(\u0026env.Roles[roleIndex])", + "", + "\t\tmatchingRules, nonMatchingRules := rbac.FilterRulesNonMatchingResources(allRules, crdResources)", + "\t\tif len(matchingRules) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor _, aRule := range matchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) applies to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"This applies to CRDs under test\", testhelper.RoleRuleType, true, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "\t\tfor _, aRule := range nonMatchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) does not apply to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"This rule does not apply to CRDs under test\", testhelper.RoleRuleType, false, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", 
+ "\t\t}", + "", + "\t\tif len(nonMatchingRules) == 0 {", + "\t\t\tcheck.LogInfo(\"Role %q rules only apply to CRDs under test\", env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules only apply to CRDs under test\",", + "\t\t\t\ttesthelper.RoleType, true, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Role %q rules apply to a mix of CRDs under test and others.\", env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules apply to a mix of CRDs under test and others. See non compliant role rule objects.\",", + "\t\t\t\ttesthelper.RoleType, false, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testIpcLockCapability", + "qualifiedName": "testIpcLockCapability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testIpcLockCapability Verifies containers lack the IPC_LOCK capability\n\nThis function inspects each container in the test environment, checking\nwhether the IPC_LOCK capability is present in its security context.\nContainers without the capability are marked compliant; those with it are\nflagged non‑compliant and reported accordingly. 
The results are stored back\ninto the check object for later aggregation.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:381", + "calls": [ + { + "name": "checkForbiddenCapability", + "kind": "function", + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. 
All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testIpcLockCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"IPC_LOCK\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNamespace", + "qualifiedName": "testNamespace", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNamespace Checks namespace names for disallowed prefixes and validates CR placement\n\nThe function iterates over all namespaces in the test environment, logging\neach one. For every namespace it verifies that none of the predefined invalid\nprefixes are present; if an invalid prefix is found a non‑compliant report\nobject is created. After checking prefixes, it calls helper routines to\nexamine whether custom resources exist in unauthorized namespaces and records\nany failures as non‑compliant objects. 
Finally, results for both compliant\nand non‑compliant cases are set on the check.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:633", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "HasPrefix", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": "TestCrsNamespaces", + "kind": "function", + "source": [ + "func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespaces []string, logger *log.Logger) (invalidCrs map[string]map[string][]string, err error) {", + "\t// Initialize the top level map", + "\tinvalidCrs = make(map[string]map[string][]string)", + "\tfor _, crd := range crds {", + "\t\tcrNamespaces, err := getCrsPerNamespaces(crd)", + "\t\tif err != nil {", + "\t\t\treturn invalidCrs, fmt.Errorf(\"failed to get CRs for CRD 
%s - Error: %v\", crd.Name, err)", + "\t\t}", + "\t\tfor namespace, crNames := range crNamespaces {", + "\t\t\tif !stringhelper.StringInSlice(configNamespaces, namespace, false) {", + "\t\t\t\tlogger.Error(\"CRD: %q (kind:%q/ plural:%q) has CRs %v deployed in namespace %q not in configured namespaces %v\",", + "\t\t\t\t\tcrd.Name, crd.Spec.Names.Kind, crd.Spec.Names.Plural, crNames, namespace, configNamespaces)", + "\t\t\t\t// Initialize this map dimension before use", + "\t\t\t\tif invalidCrs[crd.Name] == nil {", + "\t\t\t\t\tinvalidCrs[crd.Name] = make(map[string][]string)", + "\t\t\t\t}", + "\t\t\t\tinvalidCrs[crd.Name][namespace] = append(invalidCrs[crd.Name][namespace], crNames...)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn invalidCrs, nil", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": "GetInvalidCRsNum", + "kind": "function", + "source": [ + "func GetInvalidCRsNum(invalidCrs map[string]map[string][]string, logger *log.Logger) int {", + "\tvar invalidCrsNum int", + "\tfor crdName, namespaces := range invalidCrs {", + "\t\tfor namespace, crNames := range namespaces {", + "\t\t\tfor _, crName := range crNames {", + "\t\t\t\tlogger.Error(\"crName=%q namespace=%q is invalid (crd=%q)\", crName, namespace, crdName)", + "\t\t\t\tinvalidCrsNum++", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn invalidCrsNum", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + 
"\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, namespace := range env.Namespaces {", + "\t\tcheck.LogInfo(\"Testing namespace %q\", namespace)", + "\t\tnamespaceCompliant := true", + "\t\tfor _, invalidPrefix := range invalidNamespacePrefixes {", + "\t\t\tif strings.HasPrefix(namespace, invalidPrefix) {", + "\t\t\t\tcheck.LogError(\"Namespace %q has invalid prefix %q\", namespace, invalidPrefix)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has invalid prefix\", testhelper.Namespace, false, namespace))", + "\t\t\t\tnamespaceCompliant = false", + "\t\t\t\tbreak // Break out of the loop if we find an invalid prefix", + "\t\t\t}", + "\t\t}", + 
"\t\tif namespaceCompliant {", + "\t\t\tcheck.LogInfo(\"Namespace %q has valid prefix\", namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has valid prefix\", testhelper.Namespace, true, namespace))", + "\t\t}", + "\t}", + "\tif failedNamespacesNum := len(nonCompliantObjects); failedNamespacesNum \u003e 0 {", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "", + "\tinvalidCrs, err := namespace.TestCrsNamespaces(env.Crds, env.Namespaces, check.GetLogger())", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error while testing CRs namespaces, err=%v\", err)", + "\t\treturn", + "\t}", + "", + "\tinvalidCrsNum := namespace.GetInvalidCRsNum(invalidCrs, check.GetLogger())", + "\tif invalidCrsNum \u003e 0 {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"CRs are not in the configured namespaces\", testhelper.Namespace, false))", + "\t} else {", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"CRs are in the configured namespaces\", testhelper.Namespace, true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNamespaceResourceQuota", + "qualifiedName": "testNamespaceResourceQuota", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNamespaceResourceQuota evaluates pod placement against namespace resource quotas\n\nThe function iterates through all pods in the test environment, checking\nwhether each pod's namespace has an associated ResourceQuota object. If a\nmatching quota is found, the pod is marked compliant; otherwise it is\nrecorded as non‑compliant and logged with an error message. 
After\nprocessing all pods, the compliance results are stored via the check’s\nSetResult method.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1037", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) 
error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\t// Look through all of the pods and compare their namespace to any potential", + "\t\t// resource quotas", + "\t\tfoundPodNamespaceRQ := false", + "\t\tfor index 
:= range env.ResourceQuotas {", + "\t\t\t// We are just checking for the existence of the resource quota as of right now.", + "\t\t\t// Read more about the resource quota object here:", + "\t\t\t// https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "\t\t\tif put.Namespace == env.ResourceQuotas[index].Namespace {", + "\t\t\t\tfoundPodNamespaceRQ = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !foundPodNamespaceRQ {", + "\t\t\tcheck.LogError(\"Pod %q is running in a namespace that does not have a ResourceQuota applied.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running in a namespace that does not have a ResourceQuota applied\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is running in a namespace that has a ResourceQuota applied.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running in a namespace that has a ResourceQuota applied\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNetAdminCapability", + "qualifiedName": "testNetAdminCapability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNetAdminCapability Verifies that containers do not use the NET_ADMIN capability\n\nThe function queries all test containers for the presence of the NET_ADMIN\ncapability using a helper routine, collecting compliant and non‑compliant\nreports. It then records these results in the provided check object. 
This\nensures that network administration privileges are not granted to container\nprocesses.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:357", + "calls": [ + { + "name": "checkForbiddenCapability", + "kind": "function", + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. 
All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNetAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"NET_ADMIN\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNetRawCapability", + "qualifiedName": "testNetRawCapability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNetRawCapability Validates that containers do not use the NET_RAW capability\n\nThe function examines all container security contexts in the test environment\nand reports any instance where the NET_RAW capability is granted. It records\ncompliant and non‑compliant findings, attaching relevant details to each\nreport object. 
Finally, it stores the results in the provided check for later\naggregation.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:369", + "calls": [ + { + "name": "checkForbiddenCapability", + "kind": "function", + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. 
All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNetRawCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"NET_RAW\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNoSSHDaemonsAllowed", + "qualifiedName": "testNoSSHDaemonsAllowed", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNoSSHDaemonsAllowed Verifies pods do not run SSH daemons\n\nThe function iterates over each pod in the test environment, attempting to\nlocate an SSH listening port within its first container. If a port is found,\nit checks whether that port is actively listening; presence indicates a\nrunning SSH daemon and marks the pod non‑compliant. Pods without an SSH\nport or with no active listener are considered compliant. 
Results are\naggregated into report objects and set as the check outcome.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1080", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "GetSSHDaemonPort", + "kind": "function", + "source": [ + "func GetSSHDaemonPort(cut *provider.Container) (string, error) {", + "\tconst findSSHDaemonPort = \"ss -tpln | grep sshd | head -1 | awk '{ print $4 }' | awk -F : '{ print $2 }'\"", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(findSSHDaemonPort, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"failed to execute command %s on %s, err: %v\", findSSHDaemonPort, cut, err)", + "\t}", + "", + "\treturn strings.TrimSpace(outStr), nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": 
"ParseInt", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "int32", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "GetListeningPorts", + "kind": "function", + "source": [ + "func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) {", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s on %s, err: %v\", getListeningPortsCmd, cut, err)", + "\t}", + "", + "\treturn parseListeningPorts(outStr)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func 
NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", 
+ "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcut := put.Containers[0]", + "", + "\t\t// 1. 
Find SSH port", + "\t\tport, err := netutil.GetSSHDaemonPort(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get ssh daemon port on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the ssh port for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif port == \"\" {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsshServicePortNumber, err := strconv.ParseInt(port, 10, 32)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not convert port %q from string to integer on Container %q\", port, cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// 2. 
Check if SSH port is listening", + "\t\tsshPortInfo := netutil.PortInfo{PortNumber: int32(sshServicePortNumber), Protocol: sshServicePortProtocol}", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get the listening ports for Pod %q, err: %v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif _, ok := listeningPorts[sshPortInfo]; ok {", + "\t\t\tcheck.LogError(\"Pod %q is running an SSH daemon\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running an SSH daemon\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNodePort", + "qualifiedName": "testNodePort", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNodePort Checks services for disallowed nodePort usage\n\nThe function iterates over all services in the test environment, logging each\none. 
If a service is of type NodePort, it records a non‑compliant report\nobject and logs an error; otherwise it records a compliant report object.\nFinally, it sets the check result with the two lists.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1227", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func 
NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing %q\", services.ToString(s))", + "", + "\t\tif s.Spec.Type == nodePort {", + "\t\t\tcheck.LogError(\"Service %q (ns %q) type is nodePort\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service is type NodePort\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Service %q (ns %q) type is not nodePort (type=%q)\", s.Name, s.Namespace, s.Spec.Type)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service is not type NodePort\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + 
"\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOneProcessPerContainer", + "qualifiedName": "testOneProcessPerContainer", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOneProcessPerContainer verifies that each container runs only a single process\n\nThe function iterates over all containers in the test environment, excluding\nIstio proxy sidecars. For each container it obtains the main PID via the\nprobe pod and counts processes in its namespace. Containers with more than\none process are flagged as non‑compliant; otherwise they are marked\ncompliant. The results are aggregated into report objects and set on the\ncheck.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:917", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsIstioProxy", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidFromContainer", + "kind": "function", + "source": [ + "func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) {", + "\tvar pidCmd string", + "", + "\tswitch cut.Runtime {", + "\tcase \"docker\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"docker-pullable\":", + "\t\tpidCmd = DockerInspectPID + cut.UID + DevNull", + "\tcase \"cri-o\", 
\"containerd\":", + "\t\tpidCmd = \"chroot /host crictl inspect --output go-template --template '{{.info.pid}}' \" + cut.UID + DevNull", + "\tdefault:", + "\t\tlog.Debug(\"Container runtime %s not supported yet for this test, skipping\", cut.Runtime)", + "\t\treturn 0, fmt.Errorf(\"container runtime %s not supported\", cut.Runtime)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, pidCmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", pidCmd, cut, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" on %s returned %s\", pidCmd, cut, errStr)", + "\t}", + "", + "\treturn strconv.Atoi(strings.TrimSuffix(outStr, \"\\n\"))", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "getNbOfProcessesInPidNamespace", + "kind": "function", + "source": [ + "func getNbOfProcessesInPidNamespace(ctx clientsholder.Context, targetPid int, ch clientsholder.Command) (int, error) {", + "\tcmd := \"lsns -p \" + strconv.Itoa(targetPid) + \" -t pid -n\"", + "", + "\toutStr, errStr, err := ch.ExecCommandContainer(ctx, cmd)", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"can not execute command: \\\" %s \\\", err:%s\", cmd, err)", + "\t}", + "\tif errStr != \"\" {", + "\t\treturn 0, 
fmt.Errorf(\"cmd: \\\" %s \\\" returned %s\", cmd, errStr)", + "\t}", + "", + "\tretValues := strings.Fields(outStr)", + "\tif len(retValues) \u003c= nbProcessesIndex {", + "\t\treturn 0, fmt.Errorf(\"cmd: \\\" %s \\\" returned an invalid value %s\", cmd, outStr)", + "\t}", + "\treturn strconv.Atoi(retValues[nbProcessesIndex])", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + 
"\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", 
+ "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t// the Istio sidecar container \"istio-proxy\" launches two processes: \"pilot-agent\" and \"envoy\"", + "\t\tif cut.IsIstioProxy() {", + "\t\t\tcheck.LogInfo(\"Skipping \\\"istio-proxy\\\" container\")", + "\t\t\tcontinue", + "\t\t}", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Debug pod not found for node %q\", cut.NodeName)", + "\t\t\treturn", + "\t\t}", + "\t\tocpContext := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tpid, err := 
crclient.GetPidFromContainer(cut, ocpContext)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get PID for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tnbProcesses, err := getNbOfProcessesInPidNamespace(ocpContext, pid, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get number of processes for Container %q, error: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, err.Error(), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif nbProcesses \u003e 1 {", + "\t\t\tcheck.LogError(\"Container %q has more than one process running\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has more than one process running\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has only one process running\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has only one process running\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodClusterRoleBindings", + "qualifiedName": "testPodClusterRoleBindings", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodClusterRoleBindings Verifies that pods do not use cluster role bindings\n\nThe function iterates over all pods in the test environment, checking each\nfor usage of a cluster role binding. If a pod uses one but is owned by a\ncluster‑wide operator, it is considered compliant; otherwise any usage\nflags the pod as non‑compliant. 
Results are recorded in report objects and\nreturned via the check result.\n\nnolint:dupl", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:783", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsUsingClusterRoleBinding", + "kind": "function" + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "GetTopOwner", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "ownedByClusterWideOperator", + "kind": "function", + "source": [ + "func ownedByClusterWideOperator(topOwners map[string]podhelper.TopOwner, env *provider.TestEnvironment) (aNamespace, name string, found bool) {", + "\tfor _, owner := range 
topOwners {", + "\t\tif isCSVAndClusterWide(owner.Namespace, owner.Name, env) {", + "\t\t\treturn owner.Namespace, owner.Name, true", + "\t\t}", + "\t}", + "\treturn \"\", \"\", false", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn 
nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult, roleRefName, err := put.IsUsingClusterRoleBinding(env.ClusterRoleBindings, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to determine if Pod %q is using a cluster role binding, err=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"failed to determine if pod is using a cluster role binding: %v\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\ttopOwners, err := put.GetTopOwner()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get top owners of Pod %q, err=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Error getting top owners of this pod, err=%s\", err), false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcsvNamespace, csvName, isOwnedByClusterWideOperator := ownedByClusterWideOperator(topOwners, env)", + "\t\t// Pod is using a cluster role binding but is owned by a cluster wide operator, so it is ok", + "\t\tif isOwnedByClusterWideOperator \u0026\u0026 result {", + "\t\t\tcheck.LogInfo(\"Pod %q is using a cluster role binding but is owned by a cluster-wide operator (Csv %q, namespace %q)\", put, csvName, csvNamespace)", + "\t\t\tcompliantObjects = append(compliantObjects, 
testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding but owned by a cluster-wide operator\", true))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif result {", + "\t\t\t// Pod was found to be using a cluster role binding. This is not allowed.", + "\t\t\t// Flagging this pod as a failed pod.", + "\t\t\tcheck.LogError(\"Pod %q is using a cluster role binding (roleRefName=%q)\", put, roleRefName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is using a cluster role binding\", false).", + "\t\t\t\tAddField(testhelper.ClusterRoleName, roleRefName))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogInfo(\"Pod %q is not using a cluster role binding\", put)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not using a cluster role binding\", true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodHostIPC", + "qualifiedName": "testPodHostIPC", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodHostIPC Ensures Pod HostIPC is disabled\n\nThe function iterates over all pods in the test environment, logging each\npod’s name. For every pod it checks whether the HostIPC flag is true; if\nso, it records a non‑compliant report object and logs an error, otherwise\nit records a compliant report object. 
Finally, it sets the check result with\nboth lists of objects.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:583", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodHostIPC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostIPC {", + "\t\t\tcheck.LogError(\"HostIpc is set in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostIpc is set to true\", 
false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"HostIpc not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostIpc is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodHostNetwork", + "qualifiedName": "testPodHostNetwork", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodHostNetwork Checks whether pods enable host networking\n\nThe routine iterates over all pod objects in the test environment, logging\neach one. If a pod’s HostNetwork flag is true it records a non‑compliant\nreport object and logs an error; otherwise it records a compliant report\nobject and logs success. Finally, it stores both lists of results on the\ncheck instance.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:528", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, 
isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodHostNetwork(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostNetwork {", + "\t\t\tcheck.LogError(\"Host network is set to true in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Host network is set to true\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Host network is set to false in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Host network is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodHostPID", + "qualifiedName": "testPodHostPID", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodHostPID Checks that no pod uses the host PID namespace\n\nThe function iterates over all pods in the test environment, logging each\none’s status. If a pod has HostPID enabled it records a non‑compliant\nreport object and logs an error; otherwise it records a compliant object and\nlogs informational text. 
Finally, it sets the check result with the lists of\ncompliant and non‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:607", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) 
error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.HostPID {", + "\t\t\tcheck.LogError(\"HostPid is set in Pod %q.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostPid is set to true\", 
false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"HostPid not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"HostPid is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodHostPath", + "qualifiedName": "testPodHostPath", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodHostPath Verifies that pods do not use host path volumes\n\nThe function iterates over all pods in the test environment, checking each\nvolume for a non-empty HostPath field. If such a path is found, the pod is\nmarked non‑compliant and recorded with details; otherwise it is considered\ncompliant. Results are logged and reported via the check object.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:551", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) 
(out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tfor idx := range put.Spec.Volumes {", + "\t\t\tvol := \u0026put.Spec.Volumes[idx]", + "\t\t\tif vol.HostPath != nil \u0026\u0026 vol.HostPath.Path != \"\" {", + "\t\t\t\tcheck.LogError(\"Hostpath path: %q is set in Pod %q.\", vol.HostPath.Path, put)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Hostpath path is set\", false).", + "\t\t\t\t\tSetType(testhelper.HostPathType).", + "\t\t\t\t\tAddField(testhelper.Path, vol.HostPath.Path))", + "\t\t\t\tpodIsCompliant = false", + "\t\t\t}", + "\t\t}", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogError(\"Hostpath path not set in Pod %q.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Hostpath path is not set\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodRequests", + "qualifiedName": "testPodRequests", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodRequests Verifies that every 
container has defined CPU and memory requests\n\nThe function iterates over all containers in the test environment, checking\nwhether each one specifies resource requests using a helper routine.\nContainers lacking any request or with zero CPU or memory values are logged\nas errors and collected into a non‑compliant list; those that pass are\nrecorded as compliant. Finally, it records both lists as the result of the\ncompliance check.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1138", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/resources", + "name": "HasRequestsSet", + "kind": "function", + "source": [ + "func HasRequestsSet(cut *provider.Container, logger *log.Logger) bool {", + "\tpassed := true", + "", + "\t// Parse the requests.", + "\tif len(cut.Resources.Requests) == 0 {", + "\t\tlogger.Error(\"Container %q has been found missing resource requests\", cut)", + "\t\tpassed = false", + "\t} else {", + "\t\tif cut.Resources.Requests.Cpu().IsZero() {", + "\t\t\tlogger.Error(\"Container %q has been found missing CPU requests\", cut)", + "\t\t\tpassed = false", + "\t\t}", + "", + "\t\tif cut.Resources.Requests.Memory().IsZero() {", + "\t\t\tlogger.Error(\"Container %q has been found missing memory requests\", cut)", + "\t\t\tpassed = false", + "\t\t}", + "\t}", + "\treturn passed", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, 
aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodRequests(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t// Loop through the containers, looking for containers that are missing requests.", + "\t// These need to be defined in order to pass.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !resources.HasRequestsSet(cut, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Container %q is missing resource requests\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is missing resource requests\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has resource requests\", cut)", + "\t\t\tcompliantObjects 
= append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has resource requests\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodRoleBindings", + "qualifiedName": "testPodRoleBindings", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodRoleBindings Checks pod role bindings against CNF namespace rules\n\nThe routine iterates over each pod in the test environment, verifying that\nits service account is not default and that any role binding it relies on\nresides within an approved CNF namespace. If a pod references a role binding\noutside these namespaces, it is marked non‑compliant and detailed\ninformation about the offending binding is recorded. Pods passing all checks\nare logged as compliant, and results are reported back to the test framework.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:704", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, 
+ { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q has an empty or default serviceAccountName\", put)", + "\t\t\t// Add the pod to the non-compliant list", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\"The serviceAccountName is either empty or default\", false))", + 
"\t\t\tpodIsCompliant = false", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has a serviceAccountName %q, checking role bindings.\", put, put.Spec.ServiceAccountName)", + "\t\t\t// Loop through the rolebindings and check if they are from another namespace", + "\t\t\tfor rbIndex := range env.RoleBindings {", + "\t\t\t\t// Short circuit if the role binding and the pod are in the same namespace.", + "\t\t\t\tif env.RoleBindings[rbIndex].Namespace == put.Namespace {", + "\t\t\t\t\tcheck.LogInfo(\"Pod %q and the role binding are in the same namespace\", put)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\t// If we make it to this point, the role binding and the pod are in different namespaces.", + "\t\t\t\t// We must check if the pod's service account is in the role binding's subjects.", + "\t\t\t\tfound := false", + "\t\t\t\tfor _, subject := range env.RoleBindings[rbIndex].Subjects {", + "\t\t\t\t\t// If the subject is a service account and the service account is in the same namespace as one of the CNF's namespaces, then continue, this is allowed", + "\t\t\t\t\tif subject.Kind == rbacv1.ServiceAccountKind \u0026\u0026", + "\t\t\t\t\t\tsubject.Namespace == put.Namespace \u0026\u0026", + "\t\t\t\t\t\tsubject.Name == put.Spec.ServiceAccountName \u0026\u0026", + "\t\t\t\t\t\tstringhelper.StringInSlice[string](env.Namespaces, env.RoleBindings[rbIndex].Namespace, false) {", + "\t\t\t\t\t\tcontinue", + "\t\t\t\t\t}", + "", + "\t\t\t\t\t// Finally, if the subject is a service account and the service account is in the same namespace as the pod, then we have a failure", + "\t\t\t\t\tif subject.Kind == rbacv1.ServiceAccountKind \u0026\u0026", + "\t\t\t\t\t\tsubject.Namespace == put.Namespace \u0026\u0026", + "\t\t\t\t\t\tsubject.Name == put.Spec.ServiceAccountName {", + "\t\t\t\t\t\tcheck.LogError(\"Pod %q has the following role bindings that do not live in one of the CNF namespaces: %q\", put, env.RoleBindings[rbIndex].Name)", + "", + "\t\t\t\t\t\t// Add the pod to the 
non-compliant list", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\t\t\"The role bindings used by this pod do not live in one of the CNF namespaces\", false).", + "\t\t\t\t\t\t\t\tAddField(testhelper.RoleBindingName, env.RoleBindings[rbIndex].Name).", + "\t\t\t\t\t\t\t\tAddField(testhelper.RoleBindingNamespace, env.RoleBindings[rbIndex].Namespace).", + "\t\t\t\t\t\t\t\tAddField(testhelper.ServiceAccountName, put.Spec.ServiceAccountName).", + "\t\t\t\t\t\t\t\tSetType(testhelper.PodRoleBinding))", + "\t\t\t\t\t\tfound = true", + "\t\t\t\t\t\tpodIsCompliant = false", + "\t\t\t\t\t\tbreak", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\t// Break of out the loop if we found a role binding that is out of namespace", + "\t\t\t\tif found {", + "\t\t\t\t\tbreak", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\t// Add pod to the compliant object list", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"All the role bindings used by Pod %q (applied by the service accounts) live in one of the CNF namespaces\", put)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"All the role bindings used by this pod (applied by the service accounts) live in one of the CNF namespaces\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodServiceAccount", + "qualifiedName": "testPodServiceAccount", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodServiceAccount checks that pods use non‑default service accounts\n\nThe function iterates over all pods in the test environment, logging each\npod’s name. 
For every pod it verifies whether the ServiceAccountName equals\nthe default account; if so, it records a non‑compliant report object and\nlogs an error, otherwise it records a compliant report object and logs\nsuccess. Finally, it sets the check result with the collected compliant and\nnon‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:679", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q does not have a valid service account name (uses the default service account instead).\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have a valid service account name\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has a valid service account name\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a service account name\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSYSNiceRealtimeCapability", + "qualifiedName": "testSYSNiceRealtimeCapability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testSYSNiceRealtimeCapability Verifies SYS_NICE capability on containers in realtime kernel nodes\n\nThe function iterates over all test containers, checks if their node uses a\nrealtime kernel, and then determines whether the container has the SYS_NICE\ncapability set. Containers running on non‑realtime nodes are automatically\nconsidered compliant. 
Results are collected into compliant and\nnon‑compliant lists that are reported back to the test framework.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:966", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsRTKernel", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "isContainerCapabilitySet", + "kind": "function", + "source": [ + "func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool {", + "\tif containerCapabilities == nil {", + "\t\treturn false", + "\t}", + "", + "\tif len(containerCapabilities.Add) == 0 {", + "\t\treturn false", + "\t}", + "", + "\tif stringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(\"ALL\"), true) ||", + "\t\tstringhelper.StringInSlice(containerCapabilities.Add, corev1.Capability(capability), true) {", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, 
isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", 
+ "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the labeled containers and compare their security context capabilities and whether", + "\t// or not the node's kernel is realtime enabled.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tn := env.Nodes[cut.NodeName]", + "\t\tif !n.IsRTKernel() {", + "\t\t\tcheck.LogInfo(\"Container is not running on a realtime kernel enabled node\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not running on a realtime kernel enabled node\", true))", + "\t\t\tcontinue", + "\t\t}", + 
"\t\tif !isContainerCapabilitySet(cut.SecurityContext.Capabilities, \"SYS_NICE\") {", + "\t\t\tcheck.LogError(\"Container %q has been found running on a realtime kernel enabled node without SYS_NICE capability.\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node without SYS_NICE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container is running on a realtime kernel enabled node with the SYS_NICE capability\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is running on a realtime kernel enabled node with the SYS_NICE capability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSecConPrivilegeEscalation", + "qualifiedName": "testSecConPrivilegeEscalation", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testSecConPrivilegeEscalation Verifies that containers do not allow privilege escalation\n\nThe function iterates over each container in the test environment, checking\nif the SecurityContext's AllowPrivilegeEscalation flag is explicitly set to\ntrue. 
Containers with this setting are marked non‑compliant and logged as\nerrors; those without the flag or with it false are considered compliant.\nResults are collected into report objects and stored via SetResult.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:437", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := 
checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + 
"\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprivEscFound := false", + "\t\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.AllowPrivilegeEscalation != nil {", + "\t\t\tif *(cut.SecurityContext.AllowPrivilegeEscalation) {", + "\t\t\t\tcheck.LogError(\"AllowPrivilegeEscalation is set to true in Container %q.\", cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"AllowPrivilegeEscalation is set to true\", false))", + "\t\t\t\tprivEscFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !privEscFound {", + "\t\t\tcheck.LogInfo(\"AllowPrivilegeEscalation is set to false in Container %q.\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"AllowPrivilegeEscalation is not set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSecConReadOnlyFilesystem", + "qualifiedName": "testSecConReadOnlyFilesystem", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testSecConReadOnlyFilesystem verifies that each container mounts a read‑only root filesystem\n\nThe function iterates over every pod and its containers in the test\nenvironment, checking if the container’s root filesystem is set to\nread‑only using a helper method. Containers that satisfy the requirement\nare recorded as compliant; those that do not are logged as errors and marked\nnon‑compliant. 
Finally, the results are aggregated into report objects and\npassed back via the check’s result setter.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:468", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsReadOnlyRootFilesystem", + "kind": "function" + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSecConReadOnlyFilesystem(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + 
"\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q in namespace %q\", pod.Name, pod.Namespace)", + "\t\tfor _, cut := range pod.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q in Pod %q\", cut.Name, pod.Name)", + "\t\t\tif cut.IsReadOnlyRootFilesystem(check.GetLogger()) {", + "\t\t\t\tcheck.LogInfo(\"Container %q in Pod %q has a read-only root filesystem.\", cut.Name, pod.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Container has a read-only root filesystem\", true))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Container %q in Pod %q does not have a read-only root filesystem.\", cut.Name, pod.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Container does not have a read-only root filesystem\", false))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSecConRunAsNonRoot", + "qualifiedName": "testSecConRunAsNonRoot", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testSecConRunAsNonRoot checks that pods do not run containers as root\n\nThe routine iterates over all test pods, determines which containers are\nconfigured to run as root, and records compliance results. For each pod it\nlogs a message, then calls a helper to retrieve non‑compliant containers.\nIf none exist the pod is marked compliant; otherwise each offending container\nis logged with an error and added to the non‑compliant list. 
Finally, the\ncheck’s result is set with both compliant and non‑compliant report\nobjects.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:407", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "GetRunAsNonRootFalseContainers", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": 
"function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error 
{", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing pod %s/%s\", put.Namespace, put.Name)", + "\t\tnonCompliantContainers, nonComplianceReason := put.GetRunAsNonRootFalseContainers(knownContainersToSkip)", + "\t\tif len(nonCompliantContainers) == 0 {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is configured with RunAsNonRoot=true or RunAsUser!=0 at pod or container level.\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"One or more containers of the pod are running with root user\", false))", + "\t\t\tfor index := range nonCompliantContainers {", + "\t\t\t\tcheck.LogError(\"Pod %s/%s, container %q is not compliant: %s\", put.Namespace, put.Name, nonCompliantContainers[index].Name, nonComplianceReason[index])", + "", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name, nonCompliantContainers[index].Name,", + "\t\t\t\t\tnonComplianceReason[index], false))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSysAdminCapability", + "qualifiedName": "testSysAdminCapability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testSysAdminCapability Checks containers for the SYS_ADMIN capability\n\nThis routine examines each container in the test environment, looking for the\nSYS_ADMIN 
capability in its security context. Containers lacking this\ncapability are recorded as compliant; those that include it are flagged\nnon‑compliant with an error log entry. The results are then stored back\ninto the check object.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:345", + "calls": [ + { + "name": "checkForbiddenCapability", + "kind": "function", + "source": [ + "func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tcompliant := true", + "", + "\t\tswitch {", + "\t\tcase cut.SecurityContext == nil:", + "\t\tcase cut.SecurityContext.Capabilities == nil:", + "\t\tcase isContainerCapabilitySet(cut.SecurityContext.Capabilities, capability):", + "\t\t\tcompliant = false", + "\t\t}", + "", + "\t\tif compliant {", + "\t\t\tlogger.Info(\"Container %q does not use non-compliant capability %q\", cut, capability)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No forbidden capability \"+capability+\" detected in container\", true))", + "\t\t} else {", + "\t\t\tlogger.Error(\"Non compliant %q capability detected in container %q. 
All container caps: %q\", capability, cut, cut.SecurityContext.Capabilities)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Non compliant capability \"+capability+\" in container\", false).AddField(testhelper.SCCCapability, capability))", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSysAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcompliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, \"SYS_ADMIN\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSysPtraceCapability", + "qualifiedName": "testSysPtraceCapability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testSysPtraceCapability verifies pods with shared process namespaces contain a container granting SYS_PTRACE\n\nThe function iterates over all pods that enable shared process namespaces,\nexamining each container’s security context for the SYS_PTRACE capability.\nIf at least one container has this capability it records the pod as\ncompliant; otherwise it logs an error and marks it non‑compliant. 
Finally,\nit sets the check result with lists of compliant and non‑compliant report\nobjects.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1000", + "calls": [ + { + "name": "GetShareProcessNamespacePods", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + 
"\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetShareProcessNamespacePods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tsysPtraceEnabled := false", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif cut.SecurityContext == nil ||", + "\t\t\t\tcut.SecurityContext.Capabilities == nil ||", + "\t\t\t\tlen(cut.SecurityContext.Capabilities.Add) == 0 {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif stringhelper.StringInSlice(cut.SecurityContext.Capabilities.Add, \"SYS_PTRACE\", false) {", + "\t\t\t\tcheck.LogInfo(\"Container %q defines the SYS_PTRACE capability\", cut)", + "\t\t\t\tsysPtraceEnabled = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !sysPtraceEnabled {", + "\t\t\tcheck.LogError(\"Pod %q has process namespace sharing enabled but no container allowing the SYS_PTRACE capability.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled but no container allowing the SYS_PTRACE capability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability\", put)", + "\t\t\tcompliantObjects = 
append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:64" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:62" + }, + { + "name": "invalidNamespacePrefixes", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:51" + }, + { + "name": "knownContainersToSkip", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:58" + } + ], + "consts": [ + { + "name": "defaultServiceAccount", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:47" + }, + { + "name": "nbProcessesIndex", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/pidshelper.go:27" + }, + { + "name": "nodePort", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:46" + }, + { + "name": "sshServicePortProtocol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/suite.go:1069" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": "namespace", + "files": 1, + "imports": [ + "context", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1", + 
"k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/runtime/schema" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GetInvalidCRsNum", + "qualifiedName": "GetInvalidCRsNum", + "exported": true, + "signature": "func(map[string]map[string][]string, *log.Logger)(int)", + "doc": "GetInvalidCRsNum Counts the number of custom resources that are not in allowed namespaces\n\nThe function walks through a nested map where each CRD maps to namespaces and\nthen to lists of CR names, logging an error for every invalid entry it finds.\nIt tallies these occurrences into an integer which is returned as the total\ncount of invalid custom resources.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/namespace/namespace.go:111", + "calls": [ + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNamespace", + "kind": "function", + "source": [ + "func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, namespace := range env.Namespaces {", + "\t\tcheck.LogInfo(\"Testing namespace %q\", namespace)", + "\t\tnamespaceCompliant := true", + "\t\tfor _, invalidPrefix := range invalidNamespacePrefixes {", + "\t\t\tif strings.HasPrefix(namespace, invalidPrefix) {", + "\t\t\t\tcheck.LogError(\"Namespace %q has invalid prefix %q\", namespace, invalidPrefix)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has invalid prefix\", testhelper.Namespace, false, namespace))", + "\t\t\t\tnamespaceCompliant = false", + "\t\t\t\tbreak // Break out of the loop if we find an invalid prefix", + "\t\t\t}", + "\t\t}", + "\t\tif namespaceCompliant {", + "\t\t\tcheck.LogInfo(\"Namespace %q has valid prefix\", namespace)", + 
"\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has valid prefix\", testhelper.Namespace, true, namespace))", + "\t\t}", + "\t}", + "\tif failedNamespacesNum := len(nonCompliantObjects); failedNamespacesNum \u003e 0 {", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "", + "\tinvalidCrs, err := namespace.TestCrsNamespaces(env.Crds, env.Namespaces, check.GetLogger())", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error while testing CRs namespaces, err=%v\", err)", + "\t\treturn", + "\t}", + "", + "\tinvalidCrsNum := namespace.GetInvalidCRsNum(invalidCrs, check.GetLogger())", + "\tif invalidCrsNum \u003e 0 {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"CRs are not in the configured namespaces\", testhelper.Namespace, false))", + "\t} else {", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"CRs are in the configured namespaces\", testhelper.Namespace, true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetInvalidCRsNum(invalidCrs map[string]map[string][]string, logger *log.Logger) int {", + "\tvar invalidCrsNum int", + "\tfor crdName, namespaces := range invalidCrs {", + "\t\tfor namespace, crNames := range namespaces {", + "\t\t\tfor _, crName := range crNames {", + "\t\t\t\tlogger.Error(\"crName=%q namespace=%q is invalid (crd=%q)\", crName, namespace, crdName)", + "\t\t\t\tinvalidCrsNum++", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn invalidCrsNum", + "}" + ] + }, + { + "name": "TestCrsNamespaces", + "qualifiedName": "TestCrsNamespaces", + "exported": true, + "signature": "func([]*apiextv1.CustomResourceDefinition, []string, *log.Logger)(map[string]map[string][]string, error)", + "doc": "TestCrsNamespaces identifies custom resources outside allowed namespaces\n\nThe function examines each provided 
CRD, gathers all its instances across the\ncluster, and checks whether their namespaces match a given list of permitted\nnamespaces. For any instance found in an unauthorized namespace, it records\nthe CRD name, namespace, and resource names in a nested map. The resulting\nmap is returned along with any error that occurred during retrieval;\notherwise nil indicates success.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/namespace/namespace.go:39", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "getCrsPerNamespaces", + "kind": "function", + "source": [ + "func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces map[string][]string, err error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, version := range aCrd.Spec.Versions {", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: aCrd.Spec.Group,", + "\t\t\tVersion: version.Name,", + "\t\t\tResource: aCrd.Spec.Names.Plural,", + "\t\t}", + "\t\tlog.Debug(\"Looking for CRs from CRD: %s api version:%s group:%s plural:%s\", aCrd.Name, version.Name, aCrd.Spec.Group, aCrd.Spec.Names.Plural)", + "\t\tcrs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error getting %s: %v\\n\", aCrd.Name, err)", + "\t\t\treturn crdNamespaces, err", + "\t\t}", + "\t\tcrdNamespaces = make(map[string][]string)", + "\t\tfor _, cr := range crs.Items {", + "\t\t\tname := cr.Object[\"metadata\"].(map[string]interface{})[\"name\"]", + "\t\t\tnamespace := cr.Object[\"metadata\"].(map[string]interface{})[\"namespace\"]", + "\t\t\tvar namespaceStr, nameStr string", + "\t\t\tif namespace == nil {", + "\t\t\t\tnamespaceStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnamespaceStr = fmt.Sprintf(\"%s\", namespace)", + "\t\t\t}", + "\t\t\tif name == nil {", + "\t\t\t\tnameStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnameStr = fmt.Sprintf(\"%s\", name)", + "\t\t\t}", + 
"\t\t\tcrdNamespaces[namespaceStr] = append(crdNamespaces[namespaceStr], nameStr)", + "\t\t}", + "\t}", + "\treturn crdNamespaces, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNamespace", + "kind": "function", + "source": [ + "func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, namespace := range env.Namespaces {", + "\t\tcheck.LogInfo(\"Testing namespace %q\", namespace)", + "\t\tnamespaceCompliant := true", + "\t\tfor _, invalidPrefix := range invalidNamespacePrefixes {", + "\t\t\tif strings.HasPrefix(namespace, invalidPrefix) {", + "\t\t\t\tcheck.LogError(\"Namespace %q has invalid prefix %q\", namespace, invalidPrefix)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has invalid prefix\", testhelper.Namespace, false, namespace))", + "\t\t\t\tnamespaceCompliant = false", + "\t\t\t\tbreak // Break out of the loop if we find an invalid prefix", + "\t\t\t}", + "\t\t}", + "\t\tif 
namespaceCompliant {", + "\t\t\tcheck.LogInfo(\"Namespace %q has valid prefix\", namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"Namespace has valid prefix\", testhelper.Namespace, true, namespace))", + "\t\t}", + "\t}", + "\tif failedNamespacesNum := len(nonCompliantObjects); failedNamespacesNum \u003e 0 {", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "", + "\tinvalidCrs, err := namespace.TestCrsNamespaces(env.Crds, env.Namespaces, check.GetLogger())", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error while testing CRs namespaces, err=%v\", err)", + "\t\treturn", + "\t}", + "", + "\tinvalidCrsNum := namespace.GetInvalidCRsNum(invalidCrs, check.GetLogger())", + "\tif invalidCrsNum \u003e 0 {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"CRs are not in the configured namespaces\", testhelper.Namespace, false))", + "\t} else {", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"CRs are in the configured namespaces\", testhelper.Namespace, true))", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespaces []string, logger *log.Logger) (invalidCrs map[string]map[string][]string, err error) {", + "\t// Initialize the top level map", + "\tinvalidCrs = make(map[string]map[string][]string)", + "\tfor _, crd := range crds {", + "\t\tcrNamespaces, err := getCrsPerNamespaces(crd)", + "\t\tif err != nil {", + "\t\t\treturn invalidCrs, fmt.Errorf(\"failed to get CRs for CRD %s - Error: %v\", crd.Name, err)", + "\t\t}", + "\t\tfor namespace, crNames := range crNamespaces {", + "\t\t\tif !stringhelper.StringInSlice(configNamespaces, namespace, false) {", + "\t\t\t\tlogger.Error(\"CRD: %q (kind:%q/ plural:%q) has CRs %v deployed in namespace %q not 
in configured namespaces %v\",", + "\t\t\t\t\tcrd.Name, crd.Spec.Names.Kind, crd.Spec.Names.Plural, crNames, namespace, configNamespaces)", + "\t\t\t\t// Initialize this map dimension before use", + "\t\t\t\tif invalidCrs[crd.Name] == nil {", + "\t\t\t\t\tinvalidCrs[crd.Name] = make(map[string][]string)", + "\t\t\t\t}", + "\t\t\t\tinvalidCrs[crd.Name][namespace] = append(invalidCrs[crd.Name][namespace], crNames...)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn invalidCrs, nil", + "}" + ] + }, + { + "name": "getCrsPerNamespaces", + "qualifiedName": "getCrsPerNamespaces", + "exported": false, + "signature": "func(*apiextv1.CustomResourceDefinition)(map[string][]string, error)", + "doc": "getCrsPerNamespaces Retrieves custom resources per namespace\n\nThis function queries the Kubernetes cluster for all instances of a given\nCustomResourceDefinition across its versions, organizing them into a map\nkeyed by namespace with lists of resource names as values. It uses a dynamic\nclient from a shared holder to perform list operations and logs debug\ninformation during the search. 
If any listing operation fails, an error is\nreturned along with a nil or partially filled map.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/namespace/namespace.go:70", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Resource", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace", + "name": "TestCrsNamespaces", + "kind": "function", + "source": [ + "func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespaces []string, logger *log.Logger) (invalidCrs 
map[string]map[string][]string, err error) {", + "\t// Initialize the top level map", + "\tinvalidCrs = make(map[string]map[string][]string)", + "\tfor _, crd := range crds {", + "\t\tcrNamespaces, err := getCrsPerNamespaces(crd)", + "\t\tif err != nil {", + "\t\t\treturn invalidCrs, fmt.Errorf(\"failed to get CRs for CRD %s - Error: %v\", crd.Name, err)", + "\t\t}", + "\t\tfor namespace, crNames := range crNamespaces {", + "\t\t\tif !stringhelper.StringInSlice(configNamespaces, namespace, false) {", + "\t\t\t\tlogger.Error(\"CRD: %q (kind:%q/ plural:%q) has CRs %v deployed in namespace %q not in configured namespaces %v\",", + "\t\t\t\t\tcrd.Name, crd.Spec.Names.Kind, crd.Spec.Names.Plural, crNames, namespace, configNamespaces)", + "\t\t\t\t// Initialize this map dimension before use", + "\t\t\t\tif invalidCrs[crd.Name] == nil {", + "\t\t\t\t\tinvalidCrs[crd.Name] = make(map[string][]string)", + "\t\t\t\t}", + "\t\t\t\tinvalidCrs[crd.Name][namespace] = append(invalidCrs[crd.Name][namespace], crNames...)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn invalidCrs, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces map[string][]string, err error) {", + "\toc := clientsholder.GetClientsHolder()", + "\tfor _, version := range aCrd.Spec.Versions {", + "\t\tgvr := schema.GroupVersionResource{", + "\t\t\tGroup: aCrd.Spec.Group,", + "\t\t\tVersion: version.Name,", + "\t\t\tResource: aCrd.Spec.Names.Plural,", + "\t\t}", + "\t\tlog.Debug(\"Looking for CRs from CRD: %s api version:%s group:%s plural:%s\", aCrd.Name, version.Name, aCrd.Spec.Group, aCrd.Spec.Names.Plural)", + "\t\tcrs, err := oc.DynamicClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error getting %s: %v\\n\", aCrd.Name, err)", + "\t\t\treturn crdNamespaces, err", + "\t\t}", + "\t\tcrdNamespaces = make(map[string][]string)", + "\t\tfor 
_, cr := range crs.Items {", + "\t\t\tname := cr.Object[\"metadata\"].(map[string]interface{})[\"name\"]", + "\t\t\tnamespace := cr.Object[\"metadata\"].(map[string]interface{})[\"namespace\"]", + "\t\t\tvar namespaceStr, nameStr string", + "\t\t\tif namespace == nil {", + "\t\t\t\tnamespaceStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnamespaceStr = fmt.Sprintf(\"%s\", namespace)", + "\t\t\t}", + "\t\t\tif name == nil {", + "\t\t\t\tnameStr = \"\"", + "\t\t\t} else {", + "\t\t\t\tnameStr = fmt.Sprintf(\"%s\", name)", + "\t\t\t}", + "\t\t\tcrdNamespaces[namespaceStr] = append(crdNamespaces[namespaceStr], nameStr)", + "\t\t}", + "\t}", + "\treturn crdNamespaces, nil", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/resources", + "name": "resources", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "HasExclusiveCPUsAssigned", + "qualifiedName": "HasExclusiveCPUsAssigned", + "exported": true, + "signature": "func(*provider.Container, *log.Logger)(bool)", + "doc": "HasExclusiveCPUsAssigned Determines if a container runs with exclusive CPU allocation\n\nThe function examines the CPU and memory limits and requests of a container\nto decide whether it belongs to an exclusive CPU pool. If either limit is\nmissing, non‑integer, or mismatched with its request, the container is\nconsidered shared; otherwise it is marked exclusive. 
The result is returned\nas a boolean.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/resources/resources.go:42", + "calls": [ + { + "name": "Cpu", + "kind": "function" + }, + { + "name": "Memory", + "kind": "function" + }, + { + "name": "IsZero", + "kind": "function" + }, + { + "name": "IsZero", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "AsInt64", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "AsInt64", + "kind": "function" + }, + { + "name": "Cpu", + "kind": "function" + }, + { + "name": "AsInt64", + "kind": "function" + }, + { + "name": "Memory", + "kind": "function" + }, + { + "name": "AsInt64", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testExclusiveCPUPool", + "kind": "function", + "source": [ + "func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tnBExclusiveCPUPoolContainers := 0", + "\t\tnBSharedCPUPoolContainers := 0", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif resources.HasExclusiveCPUsAssigned(cut, check.GetLogger()) {", + "\t\t\t\tnBExclusiveCPUPoolContainers++", + "\t\t\t} else {", + "\t\t\t\tnBSharedCPUPoolContainers++", + "\t\t\t}", + "\t\t}", + "", + "\t\tif nBExclusiveCPUPoolContainers \u003e 0 \u0026\u0026 nBSharedCPUPoolContainers \u003e 0 {", + "\t\t\texclusiveStr := strconv.Itoa(nBExclusiveCPUPoolContainers)", + "\t\t\tsharedStr := strconv.Itoa(nBSharedCPUPoolContainers)", + "", + "\t\t\tcheck.LogError(\"Pod %q has containers whose CPUs belong to different pools. 
Containers in the shared cpu pool: %d \"+", + "\t\t\t\t\"Containers in the exclusive cpu pool: %d\", put, nBSharedCPUPoolContainers, nBExclusiveCPUPoolContainers)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has containers whose CPUs belong to different pools\", false).", + "\t\t\t\tAddField(\"SharedCPUPoolContainers\", sharedStr).", + "\t\t\t\tAddField(\"ExclusiveCPUPoolContainers\", exclusiveStr))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has no containers whose CPUs belong to different pools\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has no containers whose CPUs belong to different pools\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func HasExclusiveCPUsAssigned(cut *provider.Container, logger *log.Logger) bool {", + "\tcpuLimits := cut.Resources.Limits.Cpu()", + "\tmemLimits := cut.Resources.Limits.Memory()", + "", + "\t// if no cpu or memory limits are specified the container will run in the shared cpu pool", + "\tif cpuLimits.IsZero() || memLimits.IsZero() {", + "\t\tlogger.Debug(\"Container %q has been found missing cpu/memory resource limits\", cut)", + "\t\treturn false", + "\t}", + "", + "\t// if the cpu limits quantity is not an integer the container will run in the shared cpu pool", + "\tcpuLimitsVal, isInteger := cpuLimits.AsInt64()", + "\tif !isInteger {", + "\t\tlogger.Debug(\"Container %q cpu resource limit is not an integer\", cut)", + "\t\treturn false", + "\t}", + "", + "\t// if the cpu and memory limits and requests are equal to each other the container will run in the exclusive cpu pool", + "\tcpuRequestsVal, _ := cut.Resources.Requests.Cpu().AsInt64()", + "\tmemRequestsVal, _ := cut.Resources.Requests.Memory().AsInt64()", + "\tmemLimitsVal, _ := 
memLimits.AsInt64()", + "\tif cpuLimitsVal == cpuRequestsVal \u0026\u0026 memLimitsVal == memRequestsVal {", + "\t\treturn true", + "\t}", + "", + "\t// if the cpu limits and request are different, the container will run in the shared cpu pool", + "\tlogger.Debug(\"Container %q cpu/memory resources and limits are not equal to each other\", cut)", + "\treturn false", + "}" + ] + }, + { + "name": "HasRequestsSet", + "qualifiedName": "HasRequestsSet", + "exported": true, + "signature": "func(*provider.Container, *log.Logger)(bool)", + "doc": "HasRequestsSet Determines if a container has resource requests defined\n\nThis function examines the request fields of a container's resource\nspecification. It checks that there is at least one request entry, and that\nboth CPU and memory requests are non‑zero values. If any requirement is\nmissing it logs an error and returns false; otherwise it returns true.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/resources/resources.go:14", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "IsZero", + "kind": "function" + }, + { + "name": "Cpu", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "IsZero", + "kind": "function" + }, + { + "name": "Memory", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testPodRequests", + "kind": "function", + "source": [ + "func testPodRequests(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t// Loop through the containers, looking for containers that are missing requests.", + "\t// These need to be defined in order to pass.", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing 
Container %q\", cut)", + "\t\tif !resources.HasRequestsSet(cut, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Container %q is missing resource requests\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is missing resource requests\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has resource requests\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has resource requests\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func HasRequestsSet(cut *provider.Container, logger *log.Logger) bool {", + "\tpassed := true", + "", + "\t// Parse the requests.", + "\tif len(cut.Resources.Requests) == 0 {", + "\t\tlogger.Error(\"Container %q has been found missing resource requests\", cut)", + "\t\tpassed = false", + "\t} else {", + "\t\tif cut.Resources.Requests.Cpu().IsZero() {", + "\t\t\tlogger.Error(\"Container %q has been found missing CPU requests\", cut)", + "\t\t\tpassed = false", + "\t\t}", + "", + "\t\tif cut.Resources.Requests.Memory().IsZero() {", + "\t\t\tlogger.Error(\"Container %q has been found missing memory requests\", cut)", + "\t\t\tpassed = false", + "\t\t}", + "\t}", + "\treturn passed", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "securitycontextcontainer", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "k8s.io/api/core/v1", + "slices", + "sort" + ], + "structs": [ + { 
+ "name": "ContainerSCC", + "exported": true, + "doc": "ContainerSCC Represents a container’s security context compliance state\n\nThis struct holds flags indicating whether each security setting of a\ncontainer satisfies the requirements of a given security context constraint.\nEach field is an OkNok value that marks the presence or absence of a feature\nsuch as host networking, privilege escalation, or required capabilities. The\nstruct also records the lowest capability category applicable to the\ncontainer.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:55", + "fields": { + "AllVolumeAllowed": "OkNok", + "CapabilitiesCategory": "CategoryID", + "FsGroupPresent": "OkNok", + "HostDirVolumePluginPresent": "OkNok", + "HostIPC": "OkNok", + "HostNetwork": "OkNok", + "HostPID": "OkNok", + "HostPorts": "OkNok", + "PrivilegeEscalation": "OkNok", + "PrivilegedContainer": "OkNok", + "ReadOnlyRootFilesystem": "OkNok", + "RequiredDropCapabilitiesPresent": "OkNok", + "RunAsNonRoot": "OkNok", + "RunAsUserPresent": "OkNok", + "SeLinuxContextPresent": "OkNok" + }, + "methodNames": null, + "source": [ + "type ContainerSCC struct {", + "\tHostDirVolumePluginPresent OkNok // 0 or 1 - 0 is false 1 - true", + "\tHostIPC OkNok", + "\tHostNetwork OkNok", + "\tHostPID OkNok", + "\tHostPorts OkNok", + "\tPrivilegeEscalation OkNok // this can be true or false", + "\tPrivilegedContainer OkNok", + "\tRunAsUserPresent OkNok", + "\tReadOnlyRootFilesystem OkNok", + "\tRunAsNonRoot OkNok", + "\tFsGroupPresent OkNok", + "\tSeLinuxContextPresent OkNok", + "\tCapabilitiesCategory CategoryID", + "\tRequiredDropCapabilitiesPresent OkNok", + "\tAllVolumeAllowed OkNok", + "}" + ] + }, + { + "name": "PodListCategory", + "exported": true, + "doc": "PodListCategory Represents a container’s classification within a pod\n\nThis structure holds identifying information for a specific container in a\nKubernetes pod, including the 
container name, pod name, namespace, and its\nsecurity context category. It is used to record and report which security\npolicy tier applies to each container during analysis. The String method\nformats these fields into a readable string for logging or output.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:91", + "fields": { + "Category": "CategoryID", + "Containername": "string", + "NameSpace": "string", + "Podname": "string" + }, + "methodNames": [ + "String" + ], + "source": [ + "type PodListCategory struct {", + "\tContainername string", + "\tPodname string", + "\tNameSpace string", + "\tCategory CategoryID", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "AllVolumeAllowed", + "qualifiedName": "AllVolumeAllowed", + "exported": true, + "signature": "func([]corev1.Volume)(OkNok)", + "doc": "AllVolumeAllowed Verifies all volumes are permitted and detects host path usage\n\nThe function examines each volume in the provided slice, counting only those\nof allowed types such as ConfigMap, DownwardAPI, EmptyDir,\nPersistentVolumeClaim, Projected, or Secret. If every volume is of an allowed\ntype, it returns OK for the overall check; otherwise it returns NOK. 
It also\nflags whether any HostPath volume was encountered by setting a separate\nstatus value.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:307", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "CheckPod", + "kind": "function", + "source": [ + "func CheckPod(pod *provider.Pod) []PodListCategory {", + "\tvar containerSCC ContainerSCC", + "\tcontainerSCC.HostIPC = NOK", + "\tif pod.Spec.HostIPC {", + "\t\tcontainerSCC.HostIPC = OK", + "\t}", + "\tcontainerSCC.HostNetwork = NOK", + "\tif pod.Spec.HostNetwork {", + "\t\tcontainerSCC.HostNetwork = OK", + "\t}", + "\tcontainerSCC.HostPID = NOK", + "\tif pod.Spec.HostPID {", + "\t\tcontainerSCC.HostPID = OK", + "\t}", + "\tcontainerSCC.SeLinuxContextPresent = NOK", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.SELinuxOptions != nil {", + "\t\tcontainerSCC.SeLinuxContextPresent = OK", + "\t}", + "\tcontainerSCC.AllVolumeAllowed, containerSCC.HostDirVolumePluginPresent = AllVolumeAllowed(pod.Spec.Volumes)", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.RunAsUser != nil {", + "\t\tcontainerSCC.RunAsUserPresent = OK", + "\t} else {", + "\t\tcontainerSCC.RunAsUserPresent = NOK", + "\t}", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.FSGroup != nil {", + "\t\tcontainerSCC.FsGroupPresent = OK", + "\t} else {", + "\t\tcontainerSCC.FsGroupPresent = NOK", + "\t}", + "\treturn checkContainerCategory(pod.Spec.Containers, containerSCC, pod.Name, pod.Namespace)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func AllVolumeAllowed(volumes []corev1.Volume) (r1, r2 OkNok) {", + "\tcountVolume := 0", + "\tvar value OkNok", + "\tvalue = 
NOK", + "\tfor j := 0; j \u003c len(volumes); j++ {", + "\t\tif volumes[j].HostPath != nil {", + "\t\t\tvalue = OK", + "\t\t}", + "\t\tif volumes[j].ConfigMap != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].DownwardAPI != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].EmptyDir != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].PersistentVolumeClaim != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].Projected != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].Secret != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t}", + "\tif countVolume == len(volumes) {", + "\t\treturn OK, value", + "\t}", + "\treturn NOK, value", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "CategoryID.String", + "exported": true, + "receiver": "CategoryID", + "signature": "func()(string)", + "doc": "CategoryID.String Returns the string representation of a CategoryID\n\nThe method examines the receiver value and maps each predefined constant to\nits corresponding string. 
It uses a switch statement to select the\nappropriate case and returns that string, defaulting to a fallback if none\nmatch.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:197", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (category CategoryID) String() string {", + "\tswitch category {", + "\tcase CategoryID1:", + "\t\treturn CategoryID1String", + "\tcase CategoryID1NoUID0:", + "\t\treturn CategoryID1NoUID0String", + "\tcase CategoryID2:", + "\t\treturn CategoryID2String", + "\tcase CategoryID3:", + "\t\treturn CategoryID3String", + "\tcase CategoryID4:", + "\t\treturn CategoryID4String", + "\tcase Undefined:", + "\t\treturn CategoryID4String", + "\t}", + "\treturn CategoryID4String", + "}" + ] + }, + { + "name": "CheckPod", + "qualifiedName": "CheckPod", + "exported": true, + "signature": "func(*provider.Pod)([]PodListCategory)", + "doc": "CheckPod Evaluates a pod’s security context and categorizes its containers\n\nThe function inspects the pod's host networking, IPC, PID settings, SELinux\noptions, volume types, run-as-user, and FSGroup fields to build a\nContainerSCC profile. It then determines each container’s category by\ncomparing that profile against predefined security categories. 
The result is\na slice of PodListCategory structs, one per container, indicating the\ncontainer name, pod details, namespace, and assigned category.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:401", + "calls": [ + { + "name": "AllVolumeAllowed", + "kind": "function", + "source": [ + "func AllVolumeAllowed(volumes []corev1.Volume) (r1, r2 OkNok) {", + "\tcountVolume := 0", + "\tvar value OkNok", + "\tvalue = NOK", + "\tfor j := 0; j \u003c len(volumes); j++ {", + "\t\tif volumes[j].HostPath != nil {", + "\t\t\tvalue = OK", + "\t\t}", + "\t\tif volumes[j].ConfigMap != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].DownwardAPI != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].EmptyDir != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].PersistentVolumeClaim != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].Projected != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t\tif volumes[j].Secret != nil {", + "\t\t\tcountVolume++", + "\t\t}", + "\t}", + "\tif countVolume == len(volumes) {", + "\t\treturn OK, value", + "\t}", + "\treturn NOK, value", + "}" + ] + }, + { + "name": "checkContainerCategory", + "kind": "function", + "source": [ + "func checkContainerCategory(containers []corev1.Container, containerSCC ContainerSCC, podName, nameSpace string) []PodListCategory {", + "\tvar ContainerList []PodListCategory", + "\tvar categoryinfo PodListCategory", + "\tfor j := 0; j \u003c len(containers); j++ {", + "\t\tcut := \u0026provider.Container{Podname: podName, Namespace: nameSpace, Container: \u0026containers[j]}", + "\t\tpercontainerSCC := GetContainerSCC(cut, containerSCC)", + "\t\t// after building the containerSCC need to check to which category it is", + "\t\tcategoryinfo = PodListCategory{", + "\t\t\tContainername: cut.Name,", + "\t\t\tPodname: podName,", + "\t\t\tNameSpace: nameSpace,", + "\t\t}", + "\t\tif 
compareCategory(\u0026Category1, \u0026percontainerSCC, CategoryID1) {", + "\t\t\tcategoryinfo.Category = CategoryID1", + "\t\t} else if compareCategory(\u0026Category1NoUID0, \u0026percontainerSCC, CategoryID1NoUID0) {", + "\t\t\tcategoryinfo.Category = CategoryID1NoUID0", + "\t\t} else if compareCategory(\u0026Category2, \u0026percontainerSCC, CategoryID2) {", + "\t\t\tcategoryinfo.Category = CategoryID2", + "\t\t} else if compareCategory(\u0026Category3, \u0026percontainerSCC, CategoryID3) {", + "\t\t\tcategoryinfo.Category = CategoryID3", + "\t\t} else {", + "\t\t\tcategoryinfo.Category = CategoryID4", + "\t\t}", + "\t\t// after building the containerSCC need to check to which category it is", + "\t\tContainerList = append(ContainerList, categoryinfo)", + "\t}", + "\treturn ContainerList", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testContainerSCC", + "kind": "function", + "source": [ + "func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\thighLevelCat := securitycontextcontainer.CategoryID1", + "\tfor _, pod := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", pod)", + "\t\tlistCategory := securitycontextcontainer.CheckPod(pod)", + "\t\tfor _, cat := range listCategory {", + "\t\t\tif cat.Category \u003e securitycontextcontainer.CategoryID1NoUID0 {", + "\t\t\t\tcheck.LogError(\"Category %q is NOT category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is NOT category 1 or category NoUID0\", false).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, aContainerOut)", + 
"\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Category %q is category 1 or category NoUID0\", cat)", + "\t\t\t\taContainerOut := testhelper.NewContainerReportObject(cat.NameSpace, cat.Podname, cat.Containername, \"container category is category 1 or category NoUID0\", true).", + "\t\t\t\t\tSetType(testhelper.ContainerCategory).", + "\t\t\t\t\tAddField(testhelper.Category, cat.Category.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, aContainerOut)", + "\t\t\t}", + "\t\t\tif cat.Category \u003e highLevelCat {", + "\t\t\t\thighLevelCat = cat.Category", + "\t\t\t}", + "\t\t}", + "\t}", + "\taCNFOut := testhelper.NewReportObject(\"Overall CNF category\", testhelper.CnfType, false).AddField(testhelper.Category, highLevelCat.String())", + "\tcompliantObjects = append(compliantObjects, aCNFOut)", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CheckPod(pod *provider.Pod) []PodListCategory {", + "\tvar containerSCC ContainerSCC", + "\tcontainerSCC.HostIPC = NOK", + "\tif pod.Spec.HostIPC {", + "\t\tcontainerSCC.HostIPC = OK", + "\t}", + "\tcontainerSCC.HostNetwork = NOK", + "\tif pod.Spec.HostNetwork {", + "\t\tcontainerSCC.HostNetwork = OK", + "\t}", + "\tcontainerSCC.HostPID = NOK", + "\tif pod.Spec.HostPID {", + "\t\tcontainerSCC.HostPID = OK", + "\t}", + "\tcontainerSCC.SeLinuxContextPresent = NOK", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.SELinuxOptions != nil {", + "\t\tcontainerSCC.SeLinuxContextPresent = OK", + "\t}", + "\tcontainerSCC.AllVolumeAllowed, containerSCC.HostDirVolumePluginPresent = AllVolumeAllowed(pod.Spec.Volumes)", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.RunAsUser != nil {", + "\t\tcontainerSCC.RunAsUserPresent = OK", + "\t} else {", + "\t\tcontainerSCC.RunAsUserPresent = NOK", + "\t}", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 
pod.Spec.SecurityContext.FSGroup != nil {", + "\t\tcontainerSCC.FsGroupPresent = OK", + "\t} else {", + "\t\tcontainerSCC.FsGroupPresent = NOK", + "\t}", + "\treturn checkContainerCategory(pod.Spec.Containers, containerSCC, pod.Name, pod.Namespace)", + "}" + ] + }, + { + "name": "GetContainerSCC", + "qualifiedName": "GetContainerSCC", + "exported": true, + "signature": "func(*provider.Container, ContainerSCC)(ContainerSCC)", + "doc": "GetContainerSCC updates a container's security context compliance status\n\nThe function examines a container’s properties such as host ports,\ncapabilities, privilege escalation settings, and SELinux options. It sets\ncorresponding flags in the provided ContainerSCC structure to indicate\nwhether each security requirement is satisfied. The updated ContainerSCC is\nreturned for further classification or reporting.\n\nnolint:gocritic", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:224", + "calls": [ + { + "name": "updateCapabilitiesFromContainer", + "kind": "function", + "source": [ + "func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *ContainerSCC) {", + "\tcontainerSCC.RequiredDropCapabilitiesPresent = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.Capabilities != nil {", + "\t\tvar sliceDropCapabilities []string", + "\t\tfor _, ncc := range cut.SecurityContext.Capabilities.Drop {", + "\t\t\tsliceDropCapabilities = append(sliceDropCapabilities, string(ncc))", + "\t\t}", + "", + "\t\t// Sort the slices", + "\t\tsort.Strings(sliceDropCapabilities)", + "\t\tsort.Strings(requiredDropCapabilities)", + "", + "\t\tif stringhelper.SubSlice(sliceDropCapabilities, requiredDropCapabilities) || slices.Equal(sliceDropCapabilities, dropAll) {", + "\t\t\tcontainerSCC.RequiredDropCapabilitiesPresent = OK", + "\t\t}", + "\t\t//nolint:gocritic", + "\t\tif len(cut.SecurityContext.Capabilities.Add) == 0 { // check if the len=0 this 
mean that is cat1", + "\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t\t} else if checkContainCategory(cut.SecurityContext.Capabilities.Add, category2AddCapabilities) {", + "\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID2", + "\t\t} else {", + "\t\t\tif checkContainCategory(cut.SecurityContext.Capabilities.Add, category3AddCapabilities) {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID3", + "\t\t\t} else {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID4", + "\t\t\t}", + "\t\t}", + "\t} else {", + "\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "checkContainerCategory", + "kind": "function", + "source": [ + "func checkContainerCategory(containers []corev1.Container, containerSCC ContainerSCC, podName, nameSpace string) []PodListCategory {", + "\tvar ContainerList []PodListCategory", + "\tvar categoryinfo PodListCategory", + "\tfor j := 0; j \u003c len(containers); j++ {", + "\t\tcut := \u0026provider.Container{Podname: podName, Namespace: nameSpace, Container: \u0026containers[j]}", + "\t\tpercontainerSCC := GetContainerSCC(cut, containerSCC)", + "\t\t// after building the containerSCC need to check to which category it is", + "\t\tcategoryinfo = PodListCategory{", + "\t\t\tContainername: cut.Name,", + "\t\t\tPodname: podName,", + "\t\t\tNameSpace: nameSpace,", + "\t\t}", + "\t\tif compareCategory(\u0026Category1, \u0026percontainerSCC, CategoryID1) {", + "\t\t\tcategoryinfo.Category = CategoryID1", + "\t\t} else if compareCategory(\u0026Category1NoUID0, \u0026percontainerSCC, CategoryID1NoUID0) {", + "\t\t\tcategoryinfo.Category = CategoryID1NoUID0", + "\t\t} else if compareCategory(\u0026Category2, \u0026percontainerSCC, CategoryID2) {", + "\t\t\tcategoryinfo.Category = CategoryID2", + "\t\t} else if compareCategory(\u0026Category3, 
\u0026percontainerSCC, CategoryID3) {", + "\t\t\tcategoryinfo.Category = CategoryID3", + "\t\t} else {", + "\t\t\tcategoryinfo.Category = CategoryID4", + "\t\t}", + "\t\t// after building the containerSCC need to check to which category it is", + "\t\tContainerList = append(ContainerList, categoryinfo)", + "\t}", + "\treturn ContainerList", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetContainerSCC(cut *provider.Container, containerSCC ContainerSCC) ContainerSCC {", + "\tcontainerSCC.HostPorts = NOK", + "\tfor _, aPort := range cut.Ports {", + "\t\tif aPort.HostPort != 0 {", + "\t\t\tcontainerSCC.HostPorts = OK", + "\t\t\tbreak", + "\t\t}", + "\t}", + "\tupdateCapabilitiesFromContainer(cut, \u0026containerSCC)", + "\tcontainerSCC.PrivilegeEscalation = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.AllowPrivilegeEscalation != nil {", + "\t\tcontainerSCC.PrivilegeEscalation = OK", + "\t}", + "\tcontainerSCC.PrivilegedContainer = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.Privileged != nil \u0026\u0026 *(cut.SecurityContext.Privileged) {", + "\t\tcontainerSCC.PrivilegedContainer = OK", + "\t}", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.RunAsUser != nil {", + "\t\tcontainerSCC.RunAsUserPresent = OK", + "\t}", + "\tcontainerSCC.ReadOnlyRootFilesystem = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.ReadOnlyRootFilesystem != nil \u0026\u0026 *cut.SecurityContext.ReadOnlyRootFilesystem {", + "\t\tcontainerSCC.ReadOnlyRootFilesystem = OK", + "\t}", + "\tcontainerSCC.RunAsNonRoot = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.RunAsNonRoot != nil \u0026\u0026 *cut.SecurityContext.RunAsNonRoot {", + "\t\tcontainerSCC.RunAsNonRoot = OK", + "\t}", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.SELinuxOptions != nil {", + "\t\tcontainerSCC.SeLinuxContextPresent = OK", + "\t}", + 
"\treturn containerSCC", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "OkNok.String", + "exported": true, + "receiver": "OkNok", + "signature": "func()(string)", + "doc": "OkNok.String returns a textual representation of the status\n\nWhen invoked, this method examines its receiver value and maps specific\nenumeration cases to predefined string constants. If the value matches the\nsuccess case it returns the corresponding OKString; if it matches the failure\ncase it returns NOKString. For any other value, it defaults to returning\n\"false\".", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:37", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (okNok OkNok) String() string {", + "\tswitch okNok {", + "\tcase OK:", + "\t\treturn OKString", + "\tcase NOK:", + "\t\treturn NOKString", + "\t}", + "\treturn \"false\"", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "PodListCategory.String", + "exported": true, + "receiver": "PodListCategory", + "signature": "func()(string)", + "doc": "PodListCategory.String Formats PodListCategory fields into a readable string\n\nThe method combines the container name, pod name, namespace, and category of\na PodListCategory instance into a single line with labels. 
It returns this\nformatted string for display or logging purposes.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:177", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (category PodListCategory) String() string {", + "\treturnString := fmt.Sprintf(\"Containername: %s Podname: %s NameSpace: %s Category: %s \\n \",", + "\t\tcategory.Containername, category.Podname, category.NameSpace, category.Category)", + "\treturn returnString", + "}" + ] + }, + { + "name": "checkContainCategory", + "qualifiedName": "checkContainCategory", + "exported": false, + "signature": "func([]corev1.Capability, []string)(bool)", + "doc": "checkContainCategory verifies that every capability in a list is present in another set\n\nThe function receives a slice of capabilities and a reference slice of\nstrings. It iterates through each capability, checking whether its string\nrepresentation appears in the reference slice using a helper routine. 
If any\ncapability is missing, it immediately returns false; otherwise it returns\ntrue after all checks pass.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:384", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "updateCapabilitiesFromContainer", + "kind": "function", + "source": [ + "func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *ContainerSCC) {", + "\tcontainerSCC.RequiredDropCapabilitiesPresent = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.Capabilities != nil {", + "\t\tvar sliceDropCapabilities []string", + "\t\tfor _, ncc := range cut.SecurityContext.Capabilities.Drop {", + "\t\t\tsliceDropCapabilities = append(sliceDropCapabilities, string(ncc))", + "\t\t}", + "", + "\t\t// Sort the slices", + "\t\tsort.Strings(sliceDropCapabilities)", + "\t\tsort.Strings(requiredDropCapabilities)", + "", + "\t\tif stringhelper.SubSlice(sliceDropCapabilities, requiredDropCapabilities) || slices.Equal(sliceDropCapabilities, dropAll) {", + "\t\t\tcontainerSCC.RequiredDropCapabilitiesPresent = OK", + "\t\t}", + "\t\t//nolint:gocritic", + "\t\tif len(cut.SecurityContext.Capabilities.Add) == 0 { // check if the len=0 this mean that is 
cat1", + "\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t\t} else if checkContainCategory(cut.SecurityContext.Capabilities.Add, category2AddCapabilities) {", + "\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID2", + "\t\t} else {", + "\t\t\tif checkContainCategory(cut.SecurityContext.Capabilities.Add, category3AddCapabilities) {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID3", + "\t\t\t} else {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID4", + "\t\t\t}", + "\t\t}", + "\t} else {", + "\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAddCapabilities []string) bool {", + "\tfor _, ncc := range addCapability {", + "\t\tif !stringhelper.StringInSlice(referenceCategoryAddCapabilities, string(ncc), true) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "checkContainerCategory", + "qualifiedName": "checkContainerCategory", + "exported": false, + "signature": "func([]corev1.Container, ContainerSCC, string, string)([]PodListCategory)", + "doc": "checkContainerCategory creates a list of container categories based on security context checks\n\nFor each container in the pod, it builds a container-specific SCC\nrepresentation and then determines which predefined category matches that\nSCC. 
The function returns a slice of structs containing the container name,\npod name, namespace, and assigned category identifier.\n\nnolint:gocritic", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:348", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "GetContainerSCC", + "kind": "function", + "source": [ + "func GetContainerSCC(cut *provider.Container, containerSCC ContainerSCC) ContainerSCC {", + "\tcontainerSCC.HostPorts = NOK", + "\tfor _, aPort := range cut.Ports {", + "\t\tif aPort.HostPort != 0 {", + "\t\t\tcontainerSCC.HostPorts = OK", + "\t\t\tbreak", + "\t\t}", + "\t}", + "\tupdateCapabilitiesFromContainer(cut, \u0026containerSCC)", + "\tcontainerSCC.PrivilegeEscalation = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.AllowPrivilegeEscalation != nil {", + "\t\tcontainerSCC.PrivilegeEscalation = OK", + "\t}", + "\tcontainerSCC.PrivilegedContainer = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.Privileged != nil \u0026\u0026 *(cut.SecurityContext.Privileged) {", + "\t\tcontainerSCC.PrivilegedContainer = OK", + "\t}", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.RunAsUser != nil {", + "\t\tcontainerSCC.RunAsUserPresent = OK", + "\t}", + "\tcontainerSCC.ReadOnlyRootFilesystem = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.ReadOnlyRootFilesystem != nil \u0026\u0026 *cut.SecurityContext.ReadOnlyRootFilesystem {", + "\t\tcontainerSCC.ReadOnlyRootFilesystem = OK", + "\t}", + "\tcontainerSCC.RunAsNonRoot = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.RunAsNonRoot != nil \u0026\u0026 *cut.SecurityContext.RunAsNonRoot {", + "\t\tcontainerSCC.RunAsNonRoot = OK", + "\t}", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.SELinuxOptions != nil {", + "\t\tcontainerSCC.SeLinuxContextPresent = OK", + "\t}", + "\treturn 
containerSCC", + "}" + ] + }, + { + "name": "compareCategory", + "kind": "function", + "source": [ + "func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool {", + "\tresult := true", + "\tlog.Debug(\"Testing if pod belongs to category %s\", \u0026id)", + "\t// AllVolumeAllowed reports whether the volumes in the container are compliant to the SCC (same volume list for all SCCs).", + "\t// True means that all volumes declared in the pod are allowed in the SCC.", + "\t// False means that at least one volume is disallowed", + "\tif refCategory.AllVolumeAllowed == containerSCC.AllVolumeAllowed {", + "\t\tlog.Debug(\"AllVolumeAllowed = %s - OK\", containerSCC.AllVolumeAllowed)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"AllVolumeAllowed = %s but expected \u003e=\u003c=%s - NOK\", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed)", + "\t}", + "\t// RunAsUserPresent reports whether the RunAsUser Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the RunAsUser Field is set.", + "\t// False means that it is not set (nil)", + "\t// The runAsUser range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// runAsUser:", + "\t// type: MustRunAsRange", + "\t// uidRangeMin: 1000", + "\t// uidRangeMax: 2000", + "\tif refCategory.RunAsUserPresent == containerSCC.RunAsUserPresent {", + "\t\tlog.Debug(\"RunAsUserPresent = %s - OK\", containerSCC.RunAsUserPresent)", + "\t} else {", + "\t\tlog.Debug(\"RunAsUserPresent = %s but expected %s - NOK\", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent)", + "\t\tresult = false", + "\t}", + "\t// RunAsNonRoot is true if the RunAsNonRoot field is set to true, false otherwise.", + "\t// if setting a range including the roor UID 0 ( for instance 0-2000), then this option can disallow it.", + "\tif refCategory.RunAsNonRoot \u003e= containerSCC.RunAsNonRoot {", + "\t\tlog.Debug(\"RunAsNonRoot = %s - OK\", containerSCC.RunAsNonRoot)", + "\t} else {", + "\t\tlog.Debug(\"RunAsNonRoot = %s but expected %s - NOK\", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot)", + "\t\tresult = false", + "\t}", + "\t// FsGroupPresent reports whether the FsGroup Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the FsGroup Field is set.", + "\t// False means that it is not set (nil)", + "\t// The FSGroup range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// fsGroup:", + "\t// type: MustRunAs", + "\t// ranges:", + "\t// - min: 1000900000", + "\t// max: 1000900010", + "\tif refCategory.FsGroupPresent == containerSCC.FsGroupPresent {", + "\t\tlog.Debug(\"FsGroupPresent = %s - OK\", containerSCC.FsGroupPresent)", + "\t} else {", + "\t\tlog.Debug(\"FsGroupPresent = %s but expected %s - NOK\", containerSCC.FsGroupPresent, refCategory.FsGroupPresent)", + "\t\tresult = false", + "\t}", + "\t// RequiredDropCapabilitiesPresent is true if the drop DropCapabilities field has at least the set of required drop capabilities ( same required set for all categories ).", + "\t// False means that some required DropCapabilities are missing.", + "\tif refCategory.RequiredDropCapabilitiesPresent == containerSCC.RequiredDropCapabilitiesPresent {", + "\t\tlog.Debug(\"DropCapabilities list - OK\")", + "\t} else {", + "\t\tlog.Debug(\"RequiredDropCapabilitiesPresent = %s but expected %s - NOK\", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent)", + "\t\tlog.Debug(\"its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \")", + "\t\tresult = false", + "\t}", + "\t// HostDirVolumePluginPresent is true if a hostpath volume is configured, false otherwise.", + "\t// It is a deprecated field and is derived from the volume list currently configured in the container.", + "\t// see https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html#use-the-hostpath-volume-plugin", + "\tif refCategory.HostDirVolumePluginPresent == containerSCC.HostDirVolumePluginPresent {", + "\t\tlog.Debug(\"HostDirVolumePluginPresent = %s - OK\", containerSCC.HostDirVolumePluginPresent)", + "\t} else {", + 
"\t\tlog.Debug(\"HostDirVolumePluginPresent = %s but expected %s - NOK\", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent)", + "\t\tresult = false", + "\t}", + "\t// HostIPC is true if the HostIPC field is set to true, false otherwise.", + "\tif refCategory.HostIPC \u003e= containerSCC.HostIPC {", + "\t\tlog.Debug(\"HostIPC = %s - OK\", containerSCC.HostIPC)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostIPC = %s but expected \u003c= %s - NOK\", containerSCC.HostIPC, refCategory.HostIPC)", + "\t}", + "\t// HostNetwork is true if the HostNetwork field is set to true, false otherwise.", + "\tif refCategory.HostNetwork \u003e= containerSCC.HostNetwork {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostNetwork = %s but expected \u003c= %s - NOK\", containerSCC.HostNetwork, refCategory.HostNetwork)", + "\t}", + "\t// HostPID is true if the HostPID field is set to true, false otherwise.", + "\tif refCategory.HostPID \u003e= containerSCC.HostPID {", + "\t\tlog.Debug(\"HostPID = %s - OK\", containerSCC.HostPID)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPID = %s but expected \u003c= %s - NOK\", containerSCC.HostPID, refCategory.HostPID)", + "\t}", + "\t// HostPorts is true if the HostPorts field is set to true, false otherwise.", + "\tif refCategory.HostPorts \u003e= containerSCC.HostPorts {", + "\t\tlog.Debug(\"HostPorts = %s - OK\", containerSCC.HostPorts)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPorts = %s but expected \u003c= %s - NOK\", containerSCC.HostPorts, refCategory.HostPorts)", + "\t}", + "\t// PrivilegeEscalation is true if the PrivilegeEscalation field is set to true, false otherwise.", + "\tif refCategory.PrivilegeEscalation \u003e= containerSCC.PrivilegeEscalation {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = 
false", + "\t\tlog.Debug(\"PrivilegeEscalation = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation)", + "\t}", + "\t// PrivilegedContainer is true if the PrivilegedContainer field is set to true, false otherwise.", + "\tif refCategory.PrivilegedContainer \u003e= containerSCC.PrivilegedContainer {", + "\t\tlog.Debug(\"PrivilegedContainer = %s - OK\", containerSCC.PrivilegedContainer)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"PrivilegedContainer = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer)", + "\t}", + "", + "\t// From the SecurityContextConstraint CRD spec:", + "\t// description: ReadOnlyRootFilesystem when set to true will force containers", + "\t// to run with a read only root file system. If the container specifically", + "\t// requests to run with a non-read only root file system the SCC should", + "\t// deny the pod. If set to false the container may run with a read only", + "\t// root file system if it wishes but it will not be forced to.", + "\t// type: boolean", + "\tif refCategory.ReadOnlyRootFilesystem == NOK {", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s - OK (not enforced by SCC)\", containerSCC.ReadOnlyRootFilesystem)", + "\t} else if containerSCC.ReadOnlyRootFilesystem != OK {", + "\t\tresult = false", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s but expected \u003c= %s - NOK\", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem)", + "\t}", + "\t// SeLinuxContextPresent is true if the SeLinuxContext field is present and set to a value (e.g. not nil), false otherwise.", + "\t// An SELinuxContext strategy of MustRunAs with no level set. 
Admission looks for the openshift.io/sa.scc.mcs annotation to populate the level.", + "\tif refCategory.SeLinuxContextPresent == containerSCC.SeLinuxContextPresent {", + "\t\tlog.Debug(\"SeLinuxContextPresent is not nil - OK\")", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK\", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent)", + "\t}", + "\t// CapabilitiesCategory indicates the lowest SCC category to which the list of capabilities.add in the container can be mapped to.", + "\tif refCategory.CapabilitiesCategory != containerSCC.CapabilitiesCategory {", + "\t\tresult = false", + "\t\tlog.Debug(\"CapabilitiesCategory = %s but expected %s - NOK\", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory)", + "\t} else {", + "\t\tlog.Debug(\"CapabilitiesCategory list is as expected %s - OK\", containerSCC.CapabilitiesCategory)", + "\t}", + "\treturn result", + "}" + ] + }, + { + "name": "compareCategory", + "kind": "function", + "source": [ + "func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool {", + "\tresult := true", + "\tlog.Debug(\"Testing if pod belongs to category %s\", \u0026id)", + "\t// AllVolumeAllowed reports whether the volumes in the container are compliant to the SCC (same volume list for all SCCs).", + "\t// True means that all volumes declared in the pod are allowed in the SCC.", + "\t// False means that at least one volume is disallowed", + "\tif refCategory.AllVolumeAllowed == containerSCC.AllVolumeAllowed {", + "\t\tlog.Debug(\"AllVolumeAllowed = %s - OK\", containerSCC.AllVolumeAllowed)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"AllVolumeAllowed = %s but expected \u003e=\u003c=%s - NOK\", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed)", + "\t}", + "\t// RunAsUserPresent reports whether the RunAsUser Field is set to something other than nil as requested by All SCC 
categories.", + "\t// True means that the RunAsUser Field is set.", + "\t// False means that it is not set (nil)", + "\t// The runAsUser range can be specified in the SCC itself. If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// runAsUser:", + "\t// type: MustRunAsRange", + "\t// uidRangeMin: 1000", + "\t// uidRangeMax: 2000", + "\tif refCategory.RunAsUserPresent == containerSCC.RunAsUserPresent {", + "\t\tlog.Debug(\"RunAsUserPresent = %s - OK\", containerSCC.RunAsUserPresent)", + "\t} else {", + "\t\tlog.Debug(\"RunAsUserPresent = %s but expected %s - NOK\", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent)", + "\t\tresult = false", + "\t}", + "\t// RunAsNonRoot is true if the RunAsNonRoot field is set to true, false otherwise.", + "\t// if setting a range including the roor UID 0 ( for instance 0-2000), then this option can disallow it.", + "\tif refCategory.RunAsNonRoot \u003e= containerSCC.RunAsNonRoot {", + "\t\tlog.Debug(\"RunAsNonRoot = %s - OK\", containerSCC.RunAsNonRoot)", + "\t} else {", + "\t\tlog.Debug(\"RunAsNonRoot = %s but expected %s - NOK\", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot)", + "\t\tresult = false", + "\t}", + "\t// FsGroupPresent reports whether the FsGroup Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the FsGroup Field is set.", + "\t// False means that it is not set (nil)", + "\t// The FSGroup range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// fsGroup:", + "\t// type: MustRunAs", + "\t// ranges:", + "\t// - min: 1000900000", + "\t// max: 1000900010", + "\tif refCategory.FsGroupPresent == containerSCC.FsGroupPresent {", + "\t\tlog.Debug(\"FsGroupPresent = %s - OK\", containerSCC.FsGroupPresent)", + "\t} else {", + "\t\tlog.Debug(\"FsGroupPresent = %s but expected %s - NOK\", containerSCC.FsGroupPresent, refCategory.FsGroupPresent)", + "\t\tresult = false", + "\t}", + "\t// RequiredDropCapabilitiesPresent is true if the drop DropCapabilities field has at least the set of required drop capabilities ( same required set for all categories ).", + "\t// False means that some required DropCapabilities are missing.", + "\tif refCategory.RequiredDropCapabilitiesPresent == containerSCC.RequiredDropCapabilitiesPresent {", + "\t\tlog.Debug(\"DropCapabilities list - OK\")", + "\t} else {", + "\t\tlog.Debug(\"RequiredDropCapabilitiesPresent = %s but expected %s - NOK\", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent)", + "\t\tlog.Debug(\"its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \")", + "\t\tresult = false", + "\t}", + "\t// HostDirVolumePluginPresent is true if a hostpath volume is configured, false otherwise.", + "\t// It is a deprecated field and is derived from the volume list currently configured in the container.", + "\t// see https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html#use-the-hostpath-volume-plugin", + "\tif refCategory.HostDirVolumePluginPresent == containerSCC.HostDirVolumePluginPresent {", + "\t\tlog.Debug(\"HostDirVolumePluginPresent = %s - OK\", containerSCC.HostDirVolumePluginPresent)", + "\t} else {", + 
"\t\tlog.Debug(\"HostDirVolumePluginPresent = %s but expected %s - NOK\", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent)", + "\t\tresult = false", + "\t}", + "\t// HostIPC is true if the HostIPC field is set to true, false otherwise.", + "\tif refCategory.HostIPC \u003e= containerSCC.HostIPC {", + "\t\tlog.Debug(\"HostIPC = %s - OK\", containerSCC.HostIPC)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostIPC = %s but expected \u003c= %s - NOK\", containerSCC.HostIPC, refCategory.HostIPC)", + "\t}", + "\t// HostNetwork is true if the HostNetwork field is set to true, false otherwise.", + "\tif refCategory.HostNetwork \u003e= containerSCC.HostNetwork {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostNetwork = %s but expected \u003c= %s - NOK\", containerSCC.HostNetwork, refCategory.HostNetwork)", + "\t}", + "\t// HostPID is true if the HostPID field is set to true, false otherwise.", + "\tif refCategory.HostPID \u003e= containerSCC.HostPID {", + "\t\tlog.Debug(\"HostPID = %s - OK\", containerSCC.HostPID)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPID = %s but expected \u003c= %s - NOK\", containerSCC.HostPID, refCategory.HostPID)", + "\t}", + "\t// HostPorts is true if the HostPorts field is set to true, false otherwise.", + "\tif refCategory.HostPorts \u003e= containerSCC.HostPorts {", + "\t\tlog.Debug(\"HostPorts = %s - OK\", containerSCC.HostPorts)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPorts = %s but expected \u003c= %s - NOK\", containerSCC.HostPorts, refCategory.HostPorts)", + "\t}", + "\t// PrivilegeEscalation is true if the PrivilegeEscalation field is set to true, false otherwise.", + "\tif refCategory.PrivilegeEscalation \u003e= containerSCC.PrivilegeEscalation {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = 
false", + "\t\tlog.Debug(\"PrivilegeEscalation = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation)", + "\t}", + "\t// PrivilegedContainer is true if the PrivilegedContainer field is set to true, false otherwise.", + "\tif refCategory.PrivilegedContainer \u003e= containerSCC.PrivilegedContainer {", + "\t\tlog.Debug(\"PrivilegedContainer = %s - OK\", containerSCC.PrivilegedContainer)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"PrivilegedContainer = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer)", + "\t}", + "", + "\t// From the SecurityContextConstraint CRD spec:", + "\t// description: ReadOnlyRootFilesystem when set to true will force containers", + "\t// to run with a read only root file system. If the container specifically", + "\t// requests to run with a non-read only root file system the SCC should", + "\t// deny the pod. If set to false the container may run with a read only", + "\t// root file system if it wishes but it will not be forced to.", + "\t// type: boolean", + "\tif refCategory.ReadOnlyRootFilesystem == NOK {", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s - OK (not enforced by SCC)\", containerSCC.ReadOnlyRootFilesystem)", + "\t} else if containerSCC.ReadOnlyRootFilesystem != OK {", + "\t\tresult = false", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s but expected \u003c= %s - NOK\", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem)", + "\t}", + "\t// SeLinuxContextPresent is true if the SeLinuxContext field is present and set to a value (e.g. not nil), false otherwise.", + "\t// An SELinuxContext strategy of MustRunAs with no level set. 
Admission looks for the openshift.io/sa.scc.mcs annotation to populate the level.", + "\tif refCategory.SeLinuxContextPresent == containerSCC.SeLinuxContextPresent {", + "\t\tlog.Debug(\"SeLinuxContextPresent is not nil - OK\")", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK\", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent)", + "\t}", + "\t// CapabilitiesCategory indicates the lowest SCC category to which the list of capabilities.add in the container can be mapped to.", + "\tif refCategory.CapabilitiesCategory != containerSCC.CapabilitiesCategory {", + "\t\tresult = false", + "\t\tlog.Debug(\"CapabilitiesCategory = %s but expected %s - NOK\", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory)", + "\t} else {", + "\t\tlog.Debug(\"CapabilitiesCategory list is as expected %s - OK\", containerSCC.CapabilitiesCategory)", + "\t}", + "\treturn result", + "}" + ] + }, + { + "name": "compareCategory", + "kind": "function", + "source": [ + "func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool {", + "\tresult := true", + "\tlog.Debug(\"Testing if pod belongs to category %s\", \u0026id)", + "\t// AllVolumeAllowed reports whether the volumes in the container are compliant to the SCC (same volume list for all SCCs).", + "\t// True means that all volumes declared in the pod are allowed in the SCC.", + "\t// False means that at least one volume is disallowed", + "\tif refCategory.AllVolumeAllowed == containerSCC.AllVolumeAllowed {", + "\t\tlog.Debug(\"AllVolumeAllowed = %s - OK\", containerSCC.AllVolumeAllowed)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"AllVolumeAllowed = %s but expected \u003e=\u003c=%s - NOK\", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed)", + "\t}", + "\t// RunAsUserPresent reports whether the RunAsUser Field is set to something other than nil as requested by All SCC 
categories.", + "\t// True means that the RunAsUser Field is set.", + "\t// False means that it is not set (nil)", + "\t// The runAsUser range can be specified in the SCC itself. If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// runAsUser:", + "\t// type: MustRunAsRange", + "\t// uidRangeMin: 1000", + "\t// uidRangeMax: 2000", + "\tif refCategory.RunAsUserPresent == containerSCC.RunAsUserPresent {", + "\t\tlog.Debug(\"RunAsUserPresent = %s - OK\", containerSCC.RunAsUserPresent)", + "\t} else {", + "\t\tlog.Debug(\"RunAsUserPresent = %s but expected %s - NOK\", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent)", + "\t\tresult = false", + "\t}", + "\t// RunAsNonRoot is true if the RunAsNonRoot field is set to true, false otherwise.", + "\t// if setting a range including the roor UID 0 ( for instance 0-2000), then this option can disallow it.", + "\tif refCategory.RunAsNonRoot \u003e= containerSCC.RunAsNonRoot {", + "\t\tlog.Debug(\"RunAsNonRoot = %s - OK\", containerSCC.RunAsNonRoot)", + "\t} else {", + "\t\tlog.Debug(\"RunAsNonRoot = %s but expected %s - NOK\", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot)", + "\t\tresult = false", + "\t}", + "\t// FsGroupPresent reports whether the FsGroup Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the FsGroup Field is set.", + "\t// False means that it is not set (nil)", + "\t// The FSGroup range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// fsGroup:", + "\t// type: MustRunAs", + "\t// ranges:", + "\t// - min: 1000900000", + "\t// max: 1000900010", + "\tif refCategory.FsGroupPresent == containerSCC.FsGroupPresent {", + "\t\tlog.Debug(\"FsGroupPresent = %s - OK\", containerSCC.FsGroupPresent)", + "\t} else {", + "\t\tlog.Debug(\"FsGroupPresent = %s but expected %s - NOK\", containerSCC.FsGroupPresent, refCategory.FsGroupPresent)", + "\t\tresult = false", + "\t}", + "\t// RequiredDropCapabilitiesPresent is true if the drop DropCapabilities field has at least the set of required drop capabilities ( same required set for all categories ).", + "\t// False means that some required DropCapabilities are missing.", + "\tif refCategory.RequiredDropCapabilitiesPresent == containerSCC.RequiredDropCapabilitiesPresent {", + "\t\tlog.Debug(\"DropCapabilities list - OK\")", + "\t} else {", + "\t\tlog.Debug(\"RequiredDropCapabilitiesPresent = %s but expected %s - NOK\", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent)", + "\t\tlog.Debug(\"its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \")", + "\t\tresult = false", + "\t}", + "\t// HostDirVolumePluginPresent is true if a hostpath volume is configured, false otherwise.", + "\t// It is a deprecated field and is derived from the volume list currently configured in the container.", + "\t// see https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html#use-the-hostpath-volume-plugin", + "\tif refCategory.HostDirVolumePluginPresent == containerSCC.HostDirVolumePluginPresent {", + "\t\tlog.Debug(\"HostDirVolumePluginPresent = %s - OK\", containerSCC.HostDirVolumePluginPresent)", + "\t} else {", + 
"\t\tlog.Debug(\"HostDirVolumePluginPresent = %s but expected %s - NOK\", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent)", + "\t\tresult = false", + "\t}", + "\t// HostIPC is true if the HostIPC field is set to true, false otherwise.", + "\tif refCategory.HostIPC \u003e= containerSCC.HostIPC {", + "\t\tlog.Debug(\"HostIPC = %s - OK\", containerSCC.HostIPC)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostIPC = %s but expected \u003c= %s - NOK\", containerSCC.HostIPC, refCategory.HostIPC)", + "\t}", + "\t// HostNetwork is true if the HostNetwork field is set to true, false otherwise.", + "\tif refCategory.HostNetwork \u003e= containerSCC.HostNetwork {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostNetwork = %s but expected \u003c= %s - NOK\", containerSCC.HostNetwork, refCategory.HostNetwork)", + "\t}", + "\t// HostPID is true if the HostPID field is set to true, false otherwise.", + "\tif refCategory.HostPID \u003e= containerSCC.HostPID {", + "\t\tlog.Debug(\"HostPID = %s - OK\", containerSCC.HostPID)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPID = %s but expected \u003c= %s - NOK\", containerSCC.HostPID, refCategory.HostPID)", + "\t}", + "\t// HostPorts is true if the HostPorts field is set to true, false otherwise.", + "\tif refCategory.HostPorts \u003e= containerSCC.HostPorts {", + "\t\tlog.Debug(\"HostPorts = %s - OK\", containerSCC.HostPorts)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPorts = %s but expected \u003c= %s - NOK\", containerSCC.HostPorts, refCategory.HostPorts)", + "\t}", + "\t// PrivilegeEscalation is true if the PrivilegeEscalation field is set to true, false otherwise.", + "\tif refCategory.PrivilegeEscalation \u003e= containerSCC.PrivilegeEscalation {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = 
false", + "\t\tlog.Debug(\"PrivilegeEscalation = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation)", + "\t}", + "\t// PrivilegedContainer is true if the PrivilegedContainer field is set to true, false otherwise.", + "\tif refCategory.PrivilegedContainer \u003e= containerSCC.PrivilegedContainer {", + "\t\tlog.Debug(\"PrivilegedContainer = %s - OK\", containerSCC.PrivilegedContainer)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"PrivilegedContainer = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer)", + "\t}", + "", + "\t// From the SecurityContextConstraint CRD spec:", + "\t// description: ReadOnlyRootFilesystem when set to true will force containers", + "\t// to run with a read only root file system. If the container specifically", + "\t// requests to run with a non-read only root file system the SCC should", + "\t// deny the pod. If set to false the container may run with a read only", + "\t// root file system if it wishes but it will not be forced to.", + "\t// type: boolean", + "\tif refCategory.ReadOnlyRootFilesystem == NOK {", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s - OK (not enforced by SCC)\", containerSCC.ReadOnlyRootFilesystem)", + "\t} else if containerSCC.ReadOnlyRootFilesystem != OK {", + "\t\tresult = false", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s but expected \u003c= %s - NOK\", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem)", + "\t}", + "\t// SeLinuxContextPresent is true if the SeLinuxContext field is present and set to a value (e.g. not nil), false otherwise.", + "\t// An SELinuxContext strategy of MustRunAs with no level set. 
Admission looks for the openshift.io/sa.scc.mcs annotation to populate the level.", + "\tif refCategory.SeLinuxContextPresent == containerSCC.SeLinuxContextPresent {", + "\t\tlog.Debug(\"SeLinuxContextPresent is not nil - OK\")", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK\", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent)", + "\t}", + "\t// CapabilitiesCategory indicates the lowest SCC category to which the list of capabilities.add in the container can be mapped to.", + "\tif refCategory.CapabilitiesCategory != containerSCC.CapabilitiesCategory {", + "\t\tresult = false", + "\t\tlog.Debug(\"CapabilitiesCategory = %s but expected %s - NOK\", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory)", + "\t} else {", + "\t\tlog.Debug(\"CapabilitiesCategory list is as expected %s - OK\", containerSCC.CapabilitiesCategory)", + "\t}", + "\treturn result", + "}" + ] + }, + { + "name": "compareCategory", + "kind": "function", + "source": [ + "func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool {", + "\tresult := true", + "\tlog.Debug(\"Testing if pod belongs to category %s\", \u0026id)", + "\t// AllVolumeAllowed reports whether the volumes in the container are compliant to the SCC (same volume list for all SCCs).", + "\t// True means that all volumes declared in the pod are allowed in the SCC.", + "\t// False means that at least one volume is disallowed", + "\tif refCategory.AllVolumeAllowed == containerSCC.AllVolumeAllowed {", + "\t\tlog.Debug(\"AllVolumeAllowed = %s - OK\", containerSCC.AllVolumeAllowed)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"AllVolumeAllowed = %s but expected \u003e=\u003c=%s - NOK\", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed)", + "\t}", + "\t// RunAsUserPresent reports whether the RunAsUser Field is set to something other than nil as requested by All SCC 
categories.", + "\t// True means that the RunAsUser Field is set.", + "\t// False means that it is not set (nil)", + "\t// The runAsUser range can be specified in the SCC itself. If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// runAsUser:", + "\t// type: MustRunAsRange", + "\t// uidRangeMin: 1000", + "\t// uidRangeMax: 2000", + "\tif refCategory.RunAsUserPresent == containerSCC.RunAsUserPresent {", + "\t\tlog.Debug(\"RunAsUserPresent = %s - OK\", containerSCC.RunAsUserPresent)", + "\t} else {", + "\t\tlog.Debug(\"RunAsUserPresent = %s but expected %s - NOK\", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent)", + "\t\tresult = false", + "\t}", + "\t// RunAsNonRoot is true if the RunAsNonRoot field is set to true, false otherwise.", + "\t// if setting a range including the roor UID 0 ( for instance 0-2000), then this option can disallow it.", + "\tif refCategory.RunAsNonRoot \u003e= containerSCC.RunAsNonRoot {", + "\t\tlog.Debug(\"RunAsNonRoot = %s - OK\", containerSCC.RunAsNonRoot)", + "\t} else {", + "\t\tlog.Debug(\"RunAsNonRoot = %s but expected %s - NOK\", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot)", + "\t\tresult = false", + "\t}", + "\t// FsGroupPresent reports whether the FsGroup Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the FsGroup Field is set.", + "\t// False means that it is not set (nil)", + "\t// The FSGroup range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// fsGroup:", + "\t// type: MustRunAs", + "\t// ranges:", + "\t// - min: 1000900000", + "\t// max: 1000900010", + "\tif refCategory.FsGroupPresent == containerSCC.FsGroupPresent {", + "\t\tlog.Debug(\"FsGroupPresent = %s - OK\", containerSCC.FsGroupPresent)", + "\t} else {", + "\t\tlog.Debug(\"FsGroupPresent = %s but expected %s - NOK\", containerSCC.FsGroupPresent, refCategory.FsGroupPresent)", + "\t\tresult = false", + "\t}", + "\t// RequiredDropCapabilitiesPresent is true if the drop DropCapabilities field has at least the set of required drop capabilities ( same required set for all categories ).", + "\t// False means that some required DropCapabilities are missing.", + "\tif refCategory.RequiredDropCapabilitiesPresent == containerSCC.RequiredDropCapabilitiesPresent {", + "\t\tlog.Debug(\"DropCapabilities list - OK\")", + "\t} else {", + "\t\tlog.Debug(\"RequiredDropCapabilitiesPresent = %s but expected %s - NOK\", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent)", + "\t\tlog.Debug(\"its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \")", + "\t\tresult = false", + "\t}", + "\t// HostDirVolumePluginPresent is true if a hostpath volume is configured, false otherwise.", + "\t// It is a deprecated field and is derived from the volume list currently configured in the container.", + "\t// see https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html#use-the-hostpath-volume-plugin", + "\tif refCategory.HostDirVolumePluginPresent == containerSCC.HostDirVolumePluginPresent {", + "\t\tlog.Debug(\"HostDirVolumePluginPresent = %s - OK\", containerSCC.HostDirVolumePluginPresent)", + "\t} else {", + 
"\t\tlog.Debug(\"HostDirVolumePluginPresent = %s but expected %s - NOK\", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent)", + "\t\tresult = false", + "\t}", + "\t// HostIPC is true if the HostIPC field is set to true, false otherwise.", + "\tif refCategory.HostIPC \u003e= containerSCC.HostIPC {", + "\t\tlog.Debug(\"HostIPC = %s - OK\", containerSCC.HostIPC)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostIPC = %s but expected \u003c= %s - NOK\", containerSCC.HostIPC, refCategory.HostIPC)", + "\t}", + "\t// HostNetwork is true if the HostNetwork field is set to true, false otherwise.", + "\tif refCategory.HostNetwork \u003e= containerSCC.HostNetwork {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostNetwork = %s but expected \u003c= %s - NOK\", containerSCC.HostNetwork, refCategory.HostNetwork)", + "\t}", + "\t// HostPID is true if the HostPID field is set to true, false otherwise.", + "\tif refCategory.HostPID \u003e= containerSCC.HostPID {", + "\t\tlog.Debug(\"HostPID = %s - OK\", containerSCC.HostPID)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPID = %s but expected \u003c= %s - NOK\", containerSCC.HostPID, refCategory.HostPID)", + "\t}", + "\t// HostPorts is true if the HostPorts field is set to true, false otherwise.", + "\tif refCategory.HostPorts \u003e= containerSCC.HostPorts {", + "\t\tlog.Debug(\"HostPorts = %s - OK\", containerSCC.HostPorts)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPorts = %s but expected \u003c= %s - NOK\", containerSCC.HostPorts, refCategory.HostPorts)", + "\t}", + "\t// PrivilegeEscalation is true if the PrivilegeEscalation field is set to true, false otherwise.", + "\tif refCategory.PrivilegeEscalation \u003e= containerSCC.PrivilegeEscalation {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = 
false", + "\t\tlog.Debug(\"PrivilegeEscalation = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation)", + "\t}", + "\t// PrivilegedContainer is true if the PrivilegedContainer field is set to true, false otherwise.", + "\tif refCategory.PrivilegedContainer \u003e= containerSCC.PrivilegedContainer {", + "\t\tlog.Debug(\"PrivilegedContainer = %s - OK\", containerSCC.PrivilegedContainer)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"PrivilegedContainer = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer)", + "\t}", + "", + "\t// From the SecurityContextConstraint CRD spec:", + "\t// description: ReadOnlyRootFilesystem when set to true will force containers", + "\t// to run with a read only root file system. If the container specifically", + "\t// requests to run with a non-read only root file system the SCC should", + "\t// deny the pod. If set to false the container may run with a read only", + "\t// root file system if it wishes but it will not be forced to.", + "\t// type: boolean", + "\tif refCategory.ReadOnlyRootFilesystem == NOK {", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s - OK (not enforced by SCC)\", containerSCC.ReadOnlyRootFilesystem)", + "\t} else if containerSCC.ReadOnlyRootFilesystem != OK {", + "\t\tresult = false", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s but expected \u003c= %s - NOK\", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem)", + "\t}", + "\t// SeLinuxContextPresent is true if the SeLinuxContext field is present and set to a value (e.g. not nil), false otherwise.", + "\t// An SELinuxContext strategy of MustRunAs with no level set. 
Admission looks for the openshift.io/sa.scc.mcs annotation to populate the level.", + "\tif refCategory.SeLinuxContextPresent == containerSCC.SeLinuxContextPresent {", + "\t\tlog.Debug(\"SeLinuxContextPresent is not nil - OK\")", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK\", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent)", + "\t}", + "\t// CapabilitiesCategory indicates the lowest SCC category to which the list of capabilities.add in the container can be mapped to.", + "\tif refCategory.CapabilitiesCategory != containerSCC.CapabilitiesCategory {", + "\t\tresult = false", + "\t\tlog.Debug(\"CapabilitiesCategory = %s but expected %s - NOK\", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory)", + "\t} else {", + "\t\tlog.Debug(\"CapabilitiesCategory list is as expected %s - OK\", containerSCC.CapabilitiesCategory)", + "\t}", + "\treturn result", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "CheckPod", + "kind": "function", + "source": [ + "func CheckPod(pod *provider.Pod) []PodListCategory {", + "\tvar containerSCC ContainerSCC", + "\tcontainerSCC.HostIPC = NOK", + "\tif pod.Spec.HostIPC {", + "\t\tcontainerSCC.HostIPC = OK", + "\t}", + "\tcontainerSCC.HostNetwork = NOK", + "\tif pod.Spec.HostNetwork {", + "\t\tcontainerSCC.HostNetwork = OK", + "\t}", + "\tcontainerSCC.HostPID = NOK", + "\tif pod.Spec.HostPID {", + "\t\tcontainerSCC.HostPID = OK", + "\t}", + "\tcontainerSCC.SeLinuxContextPresent = NOK", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.SELinuxOptions != nil {", + "\t\tcontainerSCC.SeLinuxContextPresent = OK", + "\t}", + "\tcontainerSCC.AllVolumeAllowed, containerSCC.HostDirVolumePluginPresent = AllVolumeAllowed(pod.Spec.Volumes)", + 
"\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.RunAsUser != nil {", + "\t\tcontainerSCC.RunAsUserPresent = OK", + "\t} else {", + "\t\tcontainerSCC.RunAsUserPresent = NOK", + "\t}", + "\tif pod.Spec.SecurityContext != nil \u0026\u0026 pod.Spec.SecurityContext.FSGroup != nil {", + "\t\tcontainerSCC.FsGroupPresent = OK", + "\t} else {", + "\t\tcontainerSCC.FsGroupPresent = NOK", + "\t}", + "\treturn checkContainerCategory(pod.Spec.Containers, containerSCC, pod.Name, pod.Namespace)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func checkContainerCategory(containers []corev1.Container, containerSCC ContainerSCC, podName, nameSpace string) []PodListCategory {", + "\tvar ContainerList []PodListCategory", + "\tvar categoryinfo PodListCategory", + "\tfor j := 0; j \u003c len(containers); j++ {", + "\t\tcut := \u0026provider.Container{Podname: podName, Namespace: nameSpace, Container: \u0026containers[j]}", + "\t\tpercontainerSCC := GetContainerSCC(cut, containerSCC)", + "\t\t// after building the containerSCC need to check to which category it is", + "\t\tcategoryinfo = PodListCategory{", + "\t\t\tContainername: cut.Name,", + "\t\t\tPodname: podName,", + "\t\t\tNameSpace: nameSpace,", + "\t\t}", + "\t\tif compareCategory(\u0026Category1, \u0026percontainerSCC, CategoryID1) {", + "\t\t\tcategoryinfo.Category = CategoryID1", + "\t\t} else if compareCategory(\u0026Category1NoUID0, \u0026percontainerSCC, CategoryID1NoUID0) {", + "\t\t\tcategoryinfo.Category = CategoryID1NoUID0", + "\t\t} else if compareCategory(\u0026Category2, \u0026percontainerSCC, CategoryID2) {", + "\t\t\tcategoryinfo.Category = CategoryID2", + "\t\t} else if compareCategory(\u0026Category3, \u0026percontainerSCC, CategoryID3) {", + "\t\t\tcategoryinfo.Category = CategoryID3", + "\t\t} else {", + "\t\t\tcategoryinfo.Category = CategoryID4", + "\t\t}", + "\t\t// after building the containerSCC need to check to which category it is", + 
"\t\tContainerList = append(ContainerList, categoryinfo)", + "\t}", + "\treturn ContainerList", + "}" + ] + }, + { + "name": "compareCategory", + "qualifiedName": "compareCategory", + "exported": false, + "signature": "func(*ContainerSCC, *ContainerSCC, CategoryID)(bool)", + "doc": "compareCategory determines if a container matches a reference security context category\n\nThe function compares the security context properties of two containers,\nchecking fields such as volume allowance, user settings, privilege flags, and\ncapability lists against a predefined category definition. It logs each\ncomparison step for debugging purposes and aggregates any mismatches into a\nboolean result. The returned value indicates whether the container conforms\nto all constraints specified by the reference category.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:443", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, 
LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + 
"name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + 
"\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "checkContainerCategory", + "kind": "function", + "source": [ + "func checkContainerCategory(containers []corev1.Container, containerSCC ContainerSCC, podName, nameSpace string) []PodListCategory {", + "\tvar ContainerList []PodListCategory", + "\tvar categoryinfo 
PodListCategory", + "\tfor j := 0; j \u003c len(containers); j++ {", + "\t\tcut := \u0026provider.Container{Podname: podName, Namespace: nameSpace, Container: \u0026containers[j]}", + "\t\tpercontainerSCC := GetContainerSCC(cut, containerSCC)", + "\t\t// after building the containerSCC need to check to which category it is", + "\t\tcategoryinfo = PodListCategory{", + "\t\t\tContainername: cut.Name,", + "\t\t\tPodname: podName,", + "\t\t\tNameSpace: nameSpace,", + "\t\t}", + "\t\tif compareCategory(\u0026Category1, \u0026percontainerSCC, CategoryID1) {", + "\t\t\tcategoryinfo.Category = CategoryID1", + "\t\t} else if compareCategory(\u0026Category1NoUID0, \u0026percontainerSCC, CategoryID1NoUID0) {", + "\t\t\tcategoryinfo.Category = CategoryID1NoUID0", + "\t\t} else if compareCategory(\u0026Category2, \u0026percontainerSCC, CategoryID2) {", + "\t\t\tcategoryinfo.Category = CategoryID2", + "\t\t} else if compareCategory(\u0026Category3, \u0026percontainerSCC, CategoryID3) {", + "\t\t\tcategoryinfo.Category = CategoryID3", + "\t\t} else {", + "\t\t\tcategoryinfo.Category = CategoryID4", + "\t\t}", + "\t\t// after building the containerSCC need to check to which category it is", + "\t\tContainerList = append(ContainerList, categoryinfo)", + "\t}", + "\treturn ContainerList", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool {", + "\tresult := true", + "\tlog.Debug(\"Testing if pod belongs to category %s\", \u0026id)", + "\t// AllVolumeAllowed reports whether the volumes in the container are compliant to the SCC (same volume list for all SCCs).", + "\t// True means that all volumes declared in the pod are allowed in the SCC.", + "\t// False means that at least one volume is disallowed", + "\tif refCategory.AllVolumeAllowed == containerSCC.AllVolumeAllowed {", + "\t\tlog.Debug(\"AllVolumeAllowed = %s - OK\", containerSCC.AllVolumeAllowed)", + "\t} else {", + 
"\t\tresult = false", + "\t\tlog.Debug(\"AllVolumeAllowed = %s but expected \u003e=\u003c=%s - NOK\", containerSCC.AllVolumeAllowed, refCategory.AllVolumeAllowed)", + "\t}", + "\t// RunAsUserPresent reports whether the RunAsUser Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the RunAsUser Field is set.", + "\t// False means that it is not set (nil)", + "\t// The runAsUser range can be specified in the SCC itself. If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// runAsUser:", + "\t// type: MustRunAsRange", + "\t// uidRangeMin: 1000", + "\t// uidRangeMax: 2000", + "\tif refCategory.RunAsUserPresent == containerSCC.RunAsUserPresent {", + "\t\tlog.Debug(\"RunAsUserPresent = %s - OK\", containerSCC.RunAsUserPresent)", + "\t} else {", + "\t\tlog.Debug(\"RunAsUserPresent = %s but expected %s - NOK\", containerSCC.RunAsUserPresent, refCategory.RunAsUserPresent)", + "\t\tresult = false", + "\t}", + "\t// RunAsNonRoot is true if the RunAsNonRoot field is set to true, false otherwise.", + "\t// if setting a range including the roor UID 0 ( for instance 0-2000), then this option can disallow it.", + "\tif refCategory.RunAsNonRoot \u003e= containerSCC.RunAsNonRoot {", + "\t\tlog.Debug(\"RunAsNonRoot = %s - OK\", containerSCC.RunAsNonRoot)", + "\t} else {", + "\t\tlog.Debug(\"RunAsNonRoot = %s but expected %s - NOK\", containerSCC.RunAsNonRoot, refCategory.RunAsNonRoot)", + "\t\tresult = false", + "\t}", + "\t// FsGroupPresent reports whether the FsGroup Field is set to something other than nil as requested by All SCC categories.", + "\t// True means that the FsGroup Field is set.", + "\t// False means that it is not set (nil)", + "\t// The FSGroup range can be specified in the SCC itself. 
If not, it is specified in the namespace, see", + "\t// https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html#security-context-constraints-pre-allocated-values_configuring-internal-oauth", + "\t// fsGroup:", + "\t// type: MustRunAs", + "\t// ranges:", + "\t// - min: 1000900000", + "\t// max: 1000900010", + "\tif refCategory.FsGroupPresent == containerSCC.FsGroupPresent {", + "\t\tlog.Debug(\"FsGroupPresent = %s - OK\", containerSCC.FsGroupPresent)", + "\t} else {", + "\t\tlog.Debug(\"FsGroupPresent = %s but expected %s - NOK\", containerSCC.FsGroupPresent, refCategory.FsGroupPresent)", + "\t\tresult = false", + "\t}", + "\t// RequiredDropCapabilitiesPresent is true if the drop DropCapabilities field has at least the set of required drop capabilities ( same required set for all categories ).", + "\t// False means that some required DropCapabilities are missing.", + "\tif refCategory.RequiredDropCapabilitiesPresent == containerSCC.RequiredDropCapabilitiesPresent {", + "\t\tlog.Debug(\"DropCapabilities list - OK\")", + "\t} else {", + "\t\tlog.Debug(\"RequiredDropCapabilitiesPresent = %s but expected %s - NOK\", containerSCC.RequiredDropCapabilitiesPresent, refCategory.RequiredDropCapabilitiesPresent)", + "\t\tlog.Debug(\"its didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \")", + "\t\tresult = false", + "\t}", + "\t// HostDirVolumePluginPresent is true if a hostpath volume is configured, false otherwise.", + "\t// It is a deprecated field and is derived from the volume list currently configured in the container.", + "\t// see https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html#use-the-hostpath-volume-plugin", + "\tif refCategory.HostDirVolumePluginPresent == containerSCC.HostDirVolumePluginPresent {", + "\t\tlog.Debug(\"HostDirVolumePluginPresent = %s - OK\", containerSCC.HostDirVolumePluginPresent)", + "\t} else {", + 
"\t\tlog.Debug(\"HostDirVolumePluginPresent = %s but expected %s - NOK\", containerSCC.HostDirVolumePluginPresent, refCategory.HostDirVolumePluginPresent)", + "\t\tresult = false", + "\t}", + "\t// HostIPC is true if the HostIPC field is set to true, false otherwise.", + "\tif refCategory.HostIPC \u003e= containerSCC.HostIPC {", + "\t\tlog.Debug(\"HostIPC = %s - OK\", containerSCC.HostIPC)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostIPC = %s but expected \u003c= %s - NOK\", containerSCC.HostIPC, refCategory.HostIPC)", + "\t}", + "\t// HostNetwork is true if the HostNetwork field is set to true, false otherwise.", + "\tif refCategory.HostNetwork \u003e= containerSCC.HostNetwork {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostNetwork = %s but expected \u003c= %s - NOK\", containerSCC.HostNetwork, refCategory.HostNetwork)", + "\t}", + "\t// HostPID is true if the HostPID field is set to true, false otherwise.", + "\tif refCategory.HostPID \u003e= containerSCC.HostPID {", + "\t\tlog.Debug(\"HostPID = %s - OK\", containerSCC.HostPID)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPID = %s but expected \u003c= %s - NOK\", containerSCC.HostPID, refCategory.HostPID)", + "\t}", + "\t// HostPorts is true if the HostPorts field is set to true, false otherwise.", + "\tif refCategory.HostPorts \u003e= containerSCC.HostPorts {", + "\t\tlog.Debug(\"HostPorts = %s - OK\", containerSCC.HostPorts)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"HostPorts = %s but expected \u003c= %s - NOK\", containerSCC.HostPorts, refCategory.HostPorts)", + "\t}", + "\t// PrivilegeEscalation is true if the PrivilegeEscalation field is set to true, false otherwise.", + "\tif refCategory.PrivilegeEscalation \u003e= containerSCC.PrivilegeEscalation {", + "\t\tlog.Debug(\"HostNetwork = %s - OK\", containerSCC.HostNetwork)", + "\t} else {", + "\t\tresult = 
false", + "\t\tlog.Debug(\"PrivilegeEscalation = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegeEscalation, refCategory.PrivilegeEscalation)", + "\t}", + "\t// PrivilegedContainer is true if the PrivilegedContainer field is set to true, false otherwise.", + "\tif refCategory.PrivilegedContainer \u003e= containerSCC.PrivilegedContainer {", + "\t\tlog.Debug(\"PrivilegedContainer = %s - OK\", containerSCC.PrivilegedContainer)", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"PrivilegedContainer = %s but expected \u003c= %s - NOK\", containerSCC.PrivilegedContainer, refCategory.PrivilegedContainer)", + "\t}", + "", + "\t// From the SecurityContextConstraint CRD spec:", + "\t// description: ReadOnlyRootFilesystem when set to true will force containers", + "\t// to run with a read only root file system. If the container specifically", + "\t// requests to run with a non-read only root file system the SCC should", + "\t// deny the pod. If set to false the container may run with a read only", + "\t// root file system if it wishes but it will not be forced to.", + "\t// type: boolean", + "\tif refCategory.ReadOnlyRootFilesystem == NOK {", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s - OK (not enforced by SCC)\", containerSCC.ReadOnlyRootFilesystem)", + "\t} else if containerSCC.ReadOnlyRootFilesystem != OK {", + "\t\tresult = false", + "\t\tlog.Debug(\"ReadOnlyRootFilesystem = %s but expected \u003c= %s - NOK\", containerSCC.ReadOnlyRootFilesystem, refCategory.ReadOnlyRootFilesystem)", + "\t}", + "\t// SeLinuxContextPresent is true if the SeLinuxContext field is present and set to a value (e.g. not nil), false otherwise.", + "\t// An SELinuxContext strategy of MustRunAs with no level set. 
Admission looks for the openshift.io/sa.scc.mcs annotation to populate the level.", + "\tif refCategory.SeLinuxContextPresent == containerSCC.SeLinuxContextPresent {", + "\t\tlog.Debug(\"SeLinuxContextPresent is not nil - OK\")", + "\t} else {", + "\t\tresult = false", + "\t\tlog.Debug(\"SeLinuxContextPresent = %s but expected %s expected to be non nil - NOK\", containerSCC.SeLinuxContextPresent, refCategory.SeLinuxContextPresent)", + "\t}", + "\t// CapabilitiesCategory indicates the lowest SCC category to which the list of capabilities.add in the container can be mapped to.", + "\tif refCategory.CapabilitiesCategory != containerSCC.CapabilitiesCategory {", + "\t\tresult = false", + "\t\tlog.Debug(\"CapabilitiesCategory = %s but expected %s - NOK\", containerSCC.CapabilitiesCategory, refCategory.CapabilitiesCategory)", + "\t} else {", + "\t\tlog.Debug(\"CapabilitiesCategory list is as expected %s - OK\", containerSCC.CapabilitiesCategory)", + "\t}", + "\treturn result", + "}" + ] + }, + { + "name": "updateCapabilitiesFromContainer", + "qualifiedName": "updateCapabilitiesFromContainer", + "exported": false, + "signature": "func(*provider.Container, *ContainerSCC)()", + "doc": "updateCapabilitiesFromContainer updates container capability settings based on its security context\n\nThis routine examines a container’s SecurityContext for defined\ncapabilities, adjusting the SCC record to reflect required drop capabilities\nand categorizing the added capabilities into predefined groups. It checks if\nall required drops are present or if an empty add list implies Category 1,\notherwise it matches the added capabilities against three category sets. 
The\nfunction marks the appropriate flags in the ContainerSCC structure to\nindicate compliance status.", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:267", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "SubSlice", + "kind": "function", + "source": [ + "func SubSlice(s, sub []string) bool {", + "\tfor _, v := range sub {", + "\t\tif !StringInSlice(s, v, false) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "slices", + "name": "Equal", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "checkContainCategory", + "kind": "function", + "source": [ + "func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAddCapabilities []string) bool {", + "\tfor _, ncc := range addCapability {", + "\t\tif !stringhelper.StringInSlice(referenceCategoryAddCapabilities, string(ncc), true) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "checkContainCategory", + "kind": "function", + "source": [ + "func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAddCapabilities []string) bool {", + "\tfor _, ncc := range addCapability {", + "\t\tif !stringhelper.StringInSlice(referenceCategoryAddCapabilities, string(ncc), true) {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer", + "name": "GetContainerSCC", + "kind": "function", + "source": [ + "func GetContainerSCC(cut 
*provider.Container, containerSCC ContainerSCC) ContainerSCC {", + "\tcontainerSCC.HostPorts = NOK", + "\tfor _, aPort := range cut.Ports {", + "\t\tif aPort.HostPort != 0 {", + "\t\t\tcontainerSCC.HostPorts = OK", + "\t\t\tbreak", + "\t\t}", + "\t}", + "\tupdateCapabilitiesFromContainer(cut, \u0026containerSCC)", + "\tcontainerSCC.PrivilegeEscalation = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.AllowPrivilegeEscalation != nil {", + "\t\tcontainerSCC.PrivilegeEscalation = OK", + "\t}", + "\tcontainerSCC.PrivilegedContainer = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.Privileged != nil \u0026\u0026 *(cut.SecurityContext.Privileged) {", + "\t\tcontainerSCC.PrivilegedContainer = OK", + "\t}", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.RunAsUser != nil {", + "\t\tcontainerSCC.RunAsUserPresent = OK", + "\t}", + "\tcontainerSCC.ReadOnlyRootFilesystem = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.ReadOnlyRootFilesystem != nil \u0026\u0026 *cut.SecurityContext.ReadOnlyRootFilesystem {", + "\t\tcontainerSCC.ReadOnlyRootFilesystem = OK", + "\t}", + "\tcontainerSCC.RunAsNonRoot = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.RunAsNonRoot != nil \u0026\u0026 *cut.SecurityContext.RunAsNonRoot {", + "\t\tcontainerSCC.RunAsNonRoot = OK", + "\t}", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.SELinuxOptions != nil {", + "\t\tcontainerSCC.SeLinuxContextPresent = OK", + "\t}", + "\treturn containerSCC", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *ContainerSCC) {", + "\tcontainerSCC.RequiredDropCapabilitiesPresent = NOK", + "\tif cut.SecurityContext != nil \u0026\u0026 cut.SecurityContext.Capabilities != nil {", + "\t\tvar sliceDropCapabilities []string", + "\t\tfor _, ncc := range 
cut.SecurityContext.Capabilities.Drop {", + "\t\t\tsliceDropCapabilities = append(sliceDropCapabilities, string(ncc))", + "\t\t}", + "", + "\t\t// Sort the slices", + "\t\tsort.Strings(sliceDropCapabilities)", + "\t\tsort.Strings(requiredDropCapabilities)", + "", + "\t\tif stringhelper.SubSlice(sliceDropCapabilities, requiredDropCapabilities) || slices.Equal(sliceDropCapabilities, dropAll) {", + "\t\t\tcontainerSCC.RequiredDropCapabilitiesPresent = OK", + "\t\t}", + "\t\t//nolint:gocritic", + "\t\tif len(cut.SecurityContext.Capabilities.Add) == 0 { // check if the len=0 this mean that is cat1", + "\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t\t} else if checkContainCategory(cut.SecurityContext.Capabilities.Add, category2AddCapabilities) {", + "\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID2", + "\t\t} else {", + "\t\t\tif checkContainCategory(cut.SecurityContext.Capabilities.Add, category3AddCapabilities) {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID3", + "\t\t\t} else {", + "\t\t\t\tcontainerSCC.CapabilitiesCategory = CategoryID4", + "\t\t\t}", + "\t\t}", + "\t} else {", + "\t\tcontainerSCC.CapabilitiesCategory = CategoryID1", + "\t}", + "}" + ] + } + ], + "globals": [ + { + "name": "Category1", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:103" + }, + { + "name": "Category1NoUID0", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:120" + }, + { + "name": "Category2", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:137" + }, + { + "name": "Category3", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:154" + }, + { + 
"name": "category2AddCapabilities", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:101" + }, + { + "name": "category3AddCapabilities", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:102" + }, + { + "name": "dropAll", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:100" + }, + { + "name": "requiredDropCapabilities", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:99" + } + ], + "consts": [ + { + "name": "CategoryID1", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:77" + }, + { + "name": "CategoryID1NoUID0", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:78" + }, + { + "name": "CategoryID1NoUID0String", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:185" + }, + { + "name": "CategoryID1String", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:184" + }, + { + "name": "CategoryID2", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:79" + }, + { + "name": "CategoryID2String", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:186" + }, + { + "name": "CategoryID3", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:80" + }, + { + "name": "CategoryID3String", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:187" + }, + { + "name": "CategoryID4", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:81" + }, + { + "name": "CategoryID4String", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:188" + }, + { + "name": "NOK", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:21" + }, + { + "name": "NOKString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:27" + }, + { + "name": "OK", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:22" + }, + { + "name": "OKNOK", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:20" + }, + { + "name": "OKString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:26" + }, + { + "name": "Undefined", + "exported": true, + "type": "CategoryID", + "position": "/Users/deliedit/dev/certsuite/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go:76" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "certification", + "files": 2, + "imports": [ + "context", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-best-practices-for-k8s/oct/pkg/certdb", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Loads the suite of certification checks\n\nThis function registers several checks for Helm, operators, Helm charts, and\ncontainer certifications by creating a group in the checks database. It\nattaches skip functions that prevent tests from running when necessary data\nis missing and assigns each check a callback to perform its validation logic.\nThe function logs its activity at debug level and relies on global\nenvironment and validator objects for execution.", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:84", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := 
dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testHelmVersion", + "kind": "function", + "source": [ + "func testHelmVersion(check *checksdb.Check) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tclients := clientsholder.GetClientsHolder()", + "\t// Get the Tiller pod in the specified namespace", + "\tpodList, err := clients.K8sClient.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{", + "\t\tLabelSelector: \"app=helm,name=tiller\",", + "\t})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Could 
not get Tiller pod, err=%v\", err)", + "\t}", + "", + "\tif len(podList.Items) == 0 {", + "\t\tcheck.LogInfo(\"Tiller pod not found in any namespaces. Helm version is v3.\")", + "\t\tfor _, helm := range env.HelmChartReleases {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart was installed with helm v3\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.LogError(\"Tiller pod found, Helm version is v2 but v3 required\")", + "\tfor i := range podList.Items {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(podList.Items[i].Namespace, podList.Items[i].Name,", + "\t\t\t\"This pod is a Tiller pod. Helm Chart version is v2 but needs to be v3 due to the security risks associated with Tiller\", false))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = 
identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testAllOperatorCertified", + "kind": "function", + "source": [ + "func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\toperatorsUnderTest := env.Operators", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tocpMinorVersion := \"\"", + "\tif provider.IsOCPCluster() {", + "\t\t// Converts\tmajor.minor.patch version format to major.minor", + "\t\tconst majorMinorPatchCount = 3", + "\t\tsplitVersion := strings.SplitN(env.OpenshiftVersion, \".\", majorMinorPatchCount)", + "\t\tocpMinorVersion = splitVersion[0] + \".\" + splitVersion[1]", + "\t}", + "\tfor _, operator := range operatorsUnderTest {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "\t\tisCertified := validator.IsOperatorCertified(operator.Name, ocpMinorVersion)", + "\t\tif !isCertified {", + "\t\t\tcheck.LogError(\"Operator %q (channel %q) failed to be certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator failed to be certified for OpenShift\", false).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator %q (channel %q) is certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator certified OK\", true).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", 
+ "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testHelmCertified", + "kind": "function", + "source": [ + "func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\thelmchartsReleases := env.HelmChartReleases", + "", + "\t// Collect all of the failed helm charts", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, helm := range helmchartsReleases {", + "\t\tcheck.LogInfo(\"Testing Helm Chart Release %q\", helm.Name)", + "\t\tif !validator.IsHelmChartCertified(helm, env.K8sVersion) {", + "\t\t\tcheck.LogError(\"Helm Chart %q version %q is not certified.\", helm.Name, helm.Chart.Metadata.Version)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, 
\"helm chart is not certified\", false).", + "\t\t\t\tSetType(testhelper.HelmVersionType).", + "\t\t\t\tAddField(testhelper.Version, helm.Chart.Metadata.Version))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Helm Chart %q version %q is certified.\", helm.Name, helm.Chart.Metadata.Version)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart is certified\", true).", + "\t\t\t\tSetType(testhelper.HelmVersionType).", + "\t\t\t\tAddField(testhelper.Version, helm.Chart.Metadata.Version))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func 
GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainerCertificationStatusByDigest", + "kind": "function", + "source": [ + "func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, c := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", c)", + "\t\tswitch {", + "\t\tcase c.ContainerImageIdentifier.Digest == \"\":", + "\t\t\tcheck.LogError(\"Container %q is missing digest field, failing validation (repo=%q image=%q)\", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Missing digest field\", false).", + "\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tcase !testContainerCertification(c.ContainerImageIdentifier, validator):", + "\t\t\tcheck.LogError(\"Container %q digest not found in database, failing validation (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + "\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Digest not found in database\", false).", + 
"\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tdefault:", + "\t\t\tcheck.LogInfo(\"Container %q digest found in database, image certified (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + "\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Container is certified\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "getContainersToQuery", + "qualifiedName": "getContainersToQuery", + "exported": false, + "signature": "func(*provider.TestEnvironment)(map[provider.ContainerImageIdentifier]bool)", + "doc": "getContainersToQuery Creates a set of container image identifiers for querying\n\nThe function iterates over the containers defined in the test environment,\nadding each container's image identifier to a map with a true value. This map\nrepresents the collection of images that should be queried during testing. 
It\nreturns the constructed map.", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:125", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getContainersToQuery(env *provider.TestEnvironment) map[provider.ContainerImageIdentifier]bool {", + "\tcontainersToQuery := make(map[provider.ContainerImageIdentifier]bool)", + "\tfor _, cut := range env.Containers {", + "\t\tcontainersToQuery[cut.ContainerImageIdentifier] = true", + "\t}", + "\treturn containersToQuery", + "}" + ] + }, + { + "name": "testAllOperatorCertified", + "qualifiedName": "testAllOperatorCertified", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment, certdb.CertificationStatusValidator)()", + "doc": "testAllOperatorCertified Verifies that all operators are certified for the current OpenShift version\n\nThe function iterates over every operator listed in the test environment,\ndetermining whether each is certified for the detected OpenShift minor\nrelease. It logs successes or failures and builds separate lists of compliant\nand non‑compliant report objects. 
Finally, it records these results in the\ncheck’s outcome.", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:151", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "IsOCPCluster", + "kind": "function", + "source": [ + "func IsOCPCluster() bool {", + "\treturn env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "SplitN", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsOperatorCertified", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + 
"calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\toperatorsUnderTest := env.Operators", + "\tvar compliantObjects 
[]*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tocpMinorVersion := \"\"", + "\tif provider.IsOCPCluster() {", + "\t\t// Converts\tmajor.minor.patch version format to major.minor", + "\t\tconst majorMinorPatchCount = 3", + "\t\tsplitVersion := strings.SplitN(env.OpenshiftVersion, \".\", majorMinorPatchCount)", + "\t\tocpMinorVersion = splitVersion[0] + \".\" + splitVersion[1]", + "\t}", + "\tfor _, operator := range operatorsUnderTest {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "\t\tisCertified := validator.IsOperatorCertified(operator.Name, ocpMinorVersion)", + "\t\tif !isCertified {", + "\t\t\tcheck.LogError(\"Operator %q (channel %q) failed to be certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator failed to be certified for OpenShift\", false).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator %q (channel %q) is certified for OpenShift %s\", operator.Name, operator.Channel, ocpMinorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"Operator certified OK\", true).", + "\t\t\t\tAddField(testhelper.OCPVersion, ocpMinorVersion).", + "\t\t\t\tAddField(testhelper.OCPChannel, operator.Channel))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainerCertification", + "qualifiedName": "testContainerCertification", + "exported": false, + "signature": "func(provider.ContainerImageIdentifier, certdb.CertificationStatusValidator)(bool)", + "doc": "testContainerCertification Checks if a container image is certified in the database\n\nThis function 
accepts an image identifier and a validator, delegating to the\nvalidator's method to determine certification status. It returns true when\nthe image's registry, repository, tag, and digest match a certified record,\notherwise false. The result informs test logic that verifies container\ncompliance.", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:140", + "calls": [ + { + "name": "IsContainerCertified", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "testContainerCertificationStatusByDigest", + "kind": "function", + "source": [ + "func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, c := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", c)", + "\t\tswitch {", + "\t\tcase c.ContainerImageIdentifier.Digest == \"\":", + "\t\t\tcheck.LogError(\"Container %q is missing digest field, failing validation (repo=%q image=%q)\", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Missing digest field\", false).", + "\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tcase !testContainerCertification(c.ContainerImageIdentifier, validator):", + "\t\t\tcheck.LogError(\"Container %q digest not found in database, failing validation (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + 
"\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Digest not found in database\", false).", + "\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tdefault:", + "\t\t\tcheck.LogInfo(\"Container %q digest found in database, image certified (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + "\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Container is certified\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainerCertification(c provider.ContainerImageIdentifier, validator certdb.CertificationStatusValidator) bool {", + "\treturn validator.IsContainerCertified(c.Registry, c.Repository, c.Tag, c.Digest)", + "}" + ] + }, + { + "name": "testContainerCertificationStatusByDigest", + "qualifiedName": "testContainerCertificationStatusByDigest", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment, certdb.CertificationStatusValidator)()", + "doc": "testContainerCertificationStatusByDigest Validates container digests against certification database\n\nThe function iterates over containers in the test environment, checking that\neach has a digest and that the digest exists in the certification database.\nContainers missing a digest or with an unregistered digest are logged as\nerrors and added to non‑compliant results; 
compliant ones are recorded\naccordingly. Finally, it sets the check result with lists of compliant and\nnon‑compliant containers.", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:221", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "testContainerCertification", + "kind": "function", + "source": [ + "func testContainerCertification(c provider.ContainerImageIdentifier, validator certdb.CertificationStatusValidator) bool {", + "\treturn validator.IsContainerCertified(c.Registry, c.Repository, c.Tag, c.Digest)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + 
"\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + 
"\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, c := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", c)", + "\t\tswitch {", + "\t\tcase c.ContainerImageIdentifier.Digest == \"\":", + "\t\t\tcheck.LogError(\"Container %q is missing digest field, failing validation (repo=%q image=%q)\", c, c.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Missing digest field\", false).", + "\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tcase !testContainerCertification(c.ContainerImageIdentifier, validator):", + "\t\t\tcheck.LogError(\"Container %q digest not found in database, failing validation (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + 
"\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Digest not found in database\", false).", + "\t\t\t\tAddField(testhelper.Repository, c.ContainerImageIdentifier.Registry).", + "\t\t\t\tAddField(testhelper.ImageName, c.ContainerImageIdentifier.Repository).", + "\t\t\t\tAddField(testhelper.ImageDigest, c.ContainerImageIdentifier.Digest))", + "\t\tdefault:", + "\t\t\tcheck.LogInfo(\"Container %q digest found in database, image certified (repo=%q image=%q tag=%q digest=%q)\", c,", + "\t\t\t\tc.ContainerImageIdentifier.Registry, c.ContainerImageIdentifier.Repository,", + "\t\t\t\tc.ContainerImageIdentifier.Tag, c.ContainerImageIdentifier.Digest)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(c.Namespace, c.Podname, c.Name, \"Container is certified\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testHelmCertified", + "qualifiedName": "testHelmCertified", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment, certdb.CertificationStatusValidator)()", + "doc": "testHelmCertified Verifies each Helm chart release against certification rules\n\nThe function iterates over all Helm chart releases in the test environment,\nlogging status for each. It uses a validator to determine if a chart is\ncertified for the current Kubernetes version and records compliant or\nnon‑compliant reports accordingly. 
Finally, it sets the overall test result\nwith lists of both compliant and non‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:189", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsHelmChartCertified", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewHelmChartReportObject", + "kind": "function", + "source": [ + "func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, HelmType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aHelmChartName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewHelmChartReportObject", + "kind": "function", + "source": [ + "func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, HelmType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aHelmChartName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := 
checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) {", + "\thelmchartsReleases := env.HelmChartReleases", + "", + "\t// Collect all of the failed helm charts", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, helm := range helmchartsReleases {", + "\t\tcheck.LogInfo(\"Testing Helm Chart Release %q\", helm.Name)", + "\t\tif 
!validator.IsHelmChartCertified(helm, env.K8sVersion) {", + "\t\t\tcheck.LogError(\"Helm Chart %q version %q is not certified.\", helm.Name, helm.Chart.Metadata.Version)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart is not certified\", false).", + "\t\t\t\tSetType(testhelper.HelmVersionType).", + "\t\t\t\tAddField(testhelper.Version, helm.Chart.Metadata.Version))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Helm Chart %q version %q is certified.\", helm.Name, helm.Chart.Metadata.Version)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart is certified\", true).", + "\t\t\t\tSetType(testhelper.HelmVersionType).", + "\t\t\t\tAddField(testhelper.Version, helm.Chart.Metadata.Version))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testHelmVersion", + "qualifiedName": "testHelmVersion", + "exported": false, + "signature": "func(*checksdb.Check)()", + "doc": "testHelmVersion Checks Helm release version compatibility\n\nThis routine inspects the cluster for a Tiller pod to determine whether Helm\nv2 or v3 is in use. If no Tiller pod exists, it records all installed charts\nas compliant with Helm v3 and logs that v3 is being used. 
When a Tiller pod\nis found, it flags each such pod as non‑compliant because the required\nversion is v3, then sets the test result accordingly.", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:259", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "Pods", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewHelmChartReportObject", + "kind": "function", + "source": [ + "func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, HelmType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aHelmChartName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout 
= NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, 
+ "source": [ + "func testHelmVersion(check *checksdb.Check) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tclients := clientsholder.GetClientsHolder()", + "\t// Get the Tiller pod in the specified namespace", + "\tpodList, err := clients.K8sClient.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{", + "\t\tLabelSelector: \"app=helm,name=tiller\",", + "\t})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Could not get Tiller pod, err=%v\", err)", + "\t}", + "", + "\tif len(podList.Items) == 0 {", + "\t\tcheck.LogInfo(\"Tiller pod not found in any namespaces. Helm version is v3.\")", + "\t\tfor _, helm := range env.HelmChartReleases {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewHelmChartReportObject(helm.Namespace, helm.Name, \"helm chart was installed with helm v3\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.LogError(\"Tiller pod found, Helm version is v2 but v3 required\")", + "\tfor i := range podList.Items {", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(podList.Items[i].Namespace, podList.Items[i].Name,", + "\t\t\t\"This pod is a Tiller pod. 
Helm Chart version is v2 but needs to be v3 due to the security risks associated with Tiller\", false))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:47" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:44" + }, + { + "name": "skipIfNoHelmChartReleasesFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:67" + }, + { + "name": "skipIfNoOperatorsFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:59" + }, + { + "name": "validator", + "exported": false, + "type": "certdb.CertificationStatusValidator", + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:45" + } + ], + "consts": [ + { + "name": "CertifiedOperator", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:39" + }, + { + "name": "Online", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/certification/suite.go:40" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "name": "common", + "files": 3, + "imports": [ + "path", + "time" + ], + "structs": null, + "interfaces": null, + "functions": null, + "globals": [ + { + "name": "DefaultTimeout", + "exported": true, + "type": "", + "doc": "DefaultTimeout for creating new interactive sessions (oc, ssh, tty)", + "position": "/Users/deliedit/dev/certsuite/tests/common/env.go:36" + }, + { + "name": "PathRelativeToRoot", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/common/env.go:26" + }, + { + "name": "RelativeSchemaPath", + "exported": true, + "type": "", + "position": 
"/Users/deliedit/dev/certsuite/tests/common/env.go:29" + }, + { + "name": "schemaPath", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/common/env.go:32" + } + ], + "consts": [ + { + "name": "AccessControlTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:22" + }, + { + "name": "AffiliatedCertTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:25" + }, + { + "name": "LifecycleTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:23" + }, + { + "name": "ManageabilityTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:24" + }, + { + "name": "NetworkingTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:26" + }, + { + "name": "ObservabilityTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:27" + }, + { + "name": "OperatorTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:28" + }, + { + "name": "PerformanceTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:30" + }, + { + "name": "PlatformAlterationTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:29" + }, + { + 
"name": "PreflightTestKey", + "exported": true, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:31" + }, + { + "name": "defaultTimeoutSeconds", + "exported": false, + "doc": "Constants shared by multiple test suite packages", + "position": "/Users/deliedit/dev/certsuite/tests/common/constant.go:21" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "name": "rbac", + "files": 2, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "k8s.io/api/rbac/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1", + "k8s.io/client-go/kubernetes/typed/core/v1", + "strings" + ], + "structs": [ + { + "name": "CrdResource", + "exported": true, + "doc": "CrdResource Represents a custom resource definition's identity within RBAC\n\nThis struct holds the group and names of a CRD, including singular, plural,\nand short forms. It is used to match resources against role rules when\ndetermining permissions for custom resources.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:54", + "fields": { + "Group": "string", + "PluralName": "string", + "ShortNames": "[]string", + "SingularName": "string" + }, + "methodNames": null, + "source": [ + "type CrdResource struct {", + "\tGroup, SingularName, PluralName string", + "\tShortNames []string", + "}" + ] + }, + { + "name": "RoleResource", + "exported": true, + "doc": "RoleResource Represents an RBAC resource with its API group and kind\n\nThis structure holds the API group and the resource name used in Role or\nClusterRole rules. It allows code to identify which Kubernetes resource a\nrule applies to, such as \"apps\" for deployments or \"core\" for pods. 
The\nfields are simple strings that can be populated from YAML manifests or\nconstructed programmatically.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:45", + "fields": { + "Group": "string", + "Name": "string" + }, + "methodNames": null, + "source": [ + "type RoleResource struct {", + "\tGroup, Name string", + "}" + ] + }, + { + "name": "RoleRule", + "exported": true, + "doc": "RoleRule Represents a single permission within a role\n\nThis structure pairs an API resource, identified by its group and name, with\na verb that defines the action allowed on that resource. It is used\nthroughout the package to flatten complex Role objects into individual rules\nfor easier comparison and filtering. Each instance encapsulates one specific\npermission granted by a Kubernetes RBAC Role.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:33", + "fields": { + "Resource": "RoleResource", + "Verb": "string" + }, + "methodNames": null, + "source": [ + "type RoleRule struct {", + "\tResource RoleResource", + "\tVerb string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "EvaluateAutomountTokens", + "qualifiedName": "EvaluateAutomountTokens", + "exported": true, + "signature": "func(corev1typed.CoreV1Interface, *provider.Pod)(bool, string)", + "doc": "EvaluateAutomountTokens Checks if a Pod’s automount service account token is correctly disabled\n\nThe function inspects the Pod specification for an explicit\nautomountServiceAccountToken setting, returning failure if it is true. If\nunset, it retrieves the associated ServiceAccount’s setting; a false value\nor absence of the field indicates compliance, while true triggers failure\nwith a descriptive message. 
The result consists of a boolean success flag and\nan error string explaining any misconfiguration.\n\nnolint:gocritic", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/automount.go:36", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "IsAutomountServiceAccountSetOnSA", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testAutomountServiceToken", + "kind": "function", + "source": [ + "func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif put.Spec.ServiceAccountName == defaultServiceAccount {", + "\t\t\tcheck.LogError(\"Pod %q uses the default service account name.\", put.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found with default service account name\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\t// Evaluate the pod's automount service tokens and any attached service accounts", + "\t\tclient := clientsholder.GetClientsHolder()", + "\t\tpodPassed, newMsg := rbac.EvaluateAutomountTokens(client.K8sClient.CoreV1(), put)", + "\t\tif !podPassed {", + "\t\t\tcheck.LogError(\"%s\", newMsg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, newMsg, false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q does not have automount service tokens set to true\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, 
testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod does not have automount service tokens set to true\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func EvaluateAutomountTokens(client corev1typed.CoreV1Interface, put *provider.Pod) (bool, string) {", + "\t// The token can be specified in the pod directly", + "\t// or it can be specified in the service account of the pod", + "\t// if no service account is configured, then the pod will use the configuration", + "\t// of the default service account in that namespace", + "\t// the token defined in the pod has takes precedence", + "\t// the test would pass iif token is explicitly set to false", + "\t// if the token is set to true in the pod, the test would fail right away", + "\tif put.Spec.AutomountServiceAccountToken != nil \u0026\u0026 *put.Spec.AutomountServiceAccountToken {", + "\t\treturn false, fmt.Sprintf(\"Pod %s:%s is configured with automountServiceAccountToken set to true\", put.Namespace, put.Name)", + "\t}", + "", + "\t// Collect information about the service account attached to the pod.", + "\tsaAutomountServiceAccountToken, err := put.IsAutomountServiceAccountSetOnSA()", + "\tif err != nil {", + "\t\treturn false, err.Error()", + "\t}", + "", + "\t// The pod token is false means the pod is configured properly", + "\t// The pod is not configured and the service account is configured with false means", + "\t// the pod will inherit the behavior `false` and the test would pass", + "\tif (put.Spec.AutomountServiceAccountToken != nil \u0026\u0026 !*put.Spec.AutomountServiceAccountToken) || (saAutomountServiceAccountToken != nil \u0026\u0026 !*saAutomountServiceAccountToken) {", + "\t\treturn true, \"\"", + "\t}", + "", + "\t// the service account is configured with true means all the pods", + "\t// using this service account are not configured properly, register the 
error", + "\t// message and fail", + "\tif saAutomountServiceAccountToken != nil \u0026\u0026 *saAutomountServiceAccountToken {", + "\t\treturn false, fmt.Sprintf(\"serviceaccount %s:%s is configured with automountServiceAccountToken set to true, impacting pod %s\", put.Namespace, put.Spec.ServiceAccountName, put.Name)", + "\t}", + "", + "\t// the token should be set explicitly to false, otherwise, it's a failure", + "\t// register the error message and check the next pod", + "\tif saAutomountServiceAccountToken == nil {", + "\t\treturn false, fmt.Sprintf(\"serviceaccount %s:%s is not configured with automountServiceAccountToken set to false, impacting pod %s\", put.Namespace, put.Spec.ServiceAccountName, put.Name)", + "\t}", + "", + "\treturn true, \"\" // Pod has passed all checks", + "}" + ] + }, + { + "name": "FilterRulesNonMatchingResources", + "qualifiedName": "FilterRulesNonMatchingResources", + "exported": true, + "signature": "func([]RoleRule, []CrdResource)([]RoleRule)", + "doc": "FilterRulesNonMatchingResources Separates role rules into those that match CRD resources\n\nThis routine examines each rule against a list of CRD resources, collecting\nany rule whose resource group and plural name align with a CRD. Rules that do\nnot find a match are returned separately by computing the difference from the\noriginal list. 
The output consists of two slices: one for matching rules and\none for non‑matching ones.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:121", + "calls": [ + { + "name": "isResourceInRoleRule", + "kind": "function", + "source": [ + "func isResourceInRoleRule(crd CrdResource, roleRule RoleRule) bool {", + "\t// remove subresources to keep only resource (plural) name", + "\truleResourcePluralName := strings.Split(roleRule.Resource.Name, \"/\")[0]", + "", + "\treturn crd.Group == roleRule.Resource.Group \u0026\u0026 crd.PluralName == ruleResourcePluralName", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "SliceDifference", + "kind": "function", + "source": [ + "func SliceDifference(s1, s2 []RoleRule) (diff []RoleRule) {", + "\tvar temp []RoleRule", + "\tif len(s2) \u003e len(s1) {", + "\t\ttemp = s1", + "\t\ts1 = s2", + "\t\ts2 = temp", + "\t}", + "\tfor _, v1 := range s1 {", + "\t\tmissing := true", + "\t\tfor _, v2 := range s2 {", + "\t\t\tif v1 == v2 {", + "\t\t\t\tmissing = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif missing {", + "\t\t\tdiff = append(diff, v1)", + "\t\t}", + "\t}", + "\treturn diff", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testCrdRoles", + "kind": "function", + "source": [ + "func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcrdResources := rbac.GetCrdResources(env.Crds)", + "\tfor roleIndex := range env.Roles {", + "\t\tif !stringhelper.StringInSlice[string](env.Namespaces, env.Roles[roleIndex].Namespace, false) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tallRules := rbac.GetAllRules(\u0026env.Roles[roleIndex])", + "", + "\t\tmatchingRules, nonMatchingRules := rbac.FilterRulesNonMatchingResources(allRules, crdResources)", + 
"\t\tif len(matchingRules) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor _, aRule := range matchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) applies to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"This applies to CRDs under test\", testhelper.RoleRuleType, true, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "\t\tfor _, aRule := range nonMatchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) does not apply to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"This rule does not apply to CRDs under test\", testhelper.RoleRuleType, false, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "", + "\t\tif len(nonMatchingRules) == 0 {", + "\t\t\tcheck.LogInfo(\"Role %q rules only apply to CRDs under test\", env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules only apply to CRDs under test\",", + "\t\t\t\ttesthelper.RoleType, true, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Role %q rules 
apply to a mix of CRDs under test and others.\", env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules apply to a mix of CRDs under test and others. See non compliant role rule objects.\",", + "\t\t\t\ttesthelper.RoleType, false, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource) (matching, nonMatching []RoleRule) {", + "\tfor _, aRule := range ruleList {", + "\t\tfor _, aResource := range resourceList {", + "\t\t\tif isResourceInRoleRule(aResource, aRule) {", + "\t\t\t\tmatching = append(matching, aRule)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tnonMatching = SliceDifference(ruleList, matching)", + "\treturn matching, nonMatching", + "}" + ] + }, + { + "name": "GetAllRules", + "qualifiedName": "GetAllRules", + "exported": true, + "signature": "func(*rbacv1.Role)([]RoleRule)", + "doc": "GetAllRules Collects every rule from a role into individual entries\n\nThe function iterates over each rule in the supplied role, expanding its API\ngroups, resources, and verbs into separate RoleRule objects. 
Each combination\nof group, resource name, and verb is appended to a slice, which is returned.\nThe resulting list can be used for detailed policy analysis or filtering.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:84", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testCrdRoles", + "kind": "function", + "source": [ + "func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcrdResources := rbac.GetCrdResources(env.Crds)", + "\tfor roleIndex := range env.Roles {", + "\t\tif !stringhelper.StringInSlice[string](env.Namespaces, env.Roles[roleIndex].Namespace, false) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tallRules := rbac.GetAllRules(\u0026env.Roles[roleIndex])", + "", + "\t\tmatchingRules, nonMatchingRules := rbac.FilterRulesNonMatchingResources(allRules, crdResources)", + "\t\tif len(matchingRules) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor _, aRule := range matchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) applies to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"This applies to CRDs under test\", testhelper.RoleRuleType, true, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "\t\tfor _, aRule := range nonMatchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, 
resource-group=%q, verb=%q, role-name=%q) does not apply to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"This rule does not apply to CRDs under test\", testhelper.RoleRuleType, false, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "", + "\t\tif len(nonMatchingRules) == 0 {", + "\t\t\tcheck.LogInfo(\"Role %q rules only apply to CRDs under test\", env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules only apply to CRDs under test\",", + "\t\t\t\ttesthelper.RoleType, true, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Role %q rules apply to a mix of CRDs under test and others.\", env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules apply to a mix of CRDs under test and others. 
See non compliant role rule objects.\",", + "\t\t\t\ttesthelper.RoleType, false, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetAllRules(aRole *rbacv1.Role) (ruleList []RoleRule) {", + "\tfor _, aRule := range aRole.Rules {", + "\t\tfor _, aGroup := range aRule.APIGroups {", + "\t\t\tfor _, aResource := range aRule.Resources {", + "\t\t\t\tfor _, aVerb := range aRule.Verbs {", + "\t\t\t\t\tvar aRoleRule RoleRule", + "\t\t\t\t\taRoleRule.Resource.Group = aGroup", + "\t\t\t\t\taRoleRule.Resource.Name = aResource", + "\t\t\t\t\taRoleRule.Verb = aVerb", + "\t\t\t\t\truleList = append(ruleList, aRoleRule)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn ruleList", + "}" + ] + }, + { + "name": "GetCrdResources", + "qualifiedName": "GetCrdResources", + "exported": true, + "signature": "func([]*apiextv1.CustomResourceDefinition)([]CrdResource)", + "doc": "GetCrdResources Converts CRD definitions into a slice of resource descriptors\n\nThis function iterates over each CustomResourceDefinition provided,\nextracting the group, singular name, plural name, and short names from its\nspecification. For every CRD it creates a CrdResource struct populated with\nthese fields and appends it to a list. 
The resulting slice is returned for\nuse in permission checks or reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:66", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testCrdRoles", + "kind": "function", + "source": [ + "func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcrdResources := rbac.GetCrdResources(env.Crds)", + "\tfor roleIndex := range env.Roles {", + "\t\tif !stringhelper.StringInSlice[string](env.Namespaces, env.Roles[roleIndex].Namespace, false) {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tallRules := rbac.GetAllRules(\u0026env.Roles[roleIndex])", + "", + "\t\tmatchingRules, nonMatchingRules := rbac.FilterRulesNonMatchingResources(allRules, crdResources)", + "\t\tif len(matchingRules) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor _, aRule := range matchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) applies to CRDs under test\",", + "\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(\"This applies to CRDs under test\", testhelper.RoleRuleType, true, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "\t\tfor _, aRule := range nonMatchingRules {", + "\t\t\tcheck.LogInfo(\"Rule (resource-name=%q, resource-group=%q, verb=%q, role-name=%q) does not apply to CRDs under test\",", + 
"\t\t\t\taRule.Resource.Name, aRule.Resource.Group, aRule.Verb, env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(\"This rule does not apply to CRDs under test\", testhelper.RoleRuleType, false, env.Roles[roleIndex].Namespace).", + "\t\t\t\tAddField(testhelper.RoleName, env.Roles[roleIndex].Name).", + "\t\t\t\tAddField(testhelper.Group, aRule.Resource.Group).", + "\t\t\t\tAddField(testhelper.ResourceName, aRule.Resource.Name).", + "\t\t\t\tAddField(testhelper.Verb, aRule.Verb))", + "\t\t}", + "", + "\t\tif len(nonMatchingRules) == 0 {", + "\t\t\tcheck.LogInfo(\"Role %q rules only apply to CRDs under test\", env.Roles[roleIndex].Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules only apply to CRDs under test\",", + "\t\t\t\ttesthelper.RoleType, true, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Role %q rules apply to a mix of CRDs under test and others.\", env.Roles[roleIndex].Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedNamedReportObject(\"This role's rules apply to a mix of CRDs under test and others. 
See non compliant role rule objects.\",", + "\t\t\t\ttesthelper.RoleType, false, env.Roles[roleIndex].Namespace, env.Roles[roleIndex].Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetCrdResources(crds []*apiextv1.CustomResourceDefinition) (resourceList []CrdResource) {", + "\tfor _, crd := range crds {", + "\t\tvar aResource CrdResource", + "\t\taResource.Group = crd.Spec.Group", + "\t\taResource.SingularName = crd.Spec.Names.Singular", + "\t\taResource.PluralName = crd.Spec.Names.Plural", + "\t\taResource.ShortNames = crd.Spec.Names.ShortNames", + "\t\tresourceList = append(resourceList, aResource)", + "\t}", + "\treturn resourceList", + "}" + ] + }, + { + "name": "SliceDifference", + "qualifiedName": "SliceDifference", + "exported": true, + "signature": "func([]RoleRule, []RoleRule)([]RoleRule)", + "doc": "SliceDifference identifies RoleRule entries present in one slice but absent from another\n\nThe function takes two slices of RoleRule values and returns a new slice\ncontaining elements that exist in the first slice but not in the second. It\nswaps the slices if the second is longer to reduce comparisons, then iterates\nthrough each element of the larger slice, checking for equality against all\nelements of the other slice. 
Matching items are omitted; non‑matching ones\nare appended to the result, which is returned.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:141", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "name": "FilterRulesNonMatchingResources", + "kind": "function", + "source": [ + "func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource) (matching, nonMatching []RoleRule) {", + "\tfor _, aRule := range ruleList {", + "\t\tfor _, aResource := range resourceList {", + "\t\t\tif isResourceInRoleRule(aResource, aRule) {", + "\t\t\t\tmatching = append(matching, aRule)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tnonMatching = SliceDifference(ruleList, matching)", + "\treturn matching, nonMatching", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SliceDifference(s1, s2 []RoleRule) (diff []RoleRule) {", + "\tvar temp []RoleRule", + "\tif len(s2) \u003e len(s1) {", + "\t\ttemp = s1", + "\t\ts1 = s2", + "\t\ts2 = temp", + "\t}", + "\tfor _, v1 := range s1 {", + "\t\tmissing := true", + "\t\tfor _, v2 := range s2 {", + "\t\t\tif v1 == v2 {", + "\t\t\t\tmissing = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif missing {", + "\t\t\tdiff = append(diff, v1)", + "\t\t}", + "\t}", + "\treturn diff", + "}" + ] + }, + { + "name": "isResourceInRoleRule", + "qualifiedName": "isResourceInRoleRule", + "exported": false, + "signature": "func(CrdResource, RoleRule)(bool)", + "doc": "isResourceInRoleRule Determines if a CRD matches a role rule by group and resource name\n\nThe function receives a custom resource definition and a role rule, extracts\nthe base resource name from the rule by removing any subresource part, and\nthen compares the API group and plural name of the CRD to 
those of the rule.\nIf both match exactly, it returns true; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/common/rbac/roles.go:107", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac", + "name": "FilterRulesNonMatchingResources", + "kind": "function", + "source": [ + "func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource) (matching, nonMatching []RoleRule) {", + "\tfor _, aRule := range ruleList {", + "\t\tfor _, aResource := range resourceList {", + "\t\t\tif isResourceInRoleRule(aResource, aRule) {", + "\t\t\t\tmatching = append(matching, aRule)", + "\t\t\t}", + "\t\t}", + "\t}", + "\tnonMatching = SliceDifference(ruleList, matching)", + "\treturn matching, nonMatching", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isResourceInRoleRule(crd CrdResource, roleRule RoleRule) bool {", + "\t// remove subresources to keep only resource (plural) name", + "\truleResourcePluralName := strings.Split(roleRule.Resource.Name, \"/\")[0]", + "", + "\treturn crd.Group == roleRule.Resource.Group \u0026\u0026 crd.PluralName == ruleResourcePluralName", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "identifiers", + "files": 6, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "AddCatalogEntry", + "qualifiedName": "AddCatalogEntry", + "exported": true, + "signature": "func(string, string, string, string, string, string, bool, map[string]string, ...string)(claim.Identifier)", + "doc": "AddCatalogEntry Creates a test case entry in the 
catalog\n\nThis function registers a new test by building a descriptive record from its\nID, suite name, description, remediation, and other metadata. It applies\ndefaults for missing exception or reference strings, ensures at least one tag\nis present, then calls the claim builder to generate a structured test case\ndescription. The resulting identifier and description are stored in global\nmaps for later retrieval during test execution.", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:72", + "calls": [ + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "name": "BuildTestCaseDescription", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog", + "name": "addPreflightTestsToCatalog", + "kind": "function", + "source": [ + "func addPreflightTestsToCatalog() {", + "\tconst dummy = \"dummy\"", + "\t// Create artifacts handler", + "\tartifactsWriter, err := artifacts.NewMapWriter()", + "\tif err != nil {", + "\t\tlog.Error(\"Error creating artifact, failed to add preflight tests to catalog: %v\", err)", + "\t\treturn", + "\t}", + "\tctx := artifacts.ContextWithWriter(context.TODO(), artifactsWriter)", + "\toptsOperator := []plibOperator.Option{}", + "\toptsContainer := []plibContainer.Option{}", + "\tcheckOperator := plibOperator.NewCheck(dummy, dummy, []byte(\"\"), optsOperator...)", + "\tcheckContainer := plibContainer.NewCheck(dummy, optsContainer...)", + "\t_, checksOperator, err := checkOperator.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight operator tests: %v\", err)", + "\t}", + "\t_, checksContainer, err := 
checkContainer.List(ctx)", + "\tif err != nil {", + "\t\tlog.Error(\"Error getting preflight container tests: %v\", err)", + "\t}", + "", + "\tallChecks := checksOperator", + "\tallChecks = append(allChecks, checksContainer...)", + "", + "\tfor _, c := range allChecks {", + "\t\tremediation := c.Help().Suggestion", + "", + "\t\t// Custom override for specific preflight test remediation", + "\t\tif c.Name() == \"FollowsRestrictedNetworkEnablementGuidelines\" {", + "\t\t\tremediation = \"If consumers of your operator may need to do so on a restricted network, implement the guidelines outlined in OCP documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/latest/html/disconnected_environments/olm-restricted-networks\"", + "\t\t}", + "", + "\t\t_ = identifiers.AddCatalogEntry(", + "\t\t\tc.Name(),", + "\t\t\tcommon.PreflightTestKey,", + "\t\t\tc.Metadata().Description,", + "\t\t\tremediation,", + "\t\t\tidentifiers.NoDocumentedProcess,", + "\t\t\tidentifiers.NoDocLink,", + "\t\t\ttrue,", + "\t\t\tmap[string]string{", + "\t\t\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\t\t\tidentifiers.Telco: identifiers.Optional,", + "\t\t\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\t\t\tidentifiers.Extended: identifiers.Optional,", + "\t\t\t},", + "\t\t\tidentifiers.TagCommon)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "InitCatalog", + "kind": "function", + "source": [ + "func InitCatalog() map[claim.Identifier]claim.TestCaseDescription {", + "\tTestNetworkPolicyDenyAllIdentifier = AddCatalogEntry(", + "\t\t\"network-policy-deny-all\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that network policies attached to namespaces running workload pods contain a default deny-all rule for both ingress and egress traffic`,", + "\t\tNetworkPolicyDenyAllRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + 
"\t\tTestNetworkPolicyDenyAllIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTest1337UIDIdentifier = AddCatalogEntry(", + "\t\t\"no-1337-uid\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks that all pods are not using the securityContext UID 1337`,", + "\t\tUID1337Remediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTest1337UIDIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestLimitedUseOfExecProbesIdentifier = AddCatalogEntry(", + "\t\t\"max-resources-exec-probes\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Checks that less than 10 exec probes are configured in the cluster for this workload. Also checks that the periodSeconds parameter for each probe is superior or equal to 10.`,", + "\t\tLimitedUseOfExecProbesRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestLimitedUseOfExecProbesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional},", + "\t\tTagFarEdge)", + "", + "\tTestHelmVersionIdentifier = AddCatalogEntry(", + "\t\t\"helm-version\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Test to check if the helm chart is v3`,", + "\t\tHelmVersionV3Remediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestHelmVersionIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\t// TestContainerIsCertifiedDigestIdentifier tests whether the container has passed Container Certification.", + 
"\tTestContainerIsCertifiedDigestIdentifier = AddCatalogEntry(", + "\t\t\"container-is-certified-digest\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).`,", + "\t\tContainerIsCertifiedDigestRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestContainerIsCertifiedDigestIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHugePages2M = AddCatalogEntry(", + "\t\t\"hugepages-2m-only\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that pods using hugepages only use 2Mi size`,", + "\t\tPodHugePages2MRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestPodHugePages2MDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestPodHugePages1G = AddCatalogEntry(", + "\t\t\"hugepages-1g-only\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that pods using hugepages only use 1Gi size`,", + "\t\tPodHugePages1GRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPodHugePages1GDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestHyperThreadEnable = AddCatalogEntry(", + "\t\t\"hyperthread-enable\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that baremetal workers have hyperthreading enabled`,", + "\t\tHyperThreadEnable,", + "\t\tNoDocumentedProcess,", + "\t\tTestHyperThreadEnableDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: 
Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestReservedExtendedPartnerPorts = AddCatalogEntry(", + "\t\t\"reserved-partner-ports\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that pods and containers are not consuming ports designated as reserved by partner`,", + "\t\tReservedPartnerPortsRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestReservedExtendedPartnerPortsDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestAffinityRequiredPods = AddCatalogEntry(", + "\t\t\"affinity-required-pods\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.`,", + "\t\tAffinityRequiredRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestAffinityRequiredPodsDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestStorageProvisioner = AddCatalogEntry(", + "\t\t\"storage-provisioner\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Checks that pods do not place persistent volumes on local storage in multinode clusters. 
Local storage is recommended for single node clusters, but only one type of local storage should be installed (lvms or noprovisioner).`,", + "\t\tCheckStorageProvisionerRemediation,", + "\t\tNoExceptions,", + "\t\tTestStorageProvisionerDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestContainerPostStartIdentifier = AddCatalogEntry(", + "\t\t\"container-poststart\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.`, //nolint:lll", + "\t\t`PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. PostStart is used to configure container, set up dependencies, record new creation. 
It can also be used to check that a required API is available before the container’s work begins.`, //nolint:lll", + "\t\tContainerPostStartIdentifierRemediation,", + "\t\tTestContainerPostStartIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainerPrestopIdentifier = AddCatalogEntry(", + "\t\t\"container-prestop\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.`, //nolint:lll", + "\t\t`The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. 
All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code.`, //nolint:lll", + "\t\tContainerPrestopIdentifierRemediation,", + "\t\tTestContainerPrestopIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestDpdkCPUPinningExecProbe = AddCatalogEntry(", + "\t\t\"dpdk-cpu-pinning-exec-probe\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`If a workload is doing CPU pinning, exec probes may not be used.`,", + "\t\tDpdkCPUPinningExecProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestDpdkCPUPinningExecProbeDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNetAdminIdentifier = AddCatalogEntry(", + "\t\t\"net-admin-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use NET_ADMIN capability. `+iptablesNftablesImplicitCheck,", + "\t\tSecConRemediation,", + "\t\t`Exception will be considered for user plane or networking functions (e.g. SR-IOV, Multicast). 
Must identify which container requires the capability and detail why.`,", + "\t\tTestNetAdminIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestSysAdminIdentifier = AddCatalogEntry(", + "\t\t\"sys-admin-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use SYS_ADMIN capability`,", + "\t\tSecConRemediation+\" Containers should not use the SYS_ADMIN Linux capability.\",", + "\t\tNoExceptions,", + "\t\tTestSysAdminIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIpcLockIdentifier = AddCatalogEntry(", + "\t\t\"ipc-lock-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use IPC_LOCK capability. Workloads should avoid accessing host resources - spec.HostIpc should be false.`,", + "\t\tSecConRemediation,", + "\t\t`Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.`,", + "\t\tTestIpcLockIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNetRawIdentifier = AddCatalogEntry(", + "\t\t\"net-raw-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use NET_RAW capability. `+iptablesNftablesImplicitCheck,", + "\t\tSecConRemediation,", + "\t\t`Exception will be considered for user plane or networking functions. 
Must identify which container requires the capability and detail why.`,", + "\t\tTestNetRawIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestBpfIdentifier = AddCatalogEntry(", + "\t\t\"bpf-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use BPF capability. Workloads should avoid loading eBPF filters`,", + "\t\tBpfCapabilityRemediation,", + "\t\t`Exception can be considered. Must identify which container requires the capability and detail why.`,", + "\t\tTestBpfIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestExclusiveCPUPoolIdentifier = AddCatalogEntry(", + "\t\t\"exclusive-cpu-pool\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool`,", + "\t\tExclusiveCPUPoolRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestExclusiveCPUPoolIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestSharedCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"shared-cpu-pool-non-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if application workload runs in shared CPU pool, it chooses non-RT CPU schedule policy to always share the CPU with other applications and kernel threads.`,", + "\t\tSharedCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSharedCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + 
"\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestExclusiveCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"exclusive-cpu-pool-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if application workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.`,", + "\t\tExclusiveCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestExclusiveCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestIsolatedCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"isolated-cpu-pool-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy`,", + "\t\tIsolatedCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestIsolatedCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestRtAppNoExecProbes = AddCatalogEntry(", + "\t\t\"rt-apps-no-exec-probes\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if one container runs a real time application exec probes are not used`,", + "\t\tRtAppNoExecProbesRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestRtAppNoExecProbesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestRestartOnRebootLabelOnPodsUsingSRIOV = 
AddCatalogEntry(", + "\t\t\"restart-on-reboot-sriov-pod\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.`,", + "\t\tSRIOVPodsRestartOnRebootLabelRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestRestartOnRebootLabelOnPodsUsingSRIOVDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestNetworkAttachmentDefinitionSRIOVUsingMTU = AddCatalogEntry(", + "\t\t\"network-attachment-definition-sriov-mtu\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Ensures that MTU values are set correctly in NetworkAttachmentDefinitions for SRIOV network interfaces.`,", + "\t\tSRIOVNetworkAttachmentDefinitionMTURemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestNetworkAttachmentDefinitionSRIOVUsingMTUDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestSecConNonRootUserIDIdentifier = AddCatalogEntry(", + "\t\t\"security-context-non-root-user-id-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks securityContext's runAsNonRoot and runAsUser fields at pod and container level to make sure containers are not run as root.`,", + "\t\tSecConRunAsNonRootUserRemediation,", + "\t\tSecConNonRootUserExceptionProcess,", + "\t\tTestSecConNonRootUserIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSecConReadOnlyFilesystem = AddCatalogEntry(", + "\t\t\"security-context-read-only-file-system\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks the security context 
readOnlyFileSystem in containers is enabled. Containers should not try modify its own filesystem.`,", + "\t\tSecConNonRootUserExceptionProcess,", + "\t\tNoExceptions,", + "\t\tTestSecContextIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSecContextIdentifier = AddCatalogEntry(", + "\t\t\"security-context\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks the security context matches one of the 4 categories`,", + "\t\t`Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. The 4 categories are defined in Requirement ID 94118 [here](#security-context-categories)`, //nolint:lll", + "\t\t`no exception needed for optional/extended test`,", + "\t\tTestSecContextIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestSecConPrivilegeEscalation = AddCatalogEntry(", + "\t\t\"security-context-privilege-escalation\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).`,", + "\t\tSecConPrivilegeRemediation,", + "\t\tNoExceptions,", + "\t\tTestSecConPrivilegeEscalationDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestContainerHostPort = AddCatalogEntry(", + "\t\t\"container-host-port\",", + 
"\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies if containers define a hostPort.`,", + "\t\tContainerHostPortRemediation,", + "\t\t\"Exception for host resource access tests will only be considered in rare cases where it is absolutely needed\",", + "\t\tTestContainerHostPortDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostNetwork = AddCatalogEntry(", + "\t\t\"pod-host-network\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostNetwork parameter is not set (not present)`,", + "\t\tPodHostNetworkRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostNetworkDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostPath = AddCatalogEntry(", + "\t\t\"pod-host-path\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostPath parameter is not set (not present)`,", + "\t\tPodHostPathRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostPathDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostIPC = AddCatalogEntry(", + "\t\t\"pod-host-ipc\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostIpc parameter is set to false`,", + "\t\tPodHostIPCRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + 
"\t\tTestPodHostIPCDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostPID = AddCatalogEntry(", + "\t\t\"pod-host-pid\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostPid parameter is set to false`,", + "\t\tPodHostPIDRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostPIDDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestHugepagesNotManuallyManipulated = AddCatalogEntry(", + "\t\t\"hugepages-config\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet. First, the \"worker\" MachineConfig is polled, and the Hugepage settings are extracted. Next, the underlying Nodes are polled for configured HugePages through inspection of /proc/meminfo. 
The results are compared, and the test passes only if they are the same.`, //nolint:lll", + "\t\tHugepagesNotManuallyManipulatedRemediation,", + "\t\tNoExceptions,", + "\t\tTestHugepagesNotManuallyManipulatedDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv4ConnectivityIdentifier = AddCatalogEntry(", + "\t\t\"icmpv4-connectivity\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\t`Ensure that the workload is able to communicate via the Default OpenShift network. In some rare cases, workloads may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence.`, //nolint:lll", + "\t\t`No exceptions - must be able to communicate on default network using IPv4`,", + "\t\tTestICMPv4ConnectivityIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv6ConnectivityIdentifier = AddCatalogEntry(", + "\t\t\"icmpv6-connectivity\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv6 on the Default OpenShift network. 
This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv6ConnectivityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv6ConnectivityIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv4ConnectivityMultusIdentifier = AddCatalogEntry(", + "\t\t\"icmpv4-connectivity-multus\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv4ConnectivityMultusRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv4ConnectivityMultusIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestICMPv6ConnectivityMultusIdentifier = AddCatalogEntry(", + "\t\t\"icmpv6-connectivity-multus\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). 
If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv6ConnectivityMultusRemediation+` Not applicable if IPv6/MULTUS is not supported.`,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv6ConnectivityMultusIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestServiceDualStackIdentifier = AddCatalogEntry(", + "\t\t\"dual-stack-service\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.`,", + "\t\tTestServiceDualStackRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestServiceDualStackIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestNamespaceBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"namespace\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that all workload resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets", + "the following conditions: (1) It was declared in the yaml config file under the targetNameSpaces", + "tag. 
(2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-`,", + "\t\tNamespaceBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestNamespaceBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestNonTaintedNodeKernelsIdentifier = AddCatalogEntry(", + "\t\t\"tainted-node-kernel\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Ensures that the Node(s) hosting workloads do not utilize tainted kernels. This test case is especially", + "important to support Highly Available workloads, since when a workload is re-instantiated on a backup Node,", + "that Node's kernel may not have the same hacks.'`,", + "\t\tNonTaintedNodeKernelsRemediation,", + "\t\t`If taint is necessary, document details of the taint and why it's needed by workload or environment.`,", + "\t\tTestNonTaintedNodeKernelsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorInstallStatusSucceededIdentifier = AddCatalogEntry(", + "\t\t\"install-status-succeeded\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Ensures that the target workload operators report \"Succeeded\" as their installation status.`,", + "\t\tOperatorInstallStatusSucceededRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorInstallStatusSucceededIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorNoSCCAccess = AddCatalogEntry(", + "\t\t\"install-status-no-privileges\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Checks whether 
the operator needs access to Security Context Constraints. Test passes if clusterPermissions is not present in the CSV manifest or is present with no RBAC rules related to SCCs.`,", + "\t\tOperatorNoPrivilegesRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorNoPrivilegesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorIsCertifiedIdentifier = AddCatalogEntry(", + "\t\t\"operator-is-certified\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether the workload Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).`,", + "\t\tOperatorIsCertifiedRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestOperatorIsCertifiedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestHelmIsCertifiedIdentifier = AddCatalogEntry(", + "\t\t\"helmchart-is-certified\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.`,", + "\t\tHelmIsCertifiedRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestHelmIsCertifiedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorIsInstalledViaOLMIdentifier = AddCatalogEntry(", + "\t\t\"install-source\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether a workload Operator is installed via OLM.`,", + "\t\tOperatorIsInstalledViaOLMRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorIsInstalledViaOLMIdentifierDocLink,", + "\t\ttrue,", + 
"\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace = AddCatalogEntry(", + "\t\t\"single-or-multi-namespaced-allowed-in-tenant-namespaces\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Verifies that only single/multi namespaced operators are installed in a tenant-dedicated namespace. The test fails if this namespace contains any installed operator with Own/All-namespaced install mode, unlabeled operators, operands of any operator installed elsewhere, or pods unrelated to any operator.`, //nolint:lll", + "\t\tSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceRemediation,", + "\t\tNoExceptions,", + "\t\tTestSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOperatorHasSemanticVersioningIdentifier = AddCatalogEntry(", + "\t\t\"semantic-versioning\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether an application Operator has a valid semantic versioning.`,", + "\t\tOperatorHasSemanticVersioningRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorHasSemanticVersioningIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorOlmSkipRange = AddCatalogEntry(", + "\t\t\"olm-skip-range\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Test that checks the operator has a valid olm skip range.`,", + "\t\tOperatorOlmSkipRangeRemediation,", + "\t\tOperatorSkipRangeExceptionProcess,", + "\t\tTestOperatorOlmSkipRangeDocLink,", + "\t\tfalse,", + 
"\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCrdVersioningIdentifier = AddCatalogEntry(", + "\t\t\"crd-versioning\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether the Operator CRD has a valid versioning.`,", + "\t\tOperatorCrdVersioningRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCrdVersioningIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCrdSchemaIdentifier = AddCatalogEntry(", + "\t\t\"crd-openapi-schema\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether an application Operator CRD is defined with OpenAPI spec.`,", + "\t\tOperatorCrdSchemaIdentifierRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCrdSchemaIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorSingleCrdOwnerIdentifier = AddCatalogEntry(", + "\t\t\"single-crd-owner\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether a CRD is owned by a single Operator.`,", + "\t\tOperatorSingleCrdOwnerRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorSingleCrdOwnerIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorPodsNoHugepages = AddCatalogEntry(", + "\t\t\"pods-no-hugepages\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests that the pods do not have hugepages enabled.`,", + "\t\tOperatorPodsNoHugepagesRemediation,", + "\t\tNoExceptions,", 
+ "\t\tTestOperatorPodsNoHugepagesDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCatalogSourceBundleCountIdentifier = AddCatalogEntry(", + "\t\t\"catalogsource-bundle-count\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests operator catalog source bundle count is less than 1000`,", + "\t\tOperatorCatalogSourceBundleCountRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCatalogSourceBundleCountIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestMultipleSameOperatorsIdentifier = AddCatalogEntry(", + "\t\t\"multiple-same-operators\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether multiple instances of the same Operator CSV are installed.`,", + "\t\tMultipleSameOperatorsRemediation,", + "\t\tNoExceptions,", + "\t\tTestMultipleSameOperatorsIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodNodeSelectorAndAffinityBestPractices = AddCatalogEntry(", + "\t\t\"pod-scheduling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensures that workload Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. Workloads shall not use node selectors nor taints/tolerations to assign pod location.`,", + "\t\tPodNodeSelectorAndAffinityBestPracticesRemediation,", + "\t\t`Exception will only be considered if application requires specialized hardware. 
Must specify which container requires special hardware and why.`,", + "\t\tTestPodNodeSelectorAndAffinityBestPracticesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodHighAvailabilityBestPractices = AddCatalogEntry(", + "\t\t\"pod-high-availability\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensures that workloads Pods specify podAntiAffinity rules and replica value is set to more than 1.`,", + "\t\tPodHighAvailabilityBestPracticesRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestPodHighAvailabilityBestPracticesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodClusterRoleBindingsBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"cluster-role-bindings\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that a Pod does not specify ClusterRoleBindings.`,", + "\t\tPodClusterRoleBindingsBestPracticesRemediation,", + "\t\t\"Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles \u0026 role bindings\",", + "\t\tTestPodClusterRoleBindingsBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodDeploymentBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-owner-type\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that the workload Pods are deployed as part of a ReplicaSet(s)/StatefulSet(s).`,", + "\t\tPodDeploymentBestPracticesRemediation,", + "\t\tNoDocumentedProcess+` Pods should not be deployed as DaemonSet or naked pods.`,", + 
"\t\tTestPodDeploymentBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestDeploymentScalingIdentifier = AddCatalogEntry(", + "\t\t\"deployment-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that workload deployments support scale in/out operations. First, the test starts getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA the test is changing the min and max value to deployment Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the deployment/s`, //nolint:lll", + "\t\tDeploymentScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestDeploymentScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestStatefulSetScalingIdentifier = AddCatalogEntry(", + "\t\t\"statefulset-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that workload statefulsets support scale in/out operations. First, the test starts getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. 
In case of statefulsets that are managed by HPA the test is changing the min and max value to statefulset Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the statefulset/s`, //nolint:lll", + "\t\tStatefulSetScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestStatefulSetScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestImagePullPolicyIdentifier = AddCatalogEntry(", + "\t\t\"image-pull-policy\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.`, //nolint:lll", + "\t\tImagePullPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestImagePullPolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodRecreationIdentifier = AddCatalogEntry(", + "\t\t\"pod-recreation\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that a workload is configured to support High Availability. First, this test cordons and drains a Node that hosts the workload Pod. 
Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.`, //nolint:lll", + "\t\tPodRecreationRemediation,", + "\t\t`No exceptions - workloads should be able to be restarted/recreated.`,", + "\t\tTestPodRecreationIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodRoleBindingsBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-role-bindings\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that a workload does not utilize RoleBinding(s) in a non-workload Namespace.`,", + "\t\tPodRoleBindingsBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodRoleBindingsBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodServiceAccountBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-service-account\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that each workload Pod utilizes a valid Service Account. Default or empty service account is not valid.`,", + "\t\tPodServiceAccountBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodServiceAccountBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodAutomountServiceAccountIdentifier = AddCatalogEntry(", + "\t\t\"pod-automount-service-account-token\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that all pods under test have automountServiceAccountToken set to false. 
Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true`,", + "\t\tAutomountServiceTokenRemediation,", + "\t\t`Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.`,", + "\t\tTestPodAutomountServiceAccountIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestServicesDoNotUseNodeportsIdentifier = AddCatalogEntry(", + "\t\t\"service-type\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that each workload Service does not utilize NodePort(s).`,", + "\t\tServicesDoNotUseNodeportsRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestServicesDoNotUseNodeportsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUnalteredBaseImageIdentifier = AddCatalogEntry(", + "\t\t\"base-image\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Ensures that the Container Base Image is not altered post-startup. 
This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64`, //nolint:lll", + "\t\tUnalteredBaseImageRemediation,", + "\t\tNoExceptions,", + "\t\tTestUnalteredBaseImageIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUnalteredStartupBootParamsIdentifier = AddCatalogEntry(", + "\t\t\"boot-params\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.`,", + "\t\tUnalteredStartupBootParamsRemediation,", + "\t\tNoExceptions,", + "\t\tTestUnalteredStartupBootParamsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestLoggingIdentifier = AddCatalogEntry(", + "\t\t\"container-logging\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. 
These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).`, //nolint:lll", + "\t\tLoggingRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestLoggingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestTerminationMessagePolicyIdentifier = AddCatalogEntry(", + "\t\t\"termination-policy\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. 
For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.`, //nolint:lll", + "\t\tTerminationMessagePolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestTerminationMessagePolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestCrdsStatusSubresourceIdentifier = AddCatalogEntry(", + "\t\t\"crd-status\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).`,", + "\t\tCrdsStatusSubresourceRemediation,", + "\t\tNoExceptions,", + "\t\tTestCrdsStatusSubresourceIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSysctlConfigsIdentifier = AddCatalogEntry(", + "\t\t\"sysctl-config\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured`,", + "\t\tSysctlConfigsRemediation,", + "\t\tNoExceptions,", + "\t\tTestSysctlConfigsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestServiceMeshIdentifier = AddCatalogEntry(", + "\t\t\"service-mesh-usage\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Checks if the istio namespace (\"istio-system\") is present. 
If it is present, checks that the istio sidecar is present in all pods under test.`,", + "\t\tServiceMeshRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestServiceMeshIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOCPLifecycleIdentifier = AddCatalogEntry(", + "\t\t\"ocp-lifecycle\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that the running OCP version is not end of life.`,", + "\t\tOCPLifecycleRemediation,", + "\t\tNoExceptions,", + "\t\tTestOCPLifecycleIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestNodeOperatingSystemIdentifier = AddCatalogEntry(", + "\t\t\"ocp-node-os-lifecycle\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.`,", + "\t\tNodeOperatingSystemRemediation,", + "\t\tNoExceptions,", + "\t\tTestNodeOperatingSystemIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIsRedHatReleaseIdentifier = AddCatalogEntry(", + "\t\t\"isredhat-release\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`verifies if the container base image is redhat.`,", + "\t\tIsRedHatReleaseRemediation,", + "\t\tNoExceptions,", + "\t\tTestIsRedHatReleaseIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + 
"\t\tTagCommon)", + "", + "\tTestClusterOperatorHealth = AddCatalogEntry(", + "\t\t\"cluster-operator-health\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that all cluster operators are healthy.`,", + "\t\tClusterOperatorHealthRemediation,", + "\t\tNoExceptions,", + "\t\tTestClusterOperatorHealthDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIsSELinuxEnforcingIdentifier = AddCatalogEntry(", + "\t\t\"is-selinux-enforcing\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`verifies that all openshift platform/cluster nodes have selinux in \"Enforcing\" mode.`,", + "\t\tIsSELinuxEnforcingRemediation,", + "\t\tNoExceptions,", + "\t\tTestIsSELinuxEnforcingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUndeclaredContainerPortsUsage = AddCatalogEntry(", + "\t\t\"undeclared-container-ports-usage\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that containers do not listen on ports that weren't declared in their specification. 
Platforms may be configured to block undeclared ports.`,", + "\t\tUndeclaredContainerPortsRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestUndeclaredContainerPortsUsageDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOCPReservedPortsUsage = AddCatalogEntry(", + "\t\t\"ocp-reserved-ports-usage\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that containers do not listen on ports that are reserved by OpenShift`,", + "\t\tOCPReservedPortsUsageRemediation,", + "\t\tNoExceptions,", + "\t\tTestOCPReservedPortsUsageDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestLivenessProbeIdentifier = AddCatalogEntry(", + "\t\t\"liveness-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.`, //nolint:lll", + "\t\tLivenessProbeRemediation+` workloads shall self-recover from common failures like pod failure, host failure, and network failure. 
Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.`, //nolint:lll", + "\t\tNoDocumentedProcess,", + "\t\tTestLivenessProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestReadinessProbeIdentifier = AddCatalogEntry(", + "\t\t\"readiness-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have readiness probe defined. There are different ways a pod can stop on on OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.`, //nolint:lll", + "\t\tReadinessProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestReadinessProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestStartupProbeIdentifier = AddCatalogEntry(", + "\t\t\"startup-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have startup probe defined. Workloads shall self-recover from common failures like pod failure, host failure, and network failure. 
Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.`, //nolint:lll", + "\t\tStartupProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestStartupProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestOneProcessPerContainerIdentifier = AddCatalogEntry(", + "\t\t\"one-process-per-container\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that all containers under test have only one process running`,", + "\t\tOneProcessPerContainerRemediation,", + "\t\tNoExceptionProcessForExtendedTests+NotApplicableSNO,", + "\t\tTestOneProcessPerContainerIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSYSNiceRealtimeCapabilityIdentifier = AddCatalogEntry(", + "\t\t\"sys-nice-realtime-capability\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. 
In the case that a workolad is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.`, //nolint:lll", + "\t\tSYSNiceRealtimeCapabilityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSYSNiceRealtimeCapabilityIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestSysPtraceCapabilityIdentifier = AddCatalogEntry(", + "\t\t\"sys-ptrace-capability\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/`, //nolint:lll", + "\t\tSysPtraceCapabilityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSysPtraceCapabilityIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodRequestsIdentifier = AddCatalogEntry(", + "\t\t\"requests\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that containers have resource requests specified in their spec. 
Set proper resource requests based on container use case.`,", + "\t\tRequestsRemediation,", + "\t\tRequestsExceptionProcess,", + "\t\tTestPodRequestsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNamespaceResourceQuotaIdentifier = AddCatalogEntry(", + "\t\t\"namespace-resource-quota\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks to see if workload pods are running in namespaces that have resource quotas applied.`,", + "\t\tNamespaceResourceQuotaRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestNamespaceResourceQuotaIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestPodDisruptionBudgetIdentifier = AddCatalogEntry(", + "\t\t\"pod-disruption-budget\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable`,", + "\t\tPodDisruptionBudgetRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodDisruptionBudgetIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestAPICompatibilityWithNextOCPReleaseIdentifier = AddCatalogEntry(", + "\t\t\"compatibility-with-next-ocp-release\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks to ensure if the APIs the workload uses are compatible with the next OCP version`,", + "\t\tAPICompatibilityWithNextOCPReleaseRemediation,", + "\t\tNoExceptions,", + "\t\tTestAPICompatibilityWithNextOCPReleaseIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: 
Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodTolerationBypassIdentifier = AddCatalogEntry(", + "\t\t\"pod-toleration-bypass\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.`,", + "\t\tPodTolerationBypassRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPodTolerationBypassIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPersistentVolumeReclaimPolicyIdentifier = AddCatalogEntry(", + "\t\t\"persistent-volume-reclaim-policy\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that the persistent volumes the workloads pods are using have a reclaim policy of delete. Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.`,", + "\t\tPersistentVolumeReclaimPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPersistentVolumeReclaimPolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainersImageTag = AddCatalogEntry(", + "\t\t\"containers-image-tag\",", + "\t\tcommon.ManageabilityTestKey,", + "\t\t`Check that image tag exists on containers.`,", + "\t\tContainersImageTagRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestContainersImageTagDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestNoSSHDaemonsAllowedIdentifier 
= AddCatalogEntry(", + "\t\t\"ssh-daemons\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that pods do not run SSH daemons.`,", + "\t\tNoSSHDaemonsAllowedRemediation,", + "\t\t`No exceptions - special consideration can be given to certain containers which run as utility tool daemon`,", + "\t\tTestNoSSHDaemonsAllowedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestCPUIsolationIdentifier = AddCatalogEntry(", + "\t\t\"cpu-isolation\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`CPU isolation requires: For each container within the pod, resource requests and limits must be identical. If cpu requests and limits are not identical and in whole units (Guaranteed pods with exclusive cpus), your pods will not be tested for compliance. The runTimeClassName must be specified. Annotations required disabling CPU and IRQ load-balancing.`, //nolint:lll", + "\t\tCPUIsolationRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestCPUIsolationIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainerPortNameFormat = AddCatalogEntry(", + "\t\t\"container-port-name-format\",", + "\t\tcommon.ManageabilityTestKey,", + "\t\t\"Check that the container's ports name follow the naming conventions. Name field in ContainerPort section must be of form `\u003cprotocol\u003e[-\u003csuffix\u003e]`. 
More naming convention requirements may be released in future\",", + "\t\tContainerPortNameFormatRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestContainerPortNameFormatDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestCrdScalingIdentifier = AddCatalogEntry(", + "\t\t\"crd-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that a workload's CRD support scale in/out operations. First, the test starts getting the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. In case of crd that are managed by HPA the test is changing the min and max value to crd Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the crd/s`, //nolint:lll", + "\t\tCrdScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestCrdScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon,", + "\t)", + "", + "\tTestCrdRoleIdentifier = AddCatalogEntry(", + "\t\t\"crd-roles\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t\"If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.\",", + "\t\t\"Roles providing access to CRDs should not refer to any other api or resources. 
Change the generation of the CRD role accordingly\",", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\t\"https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds\",", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\t//nolint:gocritic", + "\t// TestPodDeleteIdentifier = AddCatalogEntry(", + "\t// \t\"pod-delete\",", + "\t// \tcommon.ChaosTesting,", + "\t// \t\"Chaos test suite is under construction.\",", + "\t// \t\"\",", + "\t// \tNoDocumentedProcess,", + "\t// \t\"\",", + "\t// \tfalse,", + "\t// \tmap[string]string{", + "\t// \t\tFarEdge: Optional,", + "\t// \t\tTelco: Optional,", + "\t// \t\tNonTelco: Optional,", + "\t// \t\tExtended: Optional,", + "\t// \t},", + "\t// \tTagCommon)", + "", + "\treturn Catalog", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightContainerCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t\t\tfor _, cut := range containers {", + "\t\t\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Container %q has passed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has failed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has errored Preflight test %q, err: %v\", cut, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Container has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightOperatorCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightOperatorCnfCertTest(checksGroup 
*checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t\t\tfor _, op := range operators {", + "\t\t\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Operator %q has passed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has failed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + 
"\t\t\t\t\t\tcheck.LogError(\"Operator %q has errored Preflight test %q, err: %v\", op, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, fmt.Sprintf(\"Operator has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "GetTestIDAndLabels", + "qualifiedName": "GetTestIDAndLabels", + "exported": true, + "signature": "func(claim.Identifier)(string, []string)", + "doc": "GetTestIDAndLabels Transforms a claim identifier into a test ID and associated labels\n\nThe function splits the Tags field of a claim.Identifier by commas to create\nlabel slices, then appends the identifier's Id and Suite values to that list.\nIt stores the full identifier in a global map keyed by Id for later lookup,\nand returns the Id as the test ID along with the constructed label slice.", + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:1862", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper", + "name": "SanitizeClaimFile", + "kind": "function", + "source": [ + "func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) {", + "\tlog.Info(\"Sanitizing claim file %s\", claimFileName)", + "\tdata, err := ReadClaimFile(claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"ReadClaimFile failed with err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tvar aRoot claim.Root", + "\tUnmarshalClaim(data, \u0026aRoot)", + "", + "\t// Remove the results that do not match the labels filter", + "\tfor testID := range aRoot.Claim.Results {", + "\t\tevaluator, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to create labels expression evaluator: %v\", err)", + "\t\t\treturn \"\", err", + "\t\t}", + "", + "\t\t_, gatheredLabels := identifiers.GetTestIDAndLabels(*aRoot.Claim.Results[testID].TestID)", + "", + "\t\tif !evaluator.Eval(gatheredLabels) {", + "\t\t\tlog.Info(\"Removing test ID: %s from the claim\", testID)", + "\t\t\tdelete(aRoot.Claim.Results, testID)", + "\t\t}", + "\t}", + "", + "\tWriteClaimOutput(claimFileName, MarshalClaimOutput(\u0026aRoot))", + "\treturn claimFileName, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.AccessControlTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecContextIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetAdminIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetAdminCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetRawIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetRawCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIpcLockIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIpcLockCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestBpfIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestBpfCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConNonRootUserIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConRunAsNonRoot(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConPrivilegeEscalation)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConPrivilegeEscalation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSecConReadOnlyFilesystem)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSecConReadOnlyFilesystem(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerHostPort)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerHostPort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostNetwork)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostNetwork(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPath)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPath(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostIPC)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostIPC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHostPID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHostPID(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNamespacesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespace(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodServiceAccountBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodServiceAccount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodClusterRoleBindingsBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodClusterRoleBindings(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodAutomountServiceAccountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAutomountServiceToken(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOneProcessPerContainerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOneProcessPerContainer(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSYSNiceRealtimeCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(testhelper.GetNoNodesWithRealtimeKernelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSYSNiceRealtimeCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetSharedProcessNamespacePodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysPtraceCapability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNamespaceResourceQuotaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNamespaceResourceQuota(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNoSSHDaemonsAllowedIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNoSSHDaemonsAllowed(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRequestsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodRequests(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.Test1337UIDIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttest1337UIDs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServicesDoNotUseNodeportsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodePort(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdRoleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env), testhelper.GetNoNamespacesSkipFn(\u0026env), testhelper.GetNoRolesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrdRoles(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", 
common.AffiliatedCertTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.AffiliatedCertTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\ttestHelmVersion(check)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoOperatorsFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAllOperatorCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)).", + "\t\tWithSkipCheckFn(skipIfNoHelmChartReleasesFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHelmCertified(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerIsCertifiedDigestIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerCertificationStatusByDigest(c, \u0026env, validator)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", 
+ "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "LoadChecks", + "kind": "function", + "source": [ + 
"func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), 
testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + 
"\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + 
"\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", 
+ "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightContainerCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t\t\tfor _, cut := range containers {", + "\t\t\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Container %q has passed Preflight test %q\", cut, testName)", + 
"\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has failed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has errored Preflight test %q, err: %v\", cut, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Container has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "generatePreflightOperatorCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: 
identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t\t\tfor _, op := range operators {", + "\t\t\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Operator %q has passed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has failed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has errored Preflight test %q, err: %v\", op, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, fmt.Sprintf(\"Operator has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + 
"usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "InitCatalog", + "qualifiedName": "InitCatalog", + "exported": true, + "signature": "func()(map[claim.Identifier]claim.TestCaseDescription)", + "doc": "InitCatalog Initializes the test case catalog with predefined identifiers\n\nThis routine registers a series of test case descriptions into the global\nCatalog map by calling AddCatalogEntry for each known identifier. Each call\nsupplies metadata such as test ID, suite key, description, remediation logic,\nexception handling, reference link, query‑enabled flag, classification\ntags, and category classification. The function returns the populated catalog\nmapping identifiers to their corresponding TestCaseDescription objects.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:209", + "calls": [ + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = 
categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, 
reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = 
NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", 
+ "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + 
"\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, 
remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" 
{", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = 
append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + 
"\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func 
AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", 
+ "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif 
len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, 
categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + 
"source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default 
Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document 
Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, 
reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + 
"kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) 
{", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No 
Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, 
remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": 
"AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) 
(aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + 
"\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, 
suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + 
] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification 
map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif 
strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := 
claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = 
categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, 
reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = 
NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", 
+ "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + 
"\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, 
remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" 
{", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = 
append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + 
"\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func 
AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", 
+ "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif 
len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, 
categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + 
"source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default 
Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document 
Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "init", + "kind": "function", + "source": [ + "func init() {", + "\tInitCatalog()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func InitCatalog() map[claim.Identifier]claim.TestCaseDescription {", + "\tTestNetworkPolicyDenyAllIdentifier = AddCatalogEntry(", + "\t\t\"network-policy-deny-all\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that network policies attached to namespaces running workload pods contain a default deny-all rule for both ingress and egress traffic`,", + "\t\tNetworkPolicyDenyAllRemediation,", + 
"\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestNetworkPolicyDenyAllIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTest1337UIDIdentifier = AddCatalogEntry(", + "\t\t\"no-1337-uid\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks that all pods are not using the securityContext UID 1337`,", + "\t\tUID1337Remediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTest1337UIDIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestLimitedUseOfExecProbesIdentifier = AddCatalogEntry(", + "\t\t\"max-resources-exec-probes\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Checks that less than 10 exec probes are configured in the cluster for this workload. 
Also checks that the periodSeconds parameter for each probe is superior or equal to 10.`,", + "\t\tLimitedUseOfExecProbesRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestLimitedUseOfExecProbesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional},", + "\t\tTagFarEdge)", + "", + "\tTestHelmVersionIdentifier = AddCatalogEntry(", + "\t\t\"helm-version\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Test to check if the helm chart is v3`,", + "\t\tHelmVersionV3Remediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestHelmVersionIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\t// TestContainerIsCertifiedDigestIdentifier tests whether the container has passed Container Certification.", + "\tTestContainerIsCertifiedDigestIdentifier = AddCatalogEntry(", + "\t\t\"container-is-certified-digest\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).`,", + "\t\tContainerIsCertifiedDigestRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestContainerIsCertifiedDigestIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHugePages2M = AddCatalogEntry(", + "\t\t\"hugepages-2m-only\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that pods using hugepages only use 2Mi size`,", + "\t\tPodHugePages2MRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestPodHugePages2MDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: 
Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestPodHugePages1G = AddCatalogEntry(", + "\t\t\"hugepages-1g-only\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that pods using hugepages only use 1Gi size`,", + "\t\tPodHugePages1GRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPodHugePages1GDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestHyperThreadEnable = AddCatalogEntry(", + "\t\t\"hyperthread-enable\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that baremetal workers have hyperthreading enabled`,", + "\t\tHyperThreadEnable,", + "\t\tNoDocumentedProcess,", + "\t\tTestHyperThreadEnableDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestReservedExtendedPartnerPorts = AddCatalogEntry(", + "\t\t\"reserved-partner-ports\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that pods and containers are not consuming ports designated as reserved by partner`,", + "\t\tReservedPartnerPortsRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestReservedExtendedPartnerPortsDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestAffinityRequiredPods = AddCatalogEntry(", + "\t\t\"affinity-required-pods\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.`,", + "\t\tAffinityRequiredRemediation,", + "\t\tNoDocumentedProcess,", + 
"\t\tTestAffinityRequiredPodsDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestStorageProvisioner = AddCatalogEntry(", + "\t\t\"storage-provisioner\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Checks that pods do not place persistent volumes on local storage in multinode clusters. Local storage is recommended for single node clusters, but only one type of local storage should be installed (lvms or noprovisioner).`,", + "\t\tCheckStorageProvisionerRemediation,", + "\t\tNoExceptions,", + "\t\tTestStorageProvisionerDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestContainerPostStartIdentifier = AddCatalogEntry(", + "\t\t\"container-poststart\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.`, //nolint:lll", + "\t\t`PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. 
PostStart is used to configure container, set up dependencies, record new creation. It can also be used to check that a required API is available before the container’s work begins.`, //nolint:lll", + "\t\tContainerPostStartIdentifierRemediation,", + "\t\tTestContainerPostStartIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainerPrestopIdentifier = AddCatalogEntry(", + "\t\t\"container-prestop\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.`, //nolint:lll", + "\t\t`The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. 
All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code.`, //nolint:lll", + "\t\tContainerPrestopIdentifierRemediation,", + "\t\tTestContainerPrestopIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestDpdkCPUPinningExecProbe = AddCatalogEntry(", + "\t\t\"dpdk-cpu-pinning-exec-probe\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`If a workload is doing CPU pinning, exec probes may not be used.`,", + "\t\tDpdkCPUPinningExecProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestDpdkCPUPinningExecProbeDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNetAdminIdentifier = AddCatalogEntry(", + "\t\t\"net-admin-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use NET_ADMIN capability. `+iptablesNftablesImplicitCheck,", + "\t\tSecConRemediation,", + "\t\t`Exception will be considered for user plane or networking functions (e.g. SR-IOV, Multicast). 
Must identify which container requires the capability and detail why.`,", + "\t\tTestNetAdminIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestSysAdminIdentifier = AddCatalogEntry(", + "\t\t\"sys-admin-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use SYS_ADMIN capability`,", + "\t\tSecConRemediation+\" Containers should not use the SYS_ADMIN Linux capability.\",", + "\t\tNoExceptions,", + "\t\tTestSysAdminIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIpcLockIdentifier = AddCatalogEntry(", + "\t\t\"ipc-lock-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use IPC_LOCK capability. Workloads should avoid accessing host resources - spec.HostIpc should be false.`,", + "\t\tSecConRemediation,", + "\t\t`Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.`,", + "\t\tTestIpcLockIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNetRawIdentifier = AddCatalogEntry(", + "\t\t\"net-raw-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use NET_RAW capability. `+iptablesNftablesImplicitCheck,", + "\t\tSecConRemediation,", + "\t\t`Exception will be considered for user plane or networking functions. 
Must identify which container requires the capability and detail why.`,", + "\t\tTestNetRawIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestBpfIdentifier = AddCatalogEntry(", + "\t\t\"bpf-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use BPF capability. Workloads should avoid loading eBPF filters`,", + "\t\tBpfCapabilityRemediation,", + "\t\t`Exception can be considered. Must identify which container requires the capability and detail why.`,", + "\t\tTestBpfIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestExclusiveCPUPoolIdentifier = AddCatalogEntry(", + "\t\t\"exclusive-cpu-pool\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool`,", + "\t\tExclusiveCPUPoolRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestExclusiveCPUPoolIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestSharedCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"shared-cpu-pool-non-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if application workload runs in shared CPU pool, it chooses non-RT CPU schedule policy to always share the CPU with other applications and kernel threads.`,", + "\t\tSharedCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSharedCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + 
"\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestExclusiveCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"exclusive-cpu-pool-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if application workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.`,", + "\t\tExclusiveCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestExclusiveCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestIsolatedCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"isolated-cpu-pool-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy`,", + "\t\tIsolatedCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestIsolatedCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestRtAppNoExecProbes = AddCatalogEntry(", + "\t\t\"rt-apps-no-exec-probes\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if one container runs a real time application exec probes are not used`,", + "\t\tRtAppNoExecProbesRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestRtAppNoExecProbesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestRestartOnRebootLabelOnPodsUsingSRIOV = 
AddCatalogEntry(", + "\t\t\"restart-on-reboot-sriov-pod\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.`,", + "\t\tSRIOVPodsRestartOnRebootLabelRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestRestartOnRebootLabelOnPodsUsingSRIOVDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestNetworkAttachmentDefinitionSRIOVUsingMTU = AddCatalogEntry(", + "\t\t\"network-attachment-definition-sriov-mtu\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Ensures that MTU values are set correctly in NetworkAttachmentDefinitions for SRIOV network interfaces.`,", + "\t\tSRIOVNetworkAttachmentDefinitionMTURemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestNetworkAttachmentDefinitionSRIOVUsingMTUDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestSecConNonRootUserIDIdentifier = AddCatalogEntry(", + "\t\t\"security-context-non-root-user-id-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks securityContext's runAsNonRoot and runAsUser fields at pod and container level to make sure containers are not run as root.`,", + "\t\tSecConRunAsNonRootUserRemediation,", + "\t\tSecConNonRootUserExceptionProcess,", + "\t\tTestSecConNonRootUserIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSecConReadOnlyFilesystem = AddCatalogEntry(", + "\t\t\"security-context-read-only-file-system\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks the security context 
readOnlyFileSystem in containers is enabled. Containers should not try modify its own filesystem.`,", + "\t\tSecConNonRootUserExceptionProcess,", + "\t\tNoExceptions,", + "\t\tTestSecContextIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSecContextIdentifier = AddCatalogEntry(", + "\t\t\"security-context\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks the security context matches one of the 4 categories`,", + "\t\t`Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. The 4 categories are defined in Requirement ID 94118 [here](#security-context-categories)`, //nolint:lll", + "\t\t`no exception needed for optional/extended test`,", + "\t\tTestSecContextIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestSecConPrivilegeEscalation = AddCatalogEntry(", + "\t\t\"security-context-privilege-escalation\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).`,", + "\t\tSecConPrivilegeRemediation,", + "\t\tNoExceptions,", + "\t\tTestSecConPrivilegeEscalationDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestContainerHostPort = AddCatalogEntry(", + "\t\t\"container-host-port\",", + 
"\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies if containers define a hostPort.`,", + "\t\tContainerHostPortRemediation,", + "\t\t\"Exception for host resource access tests will only be considered in rare cases where it is absolutely needed\",", + "\t\tTestContainerHostPortDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostNetwork = AddCatalogEntry(", + "\t\t\"pod-host-network\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostNetwork parameter is not set (not present)`,", + "\t\tPodHostNetworkRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostNetworkDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostPath = AddCatalogEntry(", + "\t\t\"pod-host-path\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostPath parameter is not set (not present)`,", + "\t\tPodHostPathRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostPathDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostIPC = AddCatalogEntry(", + "\t\t\"pod-host-ipc\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostIpc parameter is set to false`,", + "\t\tPodHostIPCRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + 
"\t\tTestPodHostIPCDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostPID = AddCatalogEntry(", + "\t\t\"pod-host-pid\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostPid parameter is set to false`,", + "\t\tPodHostPIDRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostPIDDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestHugepagesNotManuallyManipulated = AddCatalogEntry(", + "\t\t\"hugepages-config\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet. First, the \"worker\" MachineConfig is polled, and the Hugepage settings are extracted. Next, the underlying Nodes are polled for configured HugePages through inspection of /proc/meminfo. 
The results are compared, and the test passes only if they are the same.`, //nolint:lll", + "\t\tHugepagesNotManuallyManipulatedRemediation,", + "\t\tNoExceptions,", + "\t\tTestHugepagesNotManuallyManipulatedDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv4ConnectivityIdentifier = AddCatalogEntry(", + "\t\t\"icmpv4-connectivity\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\t`Ensure that the workload is able to communicate via the Default OpenShift network. In some rare cases, workloads may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence.`, //nolint:lll", + "\t\t`No exceptions - must be able to communicate on default network using IPv4`,", + "\t\tTestICMPv4ConnectivityIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv6ConnectivityIdentifier = AddCatalogEntry(", + "\t\t\"icmpv6-connectivity\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv6 on the Default OpenShift network. 
This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv6ConnectivityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv6ConnectivityIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv4ConnectivityMultusIdentifier = AddCatalogEntry(", + "\t\t\"icmpv4-connectivity-multus\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv4ConnectivityMultusRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv4ConnectivityMultusIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestICMPv6ConnectivityMultusIdentifier = AddCatalogEntry(", + "\t\t\"icmpv6-connectivity-multus\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). 
If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv6ConnectivityMultusRemediation+` Not applicable if IPv6/MULTUS is not supported.`,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv6ConnectivityMultusIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestServiceDualStackIdentifier = AddCatalogEntry(", + "\t\t\"dual-stack-service\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.`,", + "\t\tTestServiceDualStackRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestServiceDualStackIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestNamespaceBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"namespace\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that all workload resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets", + "the following conditions: (1) It was declared in the yaml config file under the targetNameSpaces", + "tag. 
(2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-`,", + "\t\tNamespaceBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestNamespaceBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestNonTaintedNodeKernelsIdentifier = AddCatalogEntry(", + "\t\t\"tainted-node-kernel\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Ensures that the Node(s) hosting workloads do not utilize tainted kernels. This test case is especially", + "important to support Highly Available workloads, since when a workload is re-instantiated on a backup Node,", + "that Node's kernel may not have the same hacks.'`,", + "\t\tNonTaintedNodeKernelsRemediation,", + "\t\t`If taint is necessary, document details of the taint and why it's needed by workload or environment.`,", + "\t\tTestNonTaintedNodeKernelsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorInstallStatusSucceededIdentifier = AddCatalogEntry(", + "\t\t\"install-status-succeeded\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Ensures that the target workload operators report \"Succeeded\" as their installation status.`,", + "\t\tOperatorInstallStatusSucceededRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorInstallStatusSucceededIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorNoSCCAccess = AddCatalogEntry(", + "\t\t\"install-status-no-privileges\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Checks whether 
the operator needs access to Security Context Constraints. Test passes if clusterPermissions is not present in the CSV manifest or is present with no RBAC rules related to SCCs.`,", + "\t\tOperatorNoPrivilegesRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorNoPrivilegesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorIsCertifiedIdentifier = AddCatalogEntry(", + "\t\t\"operator-is-certified\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether the workload Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).`,", + "\t\tOperatorIsCertifiedRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestOperatorIsCertifiedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestHelmIsCertifiedIdentifier = AddCatalogEntry(", + "\t\t\"helmchart-is-certified\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.`,", + "\t\tHelmIsCertifiedRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestHelmIsCertifiedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorIsInstalledViaOLMIdentifier = AddCatalogEntry(", + "\t\t\"install-source\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether a workload Operator is installed via OLM.`,", + "\t\tOperatorIsInstalledViaOLMRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorIsInstalledViaOLMIdentifierDocLink,", + "\t\ttrue,", + 
"\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace = AddCatalogEntry(", + "\t\t\"single-or-multi-namespaced-allowed-in-tenant-namespaces\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Verifies that only single/multi namespaced operators are installed in a tenant-dedicated namespace. The test fails if this namespace contains any installed operator with Own/All-namespaced install mode, unlabeled operators, operands of any operator installed elsewhere, or pods unrelated to any operator.`, //nolint:lll", + "\t\tSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceRemediation,", + "\t\tNoExceptions,", + "\t\tTestSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOperatorHasSemanticVersioningIdentifier = AddCatalogEntry(", + "\t\t\"semantic-versioning\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether an application Operator has a valid semantic versioning.`,", + "\t\tOperatorHasSemanticVersioningRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorHasSemanticVersioningIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorOlmSkipRange = AddCatalogEntry(", + "\t\t\"olm-skip-range\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Test that checks the operator has a valid olm skip range.`,", + "\t\tOperatorOlmSkipRangeRemediation,", + "\t\tOperatorSkipRangeExceptionProcess,", + "\t\tTestOperatorOlmSkipRangeDocLink,", + "\t\tfalse,", + 
"\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCrdVersioningIdentifier = AddCatalogEntry(", + "\t\t\"crd-versioning\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether the Operator CRD has a valid versioning.`,", + "\t\tOperatorCrdVersioningRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCrdVersioningIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCrdSchemaIdentifier = AddCatalogEntry(", + "\t\t\"crd-openapi-schema\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether an application Operator CRD is defined with OpenAPI spec.`,", + "\t\tOperatorCrdSchemaIdentifierRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCrdSchemaIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorSingleCrdOwnerIdentifier = AddCatalogEntry(", + "\t\t\"single-crd-owner\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether a CRD is owned by a single Operator.`,", + "\t\tOperatorSingleCrdOwnerRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorSingleCrdOwnerIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorPodsNoHugepages = AddCatalogEntry(", + "\t\t\"pods-no-hugepages\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests that the pods do not have hugepages enabled.`,", + "\t\tOperatorPodsNoHugepagesRemediation,", + "\t\tNoExceptions,", 
+ "\t\tTestOperatorPodsNoHugepagesDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCatalogSourceBundleCountIdentifier = AddCatalogEntry(", + "\t\t\"catalogsource-bundle-count\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests operator catalog source bundle count is less than 1000`,", + "\t\tOperatorCatalogSourceBundleCountRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCatalogSourceBundleCountIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestMultipleSameOperatorsIdentifier = AddCatalogEntry(", + "\t\t\"multiple-same-operators\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether multiple instances of the same Operator CSV are installed.`,", + "\t\tMultipleSameOperatorsRemediation,", + "\t\tNoExceptions,", + "\t\tTestMultipleSameOperatorsIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodNodeSelectorAndAffinityBestPractices = AddCatalogEntry(", + "\t\t\"pod-scheduling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensures that workload Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. Workloads shall not use node selectors nor taints/tolerations to assign pod location.`,", + "\t\tPodNodeSelectorAndAffinityBestPracticesRemediation,", + "\t\t`Exception will only be considered if application requires specialized hardware. 
Must specify which container requires special hardware and why.`,", + "\t\tTestPodNodeSelectorAndAffinityBestPracticesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodHighAvailabilityBestPractices = AddCatalogEntry(", + "\t\t\"pod-high-availability\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensures that workloads Pods specify podAntiAffinity rules and replica value is set to more than 1.`,", + "\t\tPodHighAvailabilityBestPracticesRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestPodHighAvailabilityBestPracticesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodClusterRoleBindingsBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"cluster-role-bindings\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that a Pod does not specify ClusterRoleBindings.`,", + "\t\tPodClusterRoleBindingsBestPracticesRemediation,", + "\t\t\"Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles \u0026 role bindings\",", + "\t\tTestPodClusterRoleBindingsBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodDeploymentBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-owner-type\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that the workload Pods are deployed as part of a ReplicaSet(s)/StatefulSet(s).`,", + "\t\tPodDeploymentBestPracticesRemediation,", + "\t\tNoDocumentedProcess+` Pods should not be deployed as DaemonSet or naked pods.`,", + 
"\t\tTestPodDeploymentBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestDeploymentScalingIdentifier = AddCatalogEntry(", + "\t\t\"deployment-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that workload deployments support scale in/out operations. First, the test starts getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA the test is changing the min and max value to deployment Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the deployment/s`, //nolint:lll", + "\t\tDeploymentScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestDeploymentScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestStatefulSetScalingIdentifier = AddCatalogEntry(", + "\t\t\"statefulset-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that workload statefulsets support scale in/out operations. First, the test starts getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. 
In case of statefulsets that are managed by HPA the test is changing the min and max value to statefulset Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the statefulset/s`, //nolint:lll", + "\t\tStatefulSetScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestStatefulSetScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestImagePullPolicyIdentifier = AddCatalogEntry(", + "\t\t\"image-pull-policy\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.`, //nolint:lll", + "\t\tImagePullPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestImagePullPolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodRecreationIdentifier = AddCatalogEntry(", + "\t\t\"pod-recreation\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that a workload is configured to support High Availability. First, this test cordons and drains a Node that hosts the workload Pod. 
Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.`, //nolint:lll", + "\t\tPodRecreationRemediation,", + "\t\t`No exceptions - workloads should be able to be restarted/recreated.`,", + "\t\tTestPodRecreationIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodRoleBindingsBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-role-bindings\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that a workload does not utilize RoleBinding(s) in a non-workload Namespace.`,", + "\t\tPodRoleBindingsBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodRoleBindingsBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodServiceAccountBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-service-account\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that each workload Pod utilizes a valid Service Account. Default or empty service account is not valid.`,", + "\t\tPodServiceAccountBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodServiceAccountBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodAutomountServiceAccountIdentifier = AddCatalogEntry(", + "\t\t\"pod-automount-service-account-token\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that all pods under test have automountServiceAccountToken set to false. 
Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true`,", + "\t\tAutomountServiceTokenRemediation,", + "\t\t`Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.`,", + "\t\tTestPodAutomountServiceAccountIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestServicesDoNotUseNodeportsIdentifier = AddCatalogEntry(", + "\t\t\"service-type\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that each workload Service does not utilize NodePort(s).`,", + "\t\tServicesDoNotUseNodeportsRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestServicesDoNotUseNodeportsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUnalteredBaseImageIdentifier = AddCatalogEntry(", + "\t\t\"base-image\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Ensures that the Container Base Image is not altered post-startup. 
This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64`, //nolint:lll", + "\t\tUnalteredBaseImageRemediation,", + "\t\tNoExceptions,", + "\t\tTestUnalteredBaseImageIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUnalteredStartupBootParamsIdentifier = AddCatalogEntry(", + "\t\t\"boot-params\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.`,", + "\t\tUnalteredStartupBootParamsRemediation,", + "\t\tNoExceptions,", + "\t\tTestUnalteredStartupBootParamsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestLoggingIdentifier = AddCatalogEntry(", + "\t\t\"container-logging\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. 
These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).`, //nolint:lll", + "\t\tLoggingRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestLoggingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestTerminationMessagePolicyIdentifier = AddCatalogEntry(", + "\t\t\"termination-policy\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. 
For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.`, //nolint:lll", + "\t\tTerminationMessagePolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestTerminationMessagePolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestCrdsStatusSubresourceIdentifier = AddCatalogEntry(", + "\t\t\"crd-status\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).`,", + "\t\tCrdsStatusSubresourceRemediation,", + "\t\tNoExceptions,", + "\t\tTestCrdsStatusSubresourceIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSysctlConfigsIdentifier = AddCatalogEntry(", + "\t\t\"sysctl-config\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured`,", + "\t\tSysctlConfigsRemediation,", + "\t\tNoExceptions,", + "\t\tTestSysctlConfigsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestServiceMeshIdentifier = AddCatalogEntry(", + "\t\t\"service-mesh-usage\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Checks if the istio namespace (\"istio-system\") is present. 
If it is present, checks that the istio sidecar is present in all pods under test.`,", + "\t\tServiceMeshRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestServiceMeshIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOCPLifecycleIdentifier = AddCatalogEntry(", + "\t\t\"ocp-lifecycle\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that the running OCP version is not end of life.`,", + "\t\tOCPLifecycleRemediation,", + "\t\tNoExceptions,", + "\t\tTestOCPLifecycleIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestNodeOperatingSystemIdentifier = AddCatalogEntry(", + "\t\t\"ocp-node-os-lifecycle\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.`,", + "\t\tNodeOperatingSystemRemediation,", + "\t\tNoExceptions,", + "\t\tTestNodeOperatingSystemIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIsRedHatReleaseIdentifier = AddCatalogEntry(", + "\t\t\"isredhat-release\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`verifies if the container base image is redhat.`,", + "\t\tIsRedHatReleaseRemediation,", + "\t\tNoExceptions,", + "\t\tTestIsRedHatReleaseIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + 
"\t\tTagCommon)", + "", + "\tTestClusterOperatorHealth = AddCatalogEntry(", + "\t\t\"cluster-operator-health\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that all cluster operators are healthy.`,", + "\t\tClusterOperatorHealthRemediation,", + "\t\tNoExceptions,", + "\t\tTestClusterOperatorHealthDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIsSELinuxEnforcingIdentifier = AddCatalogEntry(", + "\t\t\"is-selinux-enforcing\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`verifies that all openshift platform/cluster nodes have selinux in \"Enforcing\" mode.`,", + "\t\tIsSELinuxEnforcingRemediation,", + "\t\tNoExceptions,", + "\t\tTestIsSELinuxEnforcingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUndeclaredContainerPortsUsage = AddCatalogEntry(", + "\t\t\"undeclared-container-ports-usage\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that containers do not listen on ports that weren't declared in their specification. 
Platforms may be configured to block undeclared ports.`,", + "\t\tUndeclaredContainerPortsRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestUndeclaredContainerPortsUsageDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOCPReservedPortsUsage = AddCatalogEntry(", + "\t\t\"ocp-reserved-ports-usage\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that containers do not listen on ports that are reserved by OpenShift`,", + "\t\tOCPReservedPortsUsageRemediation,", + "\t\tNoExceptions,", + "\t\tTestOCPReservedPortsUsageDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestLivenessProbeIdentifier = AddCatalogEntry(", + "\t\t\"liveness-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.`, //nolint:lll", + "\t\tLivenessProbeRemediation+` workloads shall self-recover from common failures like pod failure, host failure, and network failure. 
Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.`, //nolint:lll", + "\t\tNoDocumentedProcess,", + "\t\tTestLivenessProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestReadinessProbeIdentifier = AddCatalogEntry(", + "\t\t\"readiness-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have readiness probe defined. There are different ways a pod can stop on on OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.`, //nolint:lll", + "\t\tReadinessProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestReadinessProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestStartupProbeIdentifier = AddCatalogEntry(", + "\t\t\"startup-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have startup probe defined. Workloads shall self-recover from common failures like pod failure, host failure, and network failure. 
Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.`, //nolint:lll", + "\t\tStartupProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestStartupProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestOneProcessPerContainerIdentifier = AddCatalogEntry(", + "\t\t\"one-process-per-container\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that all containers under test have only one process running`,", + "\t\tOneProcessPerContainerRemediation,", + "\t\tNoExceptionProcessForExtendedTests+NotApplicableSNO,", + "\t\tTestOneProcessPerContainerIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSYSNiceRealtimeCapabilityIdentifier = AddCatalogEntry(", + "\t\t\"sys-nice-realtime-capability\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. 
In the case that a workolad is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.`, //nolint:lll", + "\t\tSYSNiceRealtimeCapabilityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSYSNiceRealtimeCapabilityIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestSysPtraceCapabilityIdentifier = AddCatalogEntry(", + "\t\t\"sys-ptrace-capability\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/`, //nolint:lll", + "\t\tSysPtraceCapabilityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSysPtraceCapabilityIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodRequestsIdentifier = AddCatalogEntry(", + "\t\t\"requests\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that containers have resource requests specified in their spec. 
Set proper resource requests based on container use case.`,", + "\t\tRequestsRemediation,", + "\t\tRequestsExceptionProcess,", + "\t\tTestPodRequestsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNamespaceResourceQuotaIdentifier = AddCatalogEntry(", + "\t\t\"namespace-resource-quota\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks to see if workload pods are running in namespaces that have resource quotas applied.`,", + "\t\tNamespaceResourceQuotaRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestNamespaceResourceQuotaIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestPodDisruptionBudgetIdentifier = AddCatalogEntry(", + "\t\t\"pod-disruption-budget\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable`,", + "\t\tPodDisruptionBudgetRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodDisruptionBudgetIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestAPICompatibilityWithNextOCPReleaseIdentifier = AddCatalogEntry(", + "\t\t\"compatibility-with-next-ocp-release\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks to ensure if the APIs the workload uses are compatible with the next OCP version`,", + "\t\tAPICompatibilityWithNextOCPReleaseRemediation,", + "\t\tNoExceptions,", + "\t\tTestAPICompatibilityWithNextOCPReleaseIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: 
Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodTolerationBypassIdentifier = AddCatalogEntry(", + "\t\t\"pod-toleration-bypass\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.`,", + "\t\tPodTolerationBypassRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPodTolerationBypassIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPersistentVolumeReclaimPolicyIdentifier = AddCatalogEntry(", + "\t\t\"persistent-volume-reclaim-policy\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that the persistent volumes the workloads pods are using have a reclaim policy of delete. Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.`,", + "\t\tPersistentVolumeReclaimPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPersistentVolumeReclaimPolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainersImageTag = AddCatalogEntry(", + "\t\t\"containers-image-tag\",", + "\t\tcommon.ManageabilityTestKey,", + "\t\t`Check that image tag exists on containers.`,", + "\t\tContainersImageTagRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestContainersImageTagDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestNoSSHDaemonsAllowedIdentifier 
= AddCatalogEntry(", + "\t\t\"ssh-daemons\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that pods do not run SSH daemons.`,", + "\t\tNoSSHDaemonsAllowedRemediation,", + "\t\t`No exceptions - special consideration can be given to certain containers which run as utility tool daemon`,", + "\t\tTestNoSSHDaemonsAllowedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestCPUIsolationIdentifier = AddCatalogEntry(", + "\t\t\"cpu-isolation\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`CPU isolation requires: For each container within the pod, resource requests and limits must be identical. If cpu requests and limits are not identical and in whole units (Guaranteed pods with exclusive cpus), your pods will not be tested for compliance. The runTimeClassName must be specified. Annotations required disabling CPU and IRQ load-balancing.`, //nolint:lll", + "\t\tCPUIsolationRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestCPUIsolationIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainerPortNameFormat = AddCatalogEntry(", + "\t\t\"container-port-name-format\",", + "\t\tcommon.ManageabilityTestKey,", + "\t\t\"Check that the container's ports name follow the naming conventions. Name field in ContainerPort section must be of form `\u003cprotocol\u003e[-\u003csuffix\u003e]`. 
More naming convention requirements may be released in future\",", + "\t\tContainerPortNameFormatRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestContainerPortNameFormatDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestCrdScalingIdentifier = AddCatalogEntry(", + "\t\t\"crd-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that a workload's CRD support scale in/out operations. First, the test starts getting the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. In case of crd that are managed by HPA the test is changing the min and max value to crd Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the crd/s`, //nolint:lll", + "\t\tCrdScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestCrdScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon,", + "\t)", + "", + "\tTestCrdRoleIdentifier = AddCatalogEntry(", + "\t\t\"crd-roles\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t\"If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.\",", + "\t\t\"Roles providing access to CRDs should not refer to any other api or resources. 
Change the generation of the CRD role accordingly\",", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\t\"https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds\",", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\t//nolint:gocritic", + "\t// TestPodDeleteIdentifier = AddCatalogEntry(", + "\t// \t\"pod-delete\",", + "\t// \tcommon.ChaosTesting,", + "\t// \t\"Chaos test suite is under construction.\",", + "\t// \t\"\",", + "\t// \tNoDocumentedProcess,", + "\t// \t\"\",", + "\t// \tfalse,", + "\t// \tmap[string]string{", + "\t// \t\tFarEdge: Optional,", + "\t// \t\tTelco: Optional,", + "\t// \t\tNonTelco: Optional,", + "\t// \t\tExtended: Optional,", + "\t// \t},", + "\t// \tTagCommon)", + "", + "\treturn Catalog", + "}" + ] + }, + { + "name": "init", + "qualifiedName": "init", + "exported": false, + "signature": "func()()", + "doc": "init initializes the test catalog\n\nWhen the identifiers package is imported this function runs automatically and\ncalls InitCatalog to populate the global catalog of test cases. 
It ensures\nall test entries are registered before any tests execute.", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:60", + "calls": [ + { + "name": "InitCatalog", + "kind": "function", + "source": [ + "func InitCatalog() map[claim.Identifier]claim.TestCaseDescription {", + "\tTestNetworkPolicyDenyAllIdentifier = AddCatalogEntry(", + "\t\t\"network-policy-deny-all\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that network policies attached to namespaces running workload pods contain a default deny-all rule for both ingress and egress traffic`,", + "\t\tNetworkPolicyDenyAllRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestNetworkPolicyDenyAllIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTest1337UIDIdentifier = AddCatalogEntry(", + "\t\t\"no-1337-uid\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks that all pods are not using the securityContext UID 1337`,", + "\t\tUID1337Remediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTest1337UIDIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestLimitedUseOfExecProbesIdentifier = AddCatalogEntry(", + "\t\t\"max-resources-exec-probes\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Checks that less than 10 exec probes are configured in the cluster for this workload. 
Also checks that the periodSeconds parameter for each probe is superior or equal to 10.`,", + "\t\tLimitedUseOfExecProbesRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestLimitedUseOfExecProbesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional},", + "\t\tTagFarEdge)", + "", + "\tTestHelmVersionIdentifier = AddCatalogEntry(", + "\t\t\"helm-version\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Test to check if the helm chart is v3`,", + "\t\tHelmVersionV3Remediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestHelmVersionIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\t// TestContainerIsCertifiedDigestIdentifier tests whether the container has passed Container Certification.", + "\tTestContainerIsCertifiedDigestIdentifier = AddCatalogEntry(", + "\t\t\"container-is-certified-digest\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).`,", + "\t\tContainerIsCertifiedDigestRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestContainerIsCertifiedDigestIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHugePages2M = AddCatalogEntry(", + "\t\t\"hugepages-2m-only\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that pods using hugepages only use 2Mi size`,", + "\t\tPodHugePages2MRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestPodHugePages2MDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: 
Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestPodHugePages1G = AddCatalogEntry(", + "\t\t\"hugepages-1g-only\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that pods using hugepages only use 1Gi size`,", + "\t\tPodHugePages1GRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPodHugePages1GDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestHyperThreadEnable = AddCatalogEntry(", + "\t\t\"hyperthread-enable\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Check that baremetal workers have hyperthreading enabled`,", + "\t\tHyperThreadEnable,", + "\t\tNoDocumentedProcess,", + "\t\tTestHyperThreadEnableDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestReservedExtendedPartnerPorts = AddCatalogEntry(", + "\t\t\"reserved-partner-ports\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that pods and containers are not consuming ports designated as reserved by partner`,", + "\t\tReservedPartnerPortsRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestReservedExtendedPartnerPortsDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestAffinityRequiredPods = AddCatalogEntry(", + "\t\t\"affinity-required-pods\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.`,", + "\t\tAffinityRequiredRemediation,", + "\t\tNoDocumentedProcess,", + 
"\t\tTestAffinityRequiredPodsDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestStorageProvisioner = AddCatalogEntry(", + "\t\t\"storage-provisioner\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Checks that pods do not place persistent volumes on local storage in multinode clusters. Local storage is recommended for single node clusters, but only one type of local storage should be installed (lvms or noprovisioner).`,", + "\t\tCheckStorageProvisionerRemediation,", + "\t\tNoExceptions,", + "\t\tTestStorageProvisionerDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestContainerPostStartIdentifier = AddCatalogEntry(", + "\t\t\"container-poststart\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.`, //nolint:lll", + "\t\t`PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. 
PostStart is used to configure container, set up dependencies, record new creation. It can also be used to check that a required API is available before the container’s work begins.`, //nolint:lll", + "\t\tContainerPostStartIdentifierRemediation,", + "\t\tTestContainerPostStartIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainerPrestopIdentifier = AddCatalogEntry(", + "\t\t\"container-prestop\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.`, //nolint:lll", + "\t\t`The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. 
All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code.`, //nolint:lll", + "\t\tContainerPrestopIdentifierRemediation,", + "\t\tTestContainerPrestopIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestDpdkCPUPinningExecProbe = AddCatalogEntry(", + "\t\t\"dpdk-cpu-pinning-exec-probe\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`If a workload is doing CPU pinning, exec probes may not be used.`,", + "\t\tDpdkCPUPinningExecProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestDpdkCPUPinningExecProbeDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNetAdminIdentifier = AddCatalogEntry(", + "\t\t\"net-admin-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use NET_ADMIN capability. `+iptablesNftablesImplicitCheck,", + "\t\tSecConRemediation,", + "\t\t`Exception will be considered for user plane or networking functions (e.g. SR-IOV, Multicast). 
Must identify which container requires the capability and detail why.`,", + "\t\tTestNetAdminIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestSysAdminIdentifier = AddCatalogEntry(", + "\t\t\"sys-admin-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use SYS_ADMIN capability`,", + "\t\tSecConRemediation+\" Containers should not use the SYS_ADMIN Linux capability.\",", + "\t\tNoExceptions,", + "\t\tTestSysAdminIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIpcLockIdentifier = AddCatalogEntry(", + "\t\t\"ipc-lock-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use IPC_LOCK capability. Workloads should avoid accessing host resources - spec.HostIpc should be false.`,", + "\t\tSecConRemediation,", + "\t\t`Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.`,", + "\t\tTestIpcLockIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNetRawIdentifier = AddCatalogEntry(", + "\t\t\"net-raw-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use NET_RAW capability. `+iptablesNftablesImplicitCheck,", + "\t\tSecConRemediation,", + "\t\t`Exception will be considered for user plane or networking functions. 
Must identify which container requires the capability and detail why.`,", + "\t\tTestNetRawIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestBpfIdentifier = AddCatalogEntry(", + "\t\t\"bpf-capability-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that containers do not use BPF capability. Workloads should avoid loading eBPF filters`,", + "\t\tBpfCapabilityRemediation,", + "\t\t`Exception can be considered. Must identify which container requires the capability and detail why.`,", + "\t\tTestBpfIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestExclusiveCPUPoolIdentifier = AddCatalogEntry(", + "\t\t\"exclusive-cpu-pool\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool`,", + "\t\tExclusiveCPUPoolRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestExclusiveCPUPoolIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestSharedCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"shared-cpu-pool-non-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if application workload runs in shared CPU pool, it chooses non-RT CPU schedule policy to always share the CPU with other applications and kernel threads.`,", + "\t\tSharedCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSharedCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + 
"\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestExclusiveCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"exclusive-cpu-pool-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if application workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.`,", + "\t\tExclusiveCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestExclusiveCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestIsolatedCPUPoolSchedulingPolicy = AddCatalogEntry(", + "\t\t\"isolated-cpu-pool-rt-scheduling-policy\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy`,", + "\t\tIsolatedCPUPoolSchedulingPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestIsolatedCPUPoolSchedulingPolicyDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestRtAppNoExecProbes = AddCatalogEntry(", + "\t\t\"rt-apps-no-exec-probes\",", + "\t\tcommon.PerformanceTestKey,", + "\t\t`Ensures that if one container runs a real time application exec probes are not used`,", + "\t\tRtAppNoExecProbesRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestRtAppNoExecProbesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestRestartOnRebootLabelOnPodsUsingSRIOV = 
AddCatalogEntry(", + "\t\t\"restart-on-reboot-sriov-pod\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.`,", + "\t\tSRIOVPodsRestartOnRebootLabelRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestRestartOnRebootLabelOnPodsUsingSRIOVDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestNetworkAttachmentDefinitionSRIOVUsingMTU = AddCatalogEntry(", + "\t\t\"network-attachment-definition-sriov-mtu\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Ensures that MTU values are set correctly in NetworkAttachmentDefinitions for SRIOV network interfaces.`,", + "\t\tSRIOVNetworkAttachmentDefinitionMTURemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestNetworkAttachmentDefinitionSRIOVUsingMTUDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagFarEdge)", + "", + "\tTestSecConNonRootUserIDIdentifier = AddCatalogEntry(", + "\t\t\"security-context-non-root-user-id-check\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks securityContext's runAsNonRoot and runAsUser fields at pod and container level to make sure containers are not run as root.`,", + "\t\tSecConRunAsNonRootUserRemediation,", + "\t\tSecConNonRootUserExceptionProcess,", + "\t\tTestSecConNonRootUserIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSecConReadOnlyFilesystem = AddCatalogEntry(", + "\t\t\"security-context-read-only-file-system\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks the security context 
readOnlyFileSystem in containers is enabled. Containers should not try modify its own filesystem.`,", + "\t\tSecConNonRootUserExceptionProcess,", + "\t\tNoExceptions,", + "\t\tTestSecContextIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSecContextIdentifier = AddCatalogEntry(", + "\t\t\"security-context\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks the security context matches one of the 4 categories`,", + "\t\t`Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. The 4 categories are defined in Requirement ID 94118 [here](#security-context-categories)`, //nolint:lll", + "\t\t`no exception needed for optional/extended test`,", + "\t\tTestSecContextIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestSecConPrivilegeEscalation = AddCatalogEntry(", + "\t\t\"security-context-privilege-escalation\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).`,", + "\t\tSecConPrivilegeRemediation,", + "\t\tNoExceptions,", + "\t\tTestSecConPrivilegeEscalationDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestContainerHostPort = AddCatalogEntry(", + "\t\t\"container-host-port\",", + 
"\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies if containers define a hostPort.`,", + "\t\tContainerHostPortRemediation,", + "\t\t\"Exception for host resource access tests will only be considered in rare cases where it is absolutely needed\",", + "\t\tTestContainerHostPortDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostNetwork = AddCatalogEntry(", + "\t\t\"pod-host-network\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostNetwork parameter is not set (not present)`,", + "\t\tPodHostNetworkRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostNetworkDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostPath = AddCatalogEntry(", + "\t\t\"pod-host-path\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostPath parameter is not set (not present)`,", + "\t\tPodHostPathRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostPathDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostIPC = AddCatalogEntry(", + "\t\t\"pod-host-ipc\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostIpc parameter is set to false`,", + "\t\tPodHostIPCRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + 
"\t\tTestPodHostIPCDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodHostPID = AddCatalogEntry(", + "\t\t\"pod-host-pid\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Verifies that the spec.HostPid parameter is set to false`,", + "\t\tPodHostPIDRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestPodHostPIDDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestHugepagesNotManuallyManipulated = AddCatalogEntry(", + "\t\t\"hugepages-config\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet. First, the \"worker\" MachineConfig is polled, and the Hugepage settings are extracted. Next, the underlying Nodes are polled for configured HugePages through inspection of /proc/meminfo. 
The results are compared, and the test passes only if they are the same.`, //nolint:lll", + "\t\tHugepagesNotManuallyManipulatedRemediation,", + "\t\tNoExceptions,", + "\t\tTestHugepagesNotManuallyManipulatedDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv4ConnectivityIdentifier = AddCatalogEntry(", + "\t\t\"icmpv4-connectivity\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\t`Ensure that the workload is able to communicate via the Default OpenShift network. In some rare cases, workloads may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence.`, //nolint:lll", + "\t\t`No exceptions - must be able to communicate on default network using IPv4`,", + "\t\tTestICMPv4ConnectivityIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv6ConnectivityIdentifier = AddCatalogEntry(", + "\t\t\"icmpv6-connectivity\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv6 on the Default OpenShift network. 
This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv6ConnectivityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv6ConnectivityIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestICMPv4ConnectivityMultusIdentifier = AddCatalogEntry(", + "\t\t\"icmpv4-connectivity-multus\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv4ConnectivityMultusRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv4ConnectivityMultusIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestICMPv6ConnectivityMultusIdentifier = AddCatalogEntry(", + "\t\t\"icmpv6-connectivity-multus\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that each workload Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). 
If no network with more than 2 pods exists this test will be skipped.`, //nolint:lll", + "\t\tICMPv6ConnectivityMultusRemediation+` Not applicable if IPv6/MULTUS is not supported.`,", + "\t\tNoDocumentedProcess,", + "\t\tTestICMPv6ConnectivityMultusIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestServiceDualStackIdentifier = AddCatalogEntry(", + "\t\t\"dual-stack-service\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.`,", + "\t\tTestServiceDualStackRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestServiceDualStackIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestNamespaceBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"namespace\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that all workload resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets", + "the following conditions: (1) It was declared in the yaml config file under the targetNameSpaces", + "tag. 
(2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-`,", + "\t\tNamespaceBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestNamespaceBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestNonTaintedNodeKernelsIdentifier = AddCatalogEntry(", + "\t\t\"tainted-node-kernel\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Ensures that the Node(s) hosting workloads do not utilize tainted kernels. This test case is especially", + "important to support Highly Available workloads, since when a workload is re-instantiated on a backup Node,", + "that Node's kernel may not have the same hacks.'`,", + "\t\tNonTaintedNodeKernelsRemediation,", + "\t\t`If taint is necessary, document details of the taint and why it's needed by workload or environment.`,", + "\t\tTestNonTaintedNodeKernelsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorInstallStatusSucceededIdentifier = AddCatalogEntry(", + "\t\t\"install-status-succeeded\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Ensures that the target workload operators report \"Succeeded\" as their installation status.`,", + "\t\tOperatorInstallStatusSucceededRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorInstallStatusSucceededIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorNoSCCAccess = AddCatalogEntry(", + "\t\t\"install-status-no-privileges\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Checks whether 
the operator needs access to Security Context Constraints. Test passes if clusterPermissions is not present in the CSV manifest or is present with no RBAC rules related to SCCs.`,", + "\t\tOperatorNoPrivilegesRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorNoPrivilegesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorIsCertifiedIdentifier = AddCatalogEntry(", + "\t\t\"operator-is-certified\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether the workload Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).`,", + "\t\tOperatorIsCertifiedRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestOperatorIsCertifiedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestHelmIsCertifiedIdentifier = AddCatalogEntry(", + "\t\t\"helmchart-is-certified\",", + "\t\tcommon.AffiliatedCertTestKey,", + "\t\t`Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.`,", + "\t\tHelmIsCertifiedRemediation,", + "\t\tAffiliatedCert,", + "\t\tTestHelmIsCertifiedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorIsInstalledViaOLMIdentifier = AddCatalogEntry(", + "\t\t\"install-source\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether a workload Operator is installed via OLM.`,", + "\t\tOperatorIsInstalledViaOLMRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorIsInstalledViaOLMIdentifierDocLink,", + "\t\ttrue,", + 
"\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace = AddCatalogEntry(", + "\t\t\"single-or-multi-namespaced-allowed-in-tenant-namespaces\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Verifies that only single/multi namespaced operators are installed in a tenant-dedicated namespace. The test fails if this namespace contains any installed operator with Own/All-namespaced install mode, unlabeled operators, operands of any operator installed elsewhere, or pods unrelated to any operator.`, //nolint:lll", + "\t\tSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceRemediation,", + "\t\tNoExceptions,", + "\t\tTestSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOperatorHasSemanticVersioningIdentifier = AddCatalogEntry(", + "\t\t\"semantic-versioning\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether an application Operator has a valid semantic versioning.`,", + "\t\tOperatorHasSemanticVersioningRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorHasSemanticVersioningIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorOlmSkipRange = AddCatalogEntry(", + "\t\t\"olm-skip-range\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Test that checks the operator has a valid olm skip range.`,", + "\t\tOperatorOlmSkipRangeRemediation,", + "\t\tOperatorSkipRangeExceptionProcess,", + "\t\tTestOperatorOlmSkipRangeDocLink,", + "\t\tfalse,", + 
"\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCrdVersioningIdentifier = AddCatalogEntry(", + "\t\t\"crd-versioning\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether the Operator CRD has a valid versioning.`,", + "\t\tOperatorCrdVersioningRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCrdVersioningIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCrdSchemaIdentifier = AddCatalogEntry(", + "\t\t\"crd-openapi-schema\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether an application Operator CRD is defined with OpenAPI spec.`,", + "\t\tOperatorCrdSchemaIdentifierRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCrdSchemaIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorSingleCrdOwnerIdentifier = AddCatalogEntry(", + "\t\t\"single-crd-owner\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether a CRD is owned by a single Operator.`,", + "\t\tOperatorSingleCrdOwnerRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorSingleCrdOwnerIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorPodsNoHugepages = AddCatalogEntry(", + "\t\t\"pods-no-hugepages\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests that the pods do not have hugepages enabled.`,", + "\t\tOperatorPodsNoHugepagesRemediation,", + "\t\tNoExceptions,", 
+ "\t\tTestOperatorPodsNoHugepagesDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestOperatorCatalogSourceBundleCountIdentifier = AddCatalogEntry(", + "\t\t\"catalogsource-bundle-count\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests operator catalog source bundle count is less than 1000`,", + "\t\tOperatorCatalogSourceBundleCountRemediation,", + "\t\tNoExceptions,", + "\t\tTestOperatorCatalogSourceBundleCountIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestMultipleSameOperatorsIdentifier = AddCatalogEntry(", + "\t\t\"multiple-same-operators\",", + "\t\tcommon.OperatorTestKey,", + "\t\t`Tests whether multiple instances of the same Operator CSV are installed.`,", + "\t\tMultipleSameOperatorsRemediation,", + "\t\tNoExceptions,", + "\t\tTestMultipleSameOperatorsIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodNodeSelectorAndAffinityBestPractices = AddCatalogEntry(", + "\t\t\"pod-scheduling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensures that workload Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. Workloads shall not use node selectors nor taints/tolerations to assign pod location.`,", + "\t\tPodNodeSelectorAndAffinityBestPracticesRemediation,", + "\t\t`Exception will only be considered if application requires specialized hardware. 
Must specify which container requires special hardware and why.`,", + "\t\tTestPodNodeSelectorAndAffinityBestPracticesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodHighAvailabilityBestPractices = AddCatalogEntry(", + "\t\t\"pod-high-availability\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensures that workloads Pods specify podAntiAffinity rules and replica value is set to more than 1.`,", + "\t\tPodHighAvailabilityBestPracticesRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestPodHighAvailabilityBestPracticesDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodClusterRoleBindingsBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"cluster-role-bindings\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that a Pod does not specify ClusterRoleBindings.`,", + "\t\tPodClusterRoleBindingsBestPracticesRemediation,", + "\t\t\"Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles \u0026 role bindings\",", + "\t\tTestPodClusterRoleBindingsBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodDeploymentBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-owner-type\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that the workload Pods are deployed as part of a ReplicaSet(s)/StatefulSet(s).`,", + "\t\tPodDeploymentBestPracticesRemediation,", + "\t\tNoDocumentedProcess+` Pods should not be deployed as DaemonSet or naked pods.`,", + 
"\t\tTestPodDeploymentBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestDeploymentScalingIdentifier = AddCatalogEntry(", + "\t\t\"deployment-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that workload deployments support scale in/out operations. First, the test starts getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA the test is changing the min and max value to deployment Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the deployment/s`, //nolint:lll", + "\t\tDeploymentScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestDeploymentScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestStatefulSetScalingIdentifier = AddCatalogEntry(", + "\t\t\"statefulset-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that workload statefulsets support scale in/out operations. First, the test starts getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. 
In case of statefulsets that are managed by HPA the test is changing the min and max value to statefulset Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the statefulset/s`, //nolint:lll", + "\t\tStatefulSetScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestStatefulSetScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestImagePullPolicyIdentifier = AddCatalogEntry(", + "\t\t\"image-pull-policy\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.`, //nolint:lll", + "\t\tImagePullPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestImagePullPolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodRecreationIdentifier = AddCatalogEntry(", + "\t\t\"pod-recreation\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that a workload is configured to support High Availability. First, this test cordons and drains a Node that hosts the workload Pod. 
Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.`, //nolint:lll", + "\t\tPodRecreationRemediation,", + "\t\t`No exceptions - workloads should be able to be restarted/recreated.`,", + "\t\tTestPodRecreationIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodRoleBindingsBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-role-bindings\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Ensures that a workload does not utilize RoleBinding(s) in a non-workload Namespace.`,", + "\t\tPodRoleBindingsBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodRoleBindingsBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodServiceAccountBestPracticesIdentifier = AddCatalogEntry(", + "\t\t\"pod-service-account\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that each workload Pod utilizes a valid Service Account. Default or empty service account is not valid.`,", + "\t\tPodServiceAccountBestPracticesRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodServiceAccountBestPracticesIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodAutomountServiceAccountIdentifier = AddCatalogEntry(", + "\t\t\"pod-automount-service-account-token\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that all pods under test have automountServiceAccountToken set to false. 
Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true`,", + "\t\tAutomountServiceTokenRemediation,", + "\t\t`Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.`,", + "\t\tTestPodAutomountServiceAccountIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestServicesDoNotUseNodeportsIdentifier = AddCatalogEntry(", + "\t\t\"service-type\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Tests that each workload Service does not utilize NodePort(s).`,", + "\t\tServicesDoNotUseNodeportsRemediation,", + "\t\t`Exception for host resource access tests will only be considered in rare cases where it is absolutely needed`,", + "\t\tTestServicesDoNotUseNodeportsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUnalteredBaseImageIdentifier = AddCatalogEntry(", + "\t\t\"base-image\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Ensures that the Container Base Image is not altered post-startup. 
This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64`, //nolint:lll", + "\t\tUnalteredBaseImageRemediation,", + "\t\tNoExceptions,", + "\t\tTestUnalteredBaseImageIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUnalteredStartupBootParamsIdentifier = AddCatalogEntry(", + "\t\t\"boot-params\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.`,", + "\t\tUnalteredStartupBootParamsRemediation,", + "\t\tNoExceptions,", + "\t\tTestUnalteredStartupBootParamsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestLoggingIdentifier = AddCatalogEntry(", + "\t\t\"container-logging\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. 
These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).`, //nolint:lll", + "\t\tLoggingRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestLoggingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestTerminationMessagePolicyIdentifier = AddCatalogEntry(", + "\t\t\"termination-policy\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. 
For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.`, //nolint:lll", + "\t\tTerminationMessagePolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestTerminationMessagePolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestCrdsStatusSubresourceIdentifier = AddCatalogEntry(", + "\t\t\"crd-status\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).`,", + "\t\tCrdsStatusSubresourceRemediation,", + "\t\tNoExceptions,", + "\t\tTestCrdsStatusSubresourceIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSysctlConfigsIdentifier = AddCatalogEntry(", + "\t\t\"sysctl-config\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured`,", + "\t\tSysctlConfigsRemediation,", + "\t\tNoExceptions,", + "\t\tTestSysctlConfigsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestServiceMeshIdentifier = AddCatalogEntry(", + "\t\t\"service-mesh-usage\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Checks if the istio namespace (\"istio-system\") is present. 
If it is present, checks that the istio sidecar is present in all pods under test.`,", + "\t\tServiceMeshRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestServiceMeshIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOCPLifecycleIdentifier = AddCatalogEntry(", + "\t\t\"ocp-lifecycle\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that the running OCP version is not end of life.`,", + "\t\tOCPLifecycleRemediation,", + "\t\tNoExceptions,", + "\t\tTestOCPLifecycleIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestNodeOperatingSystemIdentifier = AddCatalogEntry(", + "\t\t\"ocp-node-os-lifecycle\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.`,", + "\t\tNodeOperatingSystemRemediation,", + "\t\tNoExceptions,", + "\t\tTestNodeOperatingSystemIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIsRedHatReleaseIdentifier = AddCatalogEntry(", + "\t\t\"isredhat-release\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`verifies if the container base image is redhat.`,", + "\t\tIsRedHatReleaseRemediation,", + "\t\tNoExceptions,", + "\t\tTestIsRedHatReleaseIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + 
"\t\tTagCommon)", + "", + "\tTestClusterOperatorHealth = AddCatalogEntry(", + "\t\t\"cluster-operator-health\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`Tests that all cluster operators are healthy.`,", + "\t\tClusterOperatorHealthRemediation,", + "\t\tNoExceptions,", + "\t\tTestClusterOperatorHealthDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestIsSELinuxEnforcingIdentifier = AddCatalogEntry(", + "\t\t\"is-selinux-enforcing\",", + "\t\tcommon.PlatformAlterationTestKey,", + "\t\t`verifies that all openshift platform/cluster nodes have selinux in \"Enforcing\" mode.`,", + "\t\tIsSELinuxEnforcingRemediation,", + "\t\tNoExceptions,", + "\t\tTestIsSELinuxEnforcingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestUndeclaredContainerPortsUsage = AddCatalogEntry(", + "\t\t\"undeclared-container-ports-usage\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that containers do not listen on ports that weren't declared in their specification. 
Platforms may be configured to block undeclared ports.`,", + "\t\tUndeclaredContainerPortsRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestUndeclaredContainerPortsUsageDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestOCPReservedPortsUsage = AddCatalogEntry(", + "\t\t\"ocp-reserved-ports-usage\",", + "\t\tcommon.NetworkingTestKey,", + "\t\t`Check that containers do not listen on ports that are reserved by OpenShift`,", + "\t\tOCPReservedPortsUsageRemediation,", + "\t\tNoExceptions,", + "\t\tTestOCPReservedPortsUsageDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestLivenessProbeIdentifier = AddCatalogEntry(", + "\t\t\"liveness-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.`, //nolint:lll", + "\t\tLivenessProbeRemediation+` workloads shall self-recover from common failures like pod failure, host failure, and network failure. 
Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.`, //nolint:lll", + "\t\tNoDocumentedProcess,", + "\t\tTestLivenessProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestReadinessProbeIdentifier = AddCatalogEntry(", + "\t\t\"readiness-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have readiness probe defined. There are different ways a pod can stop on on OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.`, //nolint:lll", + "\t\tReadinessProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestReadinessProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestStartupProbeIdentifier = AddCatalogEntry(", + "\t\t\"startup-probe\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that all containers under test have startup probe defined. Workloads shall self-recover from common failures like pod failure, host failure, and network failure. 
Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.`, //nolint:lll", + "\t\tStartupProbeRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestStartupProbeIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestOneProcessPerContainerIdentifier = AddCatalogEntry(", + "\t\t\"one-process-per-container\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that all containers under test have only one process running`,", + "\t\tOneProcessPerContainerRemediation,", + "\t\tNoExceptionProcessForExtendedTests+NotApplicableSNO,", + "\t\tTestOneProcessPerContainerIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestSYSNiceRealtimeCapabilityIdentifier = AddCatalogEntry(", + "\t\t\"sys-nice-realtime-capability\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. 
In the case that a workolad is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.`, //nolint:lll", + "\t\tSYSNiceRealtimeCapabilityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSYSNiceRealtimeCapabilityIdentifierDocLink,", + "\t\tfalse,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestSysPtraceCapabilityIdentifier = AddCatalogEntry(", + "\t\t\"sys-ptrace-capability\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/`, //nolint:lll", + "\t\tSysPtraceCapabilityRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestSysPtraceCapabilityIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPodRequestsIdentifier = AddCatalogEntry(", + "\t\t\"requests\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that containers have resource requests specified in their spec. 
Set proper resource requests based on container use case.`,", + "\t\tRequestsRemediation,", + "\t\tRequestsExceptionProcess,", + "\t\tTestPodRequestsIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestNamespaceResourceQuotaIdentifier = AddCatalogEntry(", + "\t\t\"namespace-resource-quota\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Checks to see if workload pods are running in namespaces that have resource quotas applied.`,", + "\t\tNamespaceResourceQuotaRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestNamespaceResourceQuotaIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestPodDisruptionBudgetIdentifier = AddCatalogEntry(", + "\t\t\"pod-disruption-budget\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable`,", + "\t\tPodDisruptionBudgetRemediation,", + "\t\tNoExceptions,", + "\t\tTestPodDisruptionBudgetIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestAPICompatibilityWithNextOCPReleaseIdentifier = AddCatalogEntry(", + "\t\t\"compatibility-with-next-ocp-release\",", + "\t\tcommon.ObservabilityTestKey,", + "\t\t`Checks to ensure if the APIs the workload uses are compatible with the next OCP version`,", + "\t\tAPICompatibilityWithNextOCPReleaseRemediation,", + "\t\tNoExceptions,", + "\t\tTestAPICompatibilityWithNextOCPReleaseIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: 
Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagCommon)", + "", + "\tTestPodTolerationBypassIdentifier = AddCatalogEntry(", + "\t\t\"pod-toleration-bypass\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.`,", + "\t\tPodTolerationBypassRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPodTolerationBypassIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestPersistentVolumeReclaimPolicyIdentifier = AddCatalogEntry(", + "\t\t\"persistent-volume-reclaim-policy\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Check that the persistent volumes the workloads pods are using have a reclaim policy of delete. Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.`,", + "\t\tPersistentVolumeReclaimPolicyRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestPersistentVolumeReclaimPolicyIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainersImageTag = AddCatalogEntry(", + "\t\t\"containers-image-tag\",", + "\t\tcommon.ManageabilityTestKey,", + "\t\t`Check that image tag exists on containers.`,", + "\t\tContainersImageTagRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestContainersImageTagDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Optional,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestNoSSHDaemonsAllowedIdentifier 
= AddCatalogEntry(", + "\t\t\"ssh-daemons\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t`Check that pods do not run SSH daemons.`,", + "\t\tNoSSHDaemonsAllowedRemediation,", + "\t\t`No exceptions - special consideration can be given to certain containers which run as utility tool daemon`,", + "\t\tTestNoSSHDaemonsAllowedIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestCPUIsolationIdentifier = AddCatalogEntry(", + "\t\t\"cpu-isolation\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`CPU isolation requires: For each container within the pod, resource requests and limits must be identical. If cpu requests and limits are not identical and in whole units (Guaranteed pods with exclusive cpus), your pods will not be tested for compliance. The runTimeClassName must be specified. Annotations required disabling CPU and IRQ load-balancing.`, //nolint:lll", + "\t\tCPUIsolationRemediation,", + "\t\tNoDocumentedProcess,", + "\t\tTestCPUIsolationIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Mandatory,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagTelco)", + "", + "\tTestContainerPortNameFormat = AddCatalogEntry(", + "\t\t\"container-port-name-format\",", + "\t\tcommon.ManageabilityTestKey,", + "\t\t\"Check that the container's ports name follow the naming conventions. Name field in ContainerPort section must be of form `\u003cprotocol\u003e[-\u003csuffix\u003e]`. 
More naming convention requirements may be released in future\",", + "\t\tContainerPortNameFormatRemediation,", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\tTestContainerPortNameFormatDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\tTestCrdScalingIdentifier = AddCatalogEntry(", + "\t\t\"crd-scaling\",", + "\t\tcommon.LifecycleTestKey,", + "\t\t`Tests that a workload's CRD support scale in/out operations. First, the test starts getting the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. In case of crd that are managed by HPA the test is changing the min and max value to crd Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the crd/s`, //nolint:lll", + "\t\tCrdScalingRemediation,", + "\t\tNoDocumentedProcess+NotApplicableSNO,", + "\t\tTestCrdScalingIdentifierDocLink,", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Mandatory,", + "\t\t\tNonTelco: Mandatory,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagCommon,", + "\t)", + "", + "\tTestCrdRoleIdentifier = AddCatalogEntry(", + "\t\t\"crd-roles\",", + "\t\tcommon.AccessControlTestKey,", + "\t\t\"If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.\",", + "\t\t\"Roles providing access to CRDs should not refer to any other api or resources. 
Change the generation of the CRD role accordingly\",", + "\t\tNoExceptionProcessForExtendedTests,", + "\t\t\"https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds\",", + "\t\ttrue,", + "\t\tmap[string]string{", + "\t\t\tFarEdge: Optional,", + "\t\t\tTelco: Optional,", + "\t\t\tNonTelco: Optional,", + "\t\t\tExtended: Mandatory,", + "\t\t},", + "\t\tTagExtended)", + "", + "\t//nolint:gocritic", + "\t// TestPodDeleteIdentifier = AddCatalogEntry(", + "\t// \t\"pod-delete\",", + "\t// \tcommon.ChaosTesting,", + "\t// \t\"Chaos test suite is under construction.\",", + "\t// \t\"\",", + "\t// \tNoDocumentedProcess,", + "\t// \t\"\",", + "\t// \tfalse,", + "\t// \tmap[string]string{", + "\t// \t\tFarEdge: Optional,", + "\t// \t\tTelco: Optional,", + "\t// \t\tNonTelco: Optional,", + "\t// \t\tExtended: Optional,", + "\t// \t},", + "\t// \tTagCommon)", + "", + "\treturn Catalog", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func init() {", + "\tInitCatalog()", + "}" + ] + } + ], + "globals": [ + { + "name": "Catalog", + "exported": true, + "type": "", + "doc": "Catalog is the JUnit testcase catalog of tests.", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:1870" + }, + { + "name": "Classification", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:1871" + }, + { + "name": "ImpactMap", + "exported": true, + "type": "", + "doc": "ImpactMap maps test IDs to their impact statements", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:166" + }, + { + "name": "Test1337UIDIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:94" + }, + { + "name": "TestAPICompatibilityWithNextOCPReleaseIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:185" + }, + { + "name": "TestAffinityRequiredPods", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:102" + }, + { + "name": "TestBpfIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:110" + }, + { + "name": "TestCPUIsolationIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:190" + }, + { + "name": "TestClusterOperatorHealth", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:99" + }, + { + "name": "TestContainerHostPort", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:122" + }, + { + "name": "TestContainerIsCertifiedDigestIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:95" + }, + { + "name": "TestContainerPortNameFormat", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:191" + }, + { + "name": "TestContainerPostStartIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:103" + }, + { + "name": "TestContainerPrestopIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:104" + }, + { + "name": "TestContainersImageTag", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:188" + }, + { + "name": "TestCrdRoleIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:193" + }, + { + "name": "TestCrdScalingIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:192" + }, + { + "name": "TestCrdsStatusSubresourceIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:167" + }, + { + "name": "TestDeploymentScalingIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:155" + }, + { + "name": "TestDpdkCPUPinningExecProbe", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:105" + }, + { + "name": "TestExclusiveCPUPoolIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:112" + }, + { + "name": "TestExclusiveCPUPoolSchedulingPolicy", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:114" + }, + { + "name": "TestHelmIsCertifiedIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:137" + }, + { + "name": "TestHelmVersionIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:96" + }, + { + "name": "TestHugepagesNotManuallyManipulated", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:127" + }, + { + "name": "TestHyperThreadEnable", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:100" + }, + { + "name": "TestICMPv4ConnectivityIdentifier", + "exported": true, + 
"type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:92" + }, + { + "name": "TestICMPv4ConnectivityMultusIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:129" + }, + { + "name": "TestICMPv6ConnectivityIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:128" + }, + { + "name": "TestICMPv6ConnectivityMultusIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:130" + }, + { + "name": "TestIDToClaimID", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:1853" + }, + { + "name": "TestImagePullPolicyIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:157" + }, + { + "name": "TestIpcLockIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:109" + }, + { + "name": "TestIsRedHatReleaseIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:172" + }, + { + "name": "TestIsSELinuxEnforcingIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:173" + }, + { + "name": "TestIsolatedCPUPoolSchedulingPolicy", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:115" + }, + { + "name": "TestLimitedUseOfExecProbesIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:194" + }, + { + "name": 
"TestLivenessProbeIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:176" + }, + { + "name": "TestLoggingIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:165" + }, + { + "name": "TestMultipleSameOperatorsIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:148" + }, + { + "name": "TestNamespaceBestPracticesIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:132" + }, + { + "name": "TestNamespaceResourceQuotaIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:183" + }, + { + "name": "TestNetAdminIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:107" + }, + { + "name": "TestNetRawIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:108" + }, + { + "name": "TestNetworkAttachmentDefinitionSRIOVUsingMTU", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:119" + }, + { + "name": "TestNetworkPolicyDenyAllIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:93" + }, + { + "name": "TestNoSSHDaemonsAllowedIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:189" + }, + { + "name": "TestNodeOperatingSystemIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:171" + }, + { + "name": "TestNonTaintedNodeKernelsIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:133" + }, + { + "name": "TestOCPLifecycleIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:170" + }, + { + "name": "TestOCPReservedPortsUsage", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:175" + }, + { + "name": "TestOneProcessPerContainerIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:179" + }, + { + "name": "TestOperatorAutomountTokens", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:142" + }, + { + "name": "TestOperatorCatalogSourceBundleCountIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:150" + }, + { + "name": "TestOperatorCrdSchemaIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:145" + }, + { + "name": "TestOperatorCrdVersioningIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:144" + }, + { + "name": "TestOperatorHasSemanticVersioningIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:139" + }, + { + "name": "TestOperatorInstallStatusSucceededIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:134" + }, + { + "name": 
"TestOperatorIsCertifiedIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:136" + }, + { + "name": "TestOperatorIsInstalledViaOLMIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:138" + }, + { + "name": "TestOperatorNoSCCAccess", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:135" + }, + { + "name": "TestOperatorOlmSkipRange", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:141" + }, + { + "name": "TestOperatorPodsNoHugepages", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:147" + }, + { + "name": "TestOperatorRunAsNonRoot", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:143" + }, + { + "name": "TestOperatorSingleCrdOwnerIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:146" + }, + { + "name": "TestPersistentVolumeReclaimPolicyIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:187" + }, + { + "name": "TestPodAutomountServiceAccountIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:161" + }, + { + "name": "TestPodClusterRoleBindingsBestPracticesIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:153" + }, + { + "name": "TestPodDeploymentBestPracticesIdentifier", + "exported": true, + "type": "claim.Identifier", + 
"position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:154" + }, + { + "name": "TestPodDisruptionBudgetIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:184" + }, + { + "name": "TestPodHighAvailabilityBestPractices", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:152" + }, + { + "name": "TestPodHostIPC", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:125" + }, + { + "name": "TestPodHostNetwork", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:123" + }, + { + "name": "TestPodHostPID", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:126" + }, + { + "name": "TestPodHostPath", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:124" + }, + { + "name": "TestPodHugePages1G", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:98" + }, + { + "name": "TestPodHugePages2M", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:97" + }, + { + "name": "TestPodNodeSelectorAndAffinityBestPractices", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:151" + }, + { + "name": "TestPodRecreationIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:158" + }, + { + "name": "TestPodRequestsIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:182" + }, + { + "name": "TestPodRoleBindingsBestPracticesIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:159" + }, + { + "name": "TestPodServiceAccountBestPracticesIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:160" + }, + { + "name": "TestPodTolerationBypassIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:186" + }, + { + "name": "TestReadinessProbeIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:177" + }, + { + "name": "TestReservedExtendedPartnerPorts", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:101" + }, + { + "name": "TestRestartOnRebootLabelOnPodsUsingSRIOV", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:117" + }, + { + "name": "TestRtAppNoExecProbes", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:116" + }, + { + "name": "TestSYSNiceRealtimeCapabilityIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:180" + }, + { + "name": "TestSecConNonRootUserIDIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:118" + }, + { + "name": "TestSecConPrivilegeEscalation", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:121" + }, + { + "name": 
"TestSecConReadOnlyFilesystem", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:140" + }, + { + "name": "TestSecContextIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:120" + }, + { + "name": "TestServiceDualStackIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:131" + }, + { + "name": "TestServiceMeshIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:169" + }, + { + "name": "TestServicesDoNotUseNodeportsIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:162" + }, + { + "name": "TestSharedCPUPoolSchedulingPolicy", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:113" + }, + { + "name": "TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:149" + }, + { + "name": "TestStartupProbeIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:178" + }, + { + "name": "TestStatefulSetScalingIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:156" + }, + { + "name": "TestStorageProvisioner", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:111" + }, + { + "name": "TestSysAdminIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:106" + }, + { + "name": "TestSysPtraceCapabilityIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:181" + }, + { + "name": "TestSysctlConfigsIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:168" + }, + { + "name": "TestTerminationMessagePolicyIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:166" + }, + { + "name": "TestUnalteredBaseImageIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:163" + }, + { + "name": "TestUnalteredStartupBootParamsIdentifier", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:164" + }, + { + "name": "TestUndeclaredContainerPortsUsage", + "exported": true, + "type": "claim.Identifier", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:174" + } + ], + "consts": [ + { + "name": "APICompatibilityWithNextOCPReleaseRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:166" + }, + { + "name": "AffiliatedCert", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:51" + }, + { + "name": "AffinityRequiredRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:187" + }, + { + "name": "AutomountServiceTokenRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:21" + }, + { + "name": "BpfCapabilityRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:46" + }, 
+ { + "name": "CPUIsolationRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:179" + }, + { + "name": "CheckStorageProvisionerRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:193" + }, + { + "name": "ClusterOperatorHealthRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:25" + }, + { + "name": "ContainerHostPortRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:52" + }, + { + "name": "ContainerIsCertifiedDigestRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:211" + }, + { + "name": "ContainerPortNameFormatRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:189" + }, + { + "name": "ContainerPostStartIdentifierRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:219" + }, + { + "name": "ContainerPrestopIdentifierRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:221" + }, + { + "name": "ContainersImageTagRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:173" + }, + { + "name": "CrdScalingRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:40" + }, + { + "name": "CrdsStatusSubresourceRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:140" + }, + { + "name": "DeploymentScalingRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:39" + }, + { + "name": "DocOperatorRequirement", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:106" + }, + { + "name": "DpdkCPUPinningExecProbeRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:191" + }, + { + "name": "ElaborateOnWhyItIsNeeded", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:42" + }, + { + "name": "ExclusiveCPUPoolRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:195" + }, + { + "name": "ExclusiveCPUPoolSchedulingPolicyRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:199" + }, + { + "name": "Extended", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:45" + }, + { + "name": "FarEdge", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:42" + }, + { + "name": "HelmIsCertifiedRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:84" + }, + { + "name": "HelmVersionV3Remediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:209" + }, + { + "name": "HugepagesNotManuallyManipulatedRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:62" + }, + { + "name": "HyperThreadEnable", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:217" + }, + { + "name": "ICMPv4ConnectivityMultusRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:68" + }, + { + "name": "ICMPv4ConnectivityRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:64" + }, + { + "name": "ICMPv6ConnectivityMultusRemediation", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:70" + }, + { + "name": "ICMPv6ConnectivityRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:66" + }, + { + "name": "ImagePullPolicyRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:118" + }, + { + "name": "IsRedHatReleaseExceptionProcess", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:29" + }, + { + "name": "IsRedHatReleaseRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:23" + }, + { + "name": "IsSELinuxEnforcingRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:136" + }, + { + "name": "IsolatedCPUPoolSchedulingPolicyRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:201" + }, + { + "name": "LimitedUseOfExecProbesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:183" + }, + { + "name": "LivenessProbeRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:146" + }, + { + "name": "LoggingRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:142" + }, + { + "name": "Mandatory", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:47" + }, + { + "name": "MultipleSameOperatorsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:106" + }, + { + "name": "NamespaceBestPracticesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:74" + }, + { + "name": "NamespaceResourceQuotaRemediation", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:162" + }, + { + "name": "NetworkPolicyDenyAllRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:177" + }, + { + "name": "NoDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:8" + }, + { + "name": "NoDocLinkExtended", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:5" + }, + { + "name": "NoDocLinkFarEdge", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:6" + }, + { + "name": "NoDocLinkTelco", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:7" + }, + { + "name": "NoDocumentedProcess", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:39" + }, + { + "name": "NoExceptionProcessForExtendedTests", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:45" + }, + { + "name": "NoExceptions", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:48" + }, + { + "name": "NoSSHDaemonsAllowedRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:175" + }, + { + "name": "NodeOperatingSystemRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:27" + }, + { + "name": "NonTaintedNodeKernelsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:76" + }, + { + "name": "NonTelco", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:44" + }, + { + "name": "NotApplicableSNO", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:52" + }, + { + "name": "OCPLifecycleRemediation", + 
"exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:37" + }, + { + "name": "OCPReservedPortsUsageRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:158" + }, + { + "name": "OneProcessPerContainerRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:152" + }, + { + "name": "OperatorAutomountTokens", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:94" + }, + { + "name": "OperatorCatalogSourceBundleCountRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:104" + }, + { + "name": "OperatorCrdSchemaIdentifierRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:90" + }, + { + "name": "OperatorCrdVersioningRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:96" + }, + { + "name": "OperatorHasSemanticVersioningRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:88" + }, + { + "name": "OperatorInstallStatusSucceededRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:78" + }, + { + "name": "OperatorIsCertifiedRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:82" + }, + { + "name": "OperatorIsInstalledViaOLMRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:86" + }, + { + "name": "OperatorNoPrivilegesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:80" + }, + { + "name": "OperatorOlmSkipRangeRemediation", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:98" + }, + { + "name": "OperatorPodsNoHugepagesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:102" + }, + { + "name": "OperatorRunAsNonRoot", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:92" + }, + { + "name": "OperatorSingleCrdOwnerRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:100" + }, + { + "name": "OperatorSkipRangeExceptionProcess", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:53" + }, + { + "name": "Optional", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:46" + }, + { + "name": "PersistentVolumeReclaimPolicyRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:171" + }, + { + "name": "PodClusterRoleBindingsBestPracticesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:114" + }, + { + "name": "PodDeploymentBestPracticesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:116" + }, + { + "name": "PodDisruptionBudgetRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:164" + }, + { + "name": "PodHighAvailabilityBestPracticesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:112" + }, + { + "name": "PodHostIPCRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:58" + }, + { + "name": "PodHostNetworkRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:54" + }, + { + "name": "PodHostPIDRemediation", + 
"exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:60" + }, + { + "name": "PodHostPathRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:56" + }, + { + "name": "PodHugePages1GRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:215" + }, + { + "name": "PodHugePages2MRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:213" + }, + { + "name": "PodNodeSelectorAndAffinityBestPracticesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:110" + }, + { + "name": "PodRecreationRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:128" + }, + { + "name": "PodRoleBindingsBestPracticesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:120" + }, + { + "name": "PodServiceAccountBestPracticesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:122" + }, + { + "name": "PodTolerationBypassRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:169" + }, + { + "name": "PreflightAllImageRefsInRelatedImagesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:145" + }, + { + "name": "PreflightBasedOnUbiImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:146" + }, + { + "name": "PreflightBundleImageRefsAreCertifiedImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:147" + }, + { + "name": "PreflightDeployableByOLMImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:148" + }, + { + 
"name": "PreflightFollowsRestrictedNetworkEnablementGuidelinesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:149" + }, + { + "name": "PreflightHasLicenseImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:150" + }, + { + "name": "PreflightHasModifiedFilesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:151" + }, + { + "name": "PreflightHasNoProhibitedPackagesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:152" + }, + { + "name": "PreflightHasProhibitedContainerNameImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:153" + }, + { + "name": "PreflightHasRequiredLabelImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:154" + }, + { + "name": "PreflightHasUniqueTagImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:155" + }, + { + "name": "PreflightLayerCountAcceptableImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:156" + }, + { + "name": "PreflightRequiredAnnotationsImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:157" + }, + { + "name": "PreflightRunAsNonRootImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:158" + }, + { + "name": "PreflightScorecardBasicSpecCheckImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:159" + }, + { + "name": "PreflightScorecardOlmSuiteCheckImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:160" + }, + { + "name": "PreflightSecurityContextConstraintsInCSVImpact", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:161" + }, + { + "name": "PreflightValidateOperatorBundleImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:162" + }, + { + "name": "ReadinessProbeRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:148" + }, + { + "name": "RequestsExceptionProcess", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:56" + }, + { + "name": "RequestsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:160" + }, + { + "name": "ReservedPartnerPortsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:185" + }, + { + "name": "RtAppNoExecProbesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:203" + }, + { + "name": "SRIOVNetworkAttachmentDefinitionMTURemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:207" + }, + { + "name": "SRIOVPodsRestartOnRebootLabelRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:205" + }, + { + "name": "SYSNiceRealtimeCapabilityRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:156" + }, + { + "name": "ScalingRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:134" + }, + { + "name": "SecConCapabilitiesExceptionProcess", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:33" + }, + { + "name": "SecConCapabilitiesRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:44" + }, + { + "name": "SecConNonRootUserExceptionProcess", + 
"exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:31" + }, + { + "name": "SecConNonRootUserRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:29" + }, + { + "name": "SecConPrivilegeRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:48" + }, + { + "name": "SecConReadOnlyFilesystem", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:50" + }, + { + "name": "SecConRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:33" + }, + { + "name": "SecConRunAsNonRootUserRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:31" + }, + { + "name": "ServiceMeshRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:132" + }, + { + "name": "ServicesDoNotUseNodeportsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:124" + }, + { + "name": "SharedCPUPoolSchedulingPolicyRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:197" + }, + { + "name": "SingleOrMultiNamespacedOperatorInstallationInTenantNamespaceRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:108" + }, + { + "name": "StartupProbeRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:150" + }, + { + "name": "StatefulSetScalingRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:42" + }, + { + "name": "SysPtraceCapabilityRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:154" + }, + { + 
"name": "SysctlConfigsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:130" + }, + { + "name": "TagCommon", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:38" + }, + { + "name": "TagExtended", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:39" + }, + { + "name": "TagFarEdge", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:41" + }, + { + "name": "TagPreflight", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:48" + }, + { + "name": "TagTelco", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:40" + }, + { + "name": "Telco", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:43" + }, + { + "name": "TerminationMessagePolicyRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:144" + }, + { + "name": "Test1337UIDIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:26" + }, + { + "name": "Test1337UIDIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:42" + }, + { + "name": "TestAPICompatibilityWithNextOCPReleaseIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:130" + }, + { + "name": "TestAPICompatibilityWithNextOCPReleaseIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:138" + }, + { + "name": "TestAffinityRequiredPodsDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:78" + }, + { + "name": "TestAffinityRequiredPodsImpact", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:93" + }, + { + "name": "TestBpfIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:31" + }, + { + "name": "TestBpfIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:47" + }, + { + "name": "TestCPUIsolationIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:95" + }, + { + "name": "TestCPUIsolationIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:109" + }, + { + "name": "TestClusterOperatorHealthDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:75" + }, + { + "name": "TestClusterOperatorHealthImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:88" + }, + { + "name": "TestContainerHostNetworkDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:36" + }, + { + "name": "TestContainerHostPortDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:35" + }, + { + "name": "TestContainerHostPortImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:51" + }, + { + "name": "TestContainerIsCertifiedDigestIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:57" + }, + { + "name": "TestContainerIsCertifiedDigestIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:72" + }, + { + "name": "TestContainerIsCertifiedIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:58" + }, + { + "name": "TestContainerPortNameFormatDocLink", + "exported": true, + 
"position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:134" + }, + { + "name": "TestContainerPortNameFormatImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:142" + }, + { + "name": "TestContainerPostStartIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:80" + }, + { + "name": "TestContainerPostStartIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:95" + }, + { + "name": "TestContainerPrestopIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:81" + }, + { + "name": "TestContainerPrestopIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:96" + }, + { + "name": "TestContainersImageTagDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:133" + }, + { + "name": "TestContainersImageTagImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:141" + }, + { + "name": "TestCrdScalingIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:96" + }, + { + "name": "TestCrdScalingIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:110" + }, + { + "name": "TestCrdsStatusSubresourceIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:128" + }, + { + "name": "TestCrdsStatusSubresourceIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:136" + }, + { + "name": "TestDeploymentScalingIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:85" + }, + { + "name": 
"TestDeploymentScalingIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:100" + }, + { + "name": "TestDpdkCPUPinningExecProbeDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:14" + }, + { + "name": "TestDpdkCPUPinningExecProbeImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:30" + }, + { + "name": "TestExclusiveCPUPoolIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:99" + }, + { + "name": "TestExclusiveCPUPoolIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:113" + }, + { + "name": "TestExclusiveCPUPoolSchedulingPolicyDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:101" + }, + { + "name": "TestExclusiveCPUPoolSchedulingPolicyImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:115" + }, + { + "name": "TestHelmIsCertifiedIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:59" + }, + { + "name": "TestHelmIsCertifiedIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:74" + }, + { + "name": "TestHelmVersionIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:56" + }, + { + "name": "TestHelmVersionIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:71" + }, + { + "name": "TestHugepagesNotManuallyManipulatedDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:64" + }, + { + "name": "TestHugepagesNotManuallyManipulatedImpact", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:79" + }, + { + "name": "TestHyperThreadEnableDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:69" + }, + { + "name": "TestHyperThreadEnableImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:90" + }, + { + "name": "TestICMPv4ConnectivityIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:11" + }, + { + "name": "TestICMPv4ConnectivityIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:27" + }, + { + "name": "TestICMPv4ConnectivityMultusIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:19" + }, + { + "name": "TestICMPv4ConnectivityMultusIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:35" + }, + { + "name": "TestICMPv6ConnectivityIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:18" + }, + { + "name": "TestICMPv6ConnectivityIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:34" + }, + { + "name": "TestICMPv6ConnectivityMultusIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:20" + }, + { + "name": "TestICMPv6ConnectivityMultusIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:36" + }, + { + "name": "TestImagePullPolicyIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:87" + }, + { + "name": "TestImagePullPolicyIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:102" + }, + { + "name": 
"TestIpcLockIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:29" + }, + { + "name": "TestIpcLockIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:45" + }, + { + "name": "TestIsRedHatReleaseIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:73" + }, + { + "name": "TestIsRedHatReleaseIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:87" + }, + { + "name": "TestIsSELinuxEnforcingIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:74" + }, + { + "name": "TestIsSELinuxEnforcingIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:89" + }, + { + "name": "TestIsolatedCPUPoolSchedulingPolicyDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:102" + }, + { + "name": "TestIsolatedCPUPoolSchedulingPolicyImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:116" + }, + { + "name": "TestLimitedUseOfExecProbesIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:17" + }, + { + "name": "TestLimitedUseOfExecProbesIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:33" + }, + { + "name": "TestLivenessProbeIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:89" + }, + { + "name": "TestLivenessProbeIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:104" + }, + { + "name": "TestLoggingIdentifierDocLink", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:126" + }, + { + "name": "TestLoggingIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:134" + }, + { + "name": "TestMultipleSameOperatorsIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:123" + }, + { + "name": "TestMultipleSameOperatorsIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:131" + }, + { + "name": "TestNamespaceBestPracticesIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:41" + }, + { + "name": "TestNamespaceBestPracticesIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:56" + }, + { + "name": "TestNamespaceResourceQuotaIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:52" + }, + { + "name": "TestNamespaceResourceQuotaIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:66" + }, + { + "name": "TestNetAdminIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:27" + }, + { + "name": "TestNetAdminIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:43" + }, + { + "name": "TestNetRawIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:30" + }, + { + "name": "TestNetRawIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:46" + }, + { + "name": "TestNetworkAttachmentDefinitionSRIOVUsingMTUDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:16" + }, + { + "name": 
"TestNetworkAttachmentDefinitionSRIOVUsingMTUImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:32" + }, + { + "name": "TestNetworkPolicyDenyAllIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:12" + }, + { + "name": "TestNetworkPolicyDenyAllIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:28" + }, + { + "name": "TestNoSSHDaemonsAllowedIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:53" + }, + { + "name": "TestNoSSHDaemonsAllowedIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:68" + }, + { + "name": "TestNodeOperatingSystemIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:72" + }, + { + "name": "TestNodeOperatingSystemIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:86" + }, + { + "name": "TestNonTaintedNodeKernelsIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:65" + }, + { + "name": "TestNonTaintedNodeKernelsIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:80" + }, + { + "name": "TestOCPLifecycleIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:71" + }, + { + "name": "TestOCPLifecycleIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:85" + }, + { + "name": "TestOCPReservedPortsUsageDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:23" + }, + { + "name": "TestOCPReservedPortsUsageImpact", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:39" + }, + { + "name": "TestOneProcessPerContainerIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:48" + }, + { + "name": "TestOneProcessPerContainerIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:62" + }, + { + "name": "TestOperatorAutomountTokensDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:118" + }, + { + "name": "TestOperatorCatalogSourceBundleCountIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:121" + }, + { + "name": "TestOperatorCatalogSourceBundleCountIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:130" + }, + { + "name": "TestOperatorCrdSchemaIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:113" + }, + { + "name": "TestOperatorCrdSchemaIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:127" + }, + { + "name": "TestOperatorCrdVersioningIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:114" + }, + { + "name": "TestOperatorCrdVersioningIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:126" + }, + { + "name": "TestOperatorHasSemanticVersioningIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:112" + }, + { + "name": "TestOperatorHasSemanticVersioningIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:124" + }, + { + "name": "TestOperatorInstallStatusSucceededIdentifierDocLink", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:107" + }, + { + "name": "TestOperatorInstallStatusSucceededIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:120" + }, + { + "name": "TestOperatorIsCertifiedIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:109" + }, + { + "name": "TestOperatorIsCertifiedIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:73" + }, + { + "name": "TestOperatorIsInstalledViaOLMIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:110" + }, + { + "name": "TestOperatorIsInstalledViaOLMIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:122" + }, + { + "name": "TestOperatorNoPrivilegesDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:108" + }, + { + "name": "TestOperatorNoSCCAccessImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:121" + }, + { + "name": "TestOperatorOlmSkipRangeDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:122" + }, + { + "name": "TestOperatorOlmSkipRangeImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:125" + }, + { + "name": "TestOperatorPodsNoHugepagesDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:120" + }, + { + "name": "TestOperatorPodsNoHugepagesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:129" + }, + { + "name": "TestOperatorReadOnlyFilesystemDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:119" + }, + { + "name": 
"TestOperatorRunAsNonRootDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:117" + }, + { + "name": "TestOperatorRunAsUserIDDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:116" + }, + { + "name": "TestOperatorSingleCrdOwnerIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:115" + }, + { + "name": "TestOperatorSingleCrdOwnerIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:128" + }, + { + "name": "TestPersistentVolumeReclaimPolicyIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:94" + }, + { + "name": "TestPersistentVolumeReclaimPolicyIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:108" + }, + { + "name": "TestPodAutomountServiceAccountIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:45" + }, + { + "name": "TestPodAutomountServiceAccountIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:60" + }, + { + "name": "TestPodClusterRoleBindingsBestPracticesIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:42" + }, + { + "name": "TestPodClusterRoleBindingsBestPracticesIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:57" + }, + { + "name": "TestPodDeploymentBestPracticesIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:84" + }, + { + "name": "TestPodDeploymentBestPracticesIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:99" + }, + { + 
"name": "TestPodDisruptionBudgetIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:129" + }, + { + "name": "TestPodDisruptionBudgetIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:137" + }, + { + "name": "TestPodHighAvailabilityBestPracticesDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:83" + }, + { + "name": "TestPodHighAvailabilityBestPracticesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:98" + }, + { + "name": "TestPodHostIPCDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:39" + }, + { + "name": "TestPodHostIPCImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:54" + }, + { + "name": "TestPodHostNetworkDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:37" + }, + { + "name": "TestPodHostNetworkImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:52" + }, + { + "name": "TestPodHostPIDDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:40" + }, + { + "name": "TestPodHostPIDImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:55" + }, + { + "name": "TestPodHostPathDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:38" + }, + { + "name": "TestPodHostPathImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:53" + }, + { + "name": "TestPodHugePages1GDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:63" + }, + { + "name": "TestPodHugePages1GImpact", + "exported": true, + 
"position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:78" + }, + { + "name": "TestPodHugePages2MDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:62" + }, + { + "name": "TestPodHugePages2MImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:77" + }, + { + "name": "TestPodNodeSelectorAndAffinityBestPracticesDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:82" + }, + { + "name": "TestPodNodeSelectorAndAffinityBestPracticesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:97" + }, + { + "name": "TestPodRecreationIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:88" + }, + { + "name": "TestPodRecreationIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:103" + }, + { + "name": "TestPodRequestsIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:51" + }, + { + "name": "TestPodRequestsIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:65" + }, + { + "name": "TestPodRoleBindingsBestPracticesIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:43" + }, + { + "name": "TestPodRoleBindingsBestPracticesIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:58" + }, + { + "name": "TestPodServiceAccountBestPracticesIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:44" + }, + { + "name": "TestPodServiceAccountBestPracticesIdentifierImpact", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:59" + }, + { + "name": "TestPodTolerationBypassIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:93" + }, + { + "name": "TestPodTolerationBypassIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:107" + }, + { + "name": "TestReadinessProbeIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:90" + }, + { + "name": "TestReadinessProbeIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:105" + }, + { + "name": "TestReservedExtendedPartnerPortsDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:13" + }, + { + "name": "TestReservedExtendedPartnerPortsImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:29" + }, + { + "name": "TestRestartOnRebootLabelOnPodsUsingSRIOVDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:15" + }, + { + "name": "TestRestartOnRebootLabelOnPodsUsingSRIOVImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:31" + }, + { + "name": "TestRtAppNoExecProbesDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:103" + }, + { + "name": "TestRtAppNoExecProbesImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:117" + }, + { + "name": "TestSYSNiceRealtimeCapabilityIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:49" + }, + { + "name": "TestSYSNiceRealtimeCapabilityIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:63" + }, + { + 
"name": "TestSecConNonRootUserIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:32" + }, + { + "name": "TestSecConNonRootUserIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:48" + }, + { + "name": "TestSecConPrivilegeEscalationDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:34" + }, + { + "name": "TestSecConPrivilegeEscalationImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:50" + }, + { + "name": "TestSecConReadOnlyFilesystemImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:67" + }, + { + "name": "TestSecContextIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:33" + }, + { + "name": "TestSecContextIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:49" + }, + { + "name": "TestServiceDualStackIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:21" + }, + { + "name": "TestServiceDualStackIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:37" + }, + { + "name": "TestServiceDualStackRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:72" + }, + { + "name": "TestServiceMeshIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:68" + }, + { + "name": "TestServiceMeshIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:84" + }, + { + "name": "TestServicesDoNotUseNodeportsIdentifierDocLink", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:46" + }, + { + "name": "TestServicesDoNotUseNodeportsIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:61" + }, + { + "name": "TestSharedCPUPoolSchedulingPolicyDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:100" + }, + { + "name": "TestSharedCPUPoolSchedulingPolicyImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:114" + }, + { + "name": "TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:111" + }, + { + "name": "TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespaceImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:123" + }, + { + "name": "TestStartupProbeIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:91" + }, + { + "name": "TestStartupProbeIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:106" + }, + { + "name": "TestStatefulSetScalingIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:86" + }, + { + "name": "TestStatefulSetScalingIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:101" + }, + { + "name": "TestStorageProvisionerDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:79" + }, + { + "name": "TestStorageProvisionerImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:94" + }, + { + "name": "TestSysAdminIdentifierDocLink", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:28" + }, + { + "name": "TestSysAdminIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:44" + }, + { + "name": "TestSysPtraceCapabilityIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:50" + }, + { + "name": "TestSysPtraceCapabilityIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:64" + }, + { + "name": "TestSysctlConfigsIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:67" + }, + { + "name": "TestSysctlConfigsIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:83" + }, + { + "name": "TestTerminationMessagePolicyIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:127" + }, + { + "name": "TestTerminationMessagePolicyIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:135" + }, + { + "name": "TestUnalteredBaseImageIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:47" + }, + { + "name": "TestUnalteredBaseImageIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:81" + }, + { + "name": "TestUnalteredStartupBootParamsIdentifierDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:66" + }, + { + "name": "TestUnalteredStartupBootParamsIdentifierImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:82" + }, + { + "name": "TestUndeclaredContainerPortsUsageDocLink", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/doclinks.go:22" + }, + 
{ + "name": "TestUndeclaredContainerPortsUsageImpact", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/impact.go:38" + }, + { + "name": "UID1337Remediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:181" + }, + { + "name": "UnalteredBaseImageExceptionProcess", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/exceptions.go:36" + }, + { + "name": "UnalteredBaseImageRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:35" + }, + { + "name": "UnalteredStartupBootParamsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:126" + }, + { + "name": "UndeclaredContainerPortsRemediation", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/remediation.go:138" + }, + { + "name": "iptablesNftablesImplicitCheck", + "exported": false, + "doc": "shared description text", + "position": "/Users/deliedit/dev/certsuite/tests/identifiers/identifiers.go:28" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "lifecycle", + "files": 2, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/postmortem", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/ownerreference", + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/tolerations", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/volumes", + "k8s.io/api/core/v1", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Registers lifecycle test checks for the certsuite suite\n\nThis routine initializes a checks group dedicated to lifecycle tests and\nattaches a series of individual checks such as container\npre‑stop/post‑start probes, scaling tests, high availability validations,\nand storage provisioning rules. Each check is configured with skip functions\nthat prevent execution when required resources are absent or the test\nenvironment does not meet prerequisites. 
The function logs its activity and\nrelies on helper utilities to populate the checks database.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:77", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": 
"GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersPreStop", + "kind": "function", + "source": [ + "func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\tif cut.Lifecycle == nil || (cut.Lifecycle != nil \u0026\u0026 cut.Lifecycle.PreStop == nil) {", + "\t\t\tcheck.LogError(\"Container %q does not have preStop defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have preStop defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has preStop defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has preStop defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": 
"WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoCrdsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Crds) == 0 {", + "\t\t\treturn true, \"no roles to check\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotIntrusiveSkipFn", + "kind": "function", + "source": [ + "func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !env.IsIntrusive() {", + "\t\t\treturn true, \"not intrusive test\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testScaleCrd", + "kind": "function", + 
"source": [ + "func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.ScaleCrUnderTest {", + "\t\tgroupResourceSchema := env.ScaleCrUnderTest[i].GroupResourceSchema", + "\t\tscaleCr := env.ScaleCrUnderTest[i].Scale", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, scaleCr.Name, scaleCr.Namespace, scaleCr.Kind); hpa != nil {", + "\t\t\tif !scaling.TestScaleHPACrd(\u0026scaleCr, hpa, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"CR has failed the scaling test: %s\", scaleCr.GetName())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"cr has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !scaling.TestScaleCrd(\u0026scaleCr, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"CR has failed the non-HPA scale test: %s\", scaleCr.GetName())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"CR is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR is scalable\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check 
{", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersPostStart", + "kind": "function", + "source": [ + "func testContainersPostStart(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\tif cut.Lifecycle == nil || (cut.Lifecycle != nil \u0026\u0026 cut.Lifecycle.PostStart == nil) {", + "\t\t\tcheck.LogError(\"Container %q does not have postStart defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have postStart defined\", 
false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has postStart defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has postStart defined.\"+", + "\t\t\t\t\"Attention: There is a known upstream bug where a pod with a still-running postStart lifecycle hook that is deleted may not be terminated even after \"+", + "\t\t\t\t\"the terminationGracePeriod k8s bug link: kubernetes/kubernetes#116032\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) 
func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersImagePolicy", + "kind": "function", + "source": [ + "func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.ImagePullPolicy != corev1.PullIfNotPresent {", + "\t\t\tcheck.LogError(\"Container %q is using %q as ImagePullPolicy (compliant containers must use %q)\", cut, cut.ImagePullPolicy, corev1.PullIfNotPresent)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not using IfNotPresent as ImagePullPolicy\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q is using %q as ImagePullPolicy\", cut, cut.ImagePullPolicy)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is using IfNotPresent as ImagePullPolicy\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = 
log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersReadinessProbe", + "kind": "function", + "source": [ + "func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.ReadinessProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have ReadinessProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have ReadinessProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has ReadinessProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has 
ReadinessProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersLivenessProbe", + "kind": "function", + "source": [ + "func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects 
[]*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.LivenessProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have LivenessProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have LivenessProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has LivenessProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has LivenessProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersStartupProbe", + "kind": "function", + "source": [ + "func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.StartupProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have StartupProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have StartupProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has StartupProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has StartupProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: 
\u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodsOwnerReference", + "kind": "function", + "source": [ + "func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\to := ownerreference.NewOwnerReference(put.Pod)", + "\t\to.RunTest(check.GetLogger())", + "\t\tif o.GetResults() != testhelper.SUCCESS {", + "\t\t\tcheck.LogError(\"Pod %q found with non-compliant owner reference\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-compliant owner reference\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has compliant owner reference\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, 
testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has compliant owner reference\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotEnoughWorkersSkipFn", + "kind": "function", + "source": [ + "func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.GetWorkerCount() \u003c minWorkerNodes {", + "\t\t\treturn true, \"not enough nodes to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testHighAvailability", + "kind": "function", + "source": [ + "func 
testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, dp := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", dp.ToString())", + "\t\tif dp.Spec.Replicas == nil || *(dp.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (number of replicas must be greater than 1)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any AffinityRequired pods", + "\t\t//nolint:goconst", + "\t\tif dp.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping Deployment %q with affinity required\", dp.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif dp.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tdp.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (PodAntiAffinity must be defined)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q has valid high availability\", dp.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment has valid high availability\", true))", + "\t\t}", + "\t}", + "\tfor _, st := range env.StatefulSets {", + "\t\tif st.Spec.Replicas == nil || *(st.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (number of replicas must be greater than 1)\", st.ToString())", + 
"\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any AffinityRequired pods", + "\t\tif st.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping StatefulSet %q with affinity required\", st.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif st.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tst.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (PodAntiAffinity must be defined)\", st.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q has valid high availability\", st.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet has valid high availability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotEnoughWorkersSkipFn", + "kind": "function", + "source": [ + "func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.GetWorkerCount() \u003c minWorkerNodes {", + "\t\t\treturn true, \"not enough nodes to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetPodsWithoutAffinityRequiredLabelSkipFn", + "kind": "function", + "source": [ + "func GetPodsWithoutAffinityRequiredLabelSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetPodsWithoutAffinityRequiredLabel()) == 0 {", + "\t\t\treturn true, \"no pods with required affinity label found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodNodeSelectorAndAffinityBestPractices", + "kind": "function", + "source": [ + "func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check *checksdb.Check) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range testPods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcompliantPod := true", + "\t\tif put.HasNodeSelector() {", + "\t\t\tcheck.LogError(\"Pod %q has a node selector. 
Node selector: %v\", put, put.Spec.NodeSelector)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has node selector\", false))", + "\t\t\tcompliantPod = false", + "\t\t}", + "\t\tif put.Spec.Affinity != nil \u0026\u0026 put.Spec.Affinity.NodeAffinity != nil {", + "\t\t\tcheck.LogError(\"Pod %q has a node affinity clause. Node affinity: %v\", put, put.Spec.Affinity.NodeAffinity)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has node affinity\", false))", + "\t\t\tcompliantPod = false", + "\t\t}", + "", + "\t\tif compliantPod {", + "\t\t\tcheck.LogInfo(\"Pod %q has no node selector or affinity\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has no node selector or affinity\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "GetPodsWithoutAffinityRequiredLabel", + "kind": "function" + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func 
GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotEnoughWorkersSkipFn", + "kind": "function", + "source": [ + "func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.GetWorkerCount() \u003c minWorkerNodes {", + "\t\t\treturn true, \"not enough nodes to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotIntrusiveSkipFn", + "kind": "function", + "source": [ + "func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !env.IsIntrusive() {", + "\t\t\treturn true, \"not intrusive test\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", 
+ "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, 
\",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotIntrusiveSkipFn", + "kind": "function", + "source": [ + "func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !env.IsIntrusive() {", + "\t\t\treturn true, \"not intrusive test\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotEnoughWorkersSkipFn", + "kind": "function", + "source": [ + "func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.GetWorkerCount() \u003c minWorkerNodes {", + "\t\t\treturn true, \"not enough nodes to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, 
deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is 
scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotIntrusiveSkipFn", + "kind": "function", + "source": [ + "func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !env.IsIntrusive() {", + "\t\t\treturn true, \"not intrusive test\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNotEnoughWorkersSkipFn", + "kind": "function", + "source": [ + "func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.GetWorkerCount() \u003c minWorkerNodes {", + "\t\t\treturn true, \"not enough nodes to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + 
"\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, name := statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + 
"name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPersistentVolumesSkipFn", + "kind": "function", + "source": [ + "func GetNoPersistentVolumesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.PersistentVolumes) == 0 {", + "\t\t\treturn true, \"no persistent volumes to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodPersistentVolumeReclaimPolicy", + "kind": "function", + "source": [ + "func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment) {", 
+ "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Look through all of the pods, matching their persistent volumes to the list of overall cluster PVs and checking their reclaim status.", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcompliantPod := true", + "\t\t// Loop through all of the volumes attached to the pod.", + "\t\tfor pvIndex := range put.Spec.Volumes {", + "\t\t\t// Skip any volumes that do not have a PVC. No need to test them.", + "\t\t\tif put.Spec.Volumes[pvIndex].PersistentVolumeClaim == nil {", + "\t\t\t\tcheck.LogInfo(\"Pod %q does not have a PVC\", put)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// If the Pod Volume is not tied back to a PVC and corresponding PV that has a reclaim policy of DELETE.", + "\t\t\tif !volumes.IsPodVolumeReclaimPolicyDelete(\u0026put.Spec.Volumes[pvIndex], env.PersistentVolumes, env.PersistentVolumeClaims) {", + "\t\t\t\tcheck.LogError(\"Pod %q with volume %q has been found without a reclaim policy of DELETE.\", put, put.Spec.Volumes[pvIndex].Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod contains volume without a reclaim policy of DELETE\", false).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeName, put.Spec.Volumes[pvIndex].Name).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\tcompliantPod = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif compliantPod {", + "\t\t\tcheck.LogInfo(\"Pod %q complies with volume reclaim policy rules\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod complies with volume reclaim policy rules\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" 
+ ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoGuaranteedPodsWithExclusiveCPUsSkipFn", + "kind": "function", + "source": [ + "func GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetGuaranteedPodsWithExclusiveCPUs()) == 0 {", + "\t\t\treturn true, \"no pods with exclusive CPUs found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testCPUIsolation", + "kind": "function", + "source": [ + "func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Individual requirements we are looking for:", + "\t// - CPU Requests and Limits must be in the form of whole units", + "\t// - Resource Requests and Limits must be provided 
and identical", + "", + "\t// Additional checks if the above pass", + "\t// - 'runtimeClassName' must be specified", + "\t// - Annotations must be provided disabling CPU and IRQ load-balancing.", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.GetGuaranteedPodsWithExclusiveCPUs() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif !put.IsCPUIsolationCompliant() {", + "\t\t\tcheck.LogError(\"Pod %q is not CPU isolated\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not CPU isolated\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is CPU isolated\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is CPU isolated\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + 
"\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoAffinityRequiredPodsSkipFn", + "kind": "function", + "source": [ + "func GetNoAffinityRequiredPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetAffinityRequiredPods()) == 0 {", + "\t\t\treturn true, \"no pods with required affinity found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testAffinityRequiredPods", + "kind": "function", + "source": [ + "func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetAffinityRequiredPods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\t// Check if the pod is Affinity compliant.", + "\t\tresult, err := put.IsAffinityCompliant()", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Pod %q is not Affinity compliant, reason=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not Affinity compliant\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is Affinity compliant\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is Affinity compliant\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", 
+ "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodTolerationBypass", + "kind": "function", + "source": [ + "func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tfor _, t := range put.Spec.Tolerations {", + "\t\t\t// Check if the tolerations fall outside the 'default' and are modified versions", + "\t\t\t// Take also into account the qosClass applied to the pod", + "\t\t\tif tolerations.IsTolerationModified(t, put.Status.QOSClass) {", 
+ "\t\t\t\tcheck.LogError(\"Pod %q has been found with non-default toleration %s/%s which is not allowed.\", put, t.Key, t.Effect)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-default toleration\", false).", + "\t\t\t\t\tAddField(testhelper.TolerationKey, t.Key).", + "\t\t\t\t\tAddField(testhelper.TolerationEffect, string(t.Effect)))", + "\t\t\t\tpodIsCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has default toleration\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has default toleration\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoStorageClassesSkipFn", + "kind": "function", + "source": [ + "func GetNoStorageClassesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.StorageClassList) == 0 {", + "\t\t\treturn true, \"no storage classes found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPersistentVolumeClaimsSkipFn", + "kind": "function", + "source": [ + "func GetNoPersistentVolumeClaimsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.PersistentVolumeClaims) == 0 {", + "\t\t\treturn true, \"no persistent volume claims found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testStorageProvisioner", + "kind": "function", + "source": [ + "func testStorageProvisioner(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst localStorageProvisioner = \"kubernetes.io/no-provisioner\"", + "\tconst lvmProvisioner = \"topolvm.io\"", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tvar StorageClasses = env.StorageClassList", + "\tvar Pvc = env.PersistentVolumeClaims", + "\tsnoSingleLocalStorageProvisionner := \"\"", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + 
"\t\tusesPvcAndStorageClass := false", + "\t\tfor pvIndex := range put.Spec.Volumes {", + "\t\t\t// Skip any nil persistentClaims.", + "\t\t\tvolume := put.Spec.Volumes[pvIndex]", + "\t\t\tif volume.PersistentVolumeClaim == nil {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\t// We have the list of pods/volumes/claims.", + "\t\t\t// Look through the storageClass list for a match.", + "\t\t\tfor i := range Pvc {", + "\t\t\t\tif Pvc[i].Name == put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName \u0026\u0026 Pvc[i].Namespace == put.Namespace {", + "\t\t\t\t\tfor j := range StorageClasses {", + "\t\t\t\t\t\tif Pvc[i].Spec.StorageClassName != nil \u0026\u0026 StorageClasses[j].Name == *Pvc[i].Spec.StorageClassName {", + "\t\t\t\t\t\t\tusesPvcAndStorageClass = true", + "\t\t\t\t\t\t\tcheck.LogDebug(\"Pod %q pvc_name: %s, storageclass_name: %s, provisioner_name: %s\", put, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName,", + "\t\t\t\t\t\t\t\tStorageClasses[j].Name, StorageClasses[j].Provisioner)", + "", + "\t\t\t\t\t\t\tif env.IsSNO() {", + "\t\t\t\t\t\t\t\t// For SNO, only one local storage provisionner is allowed. 
The first local storage provisioner for this pod is assumed to be the only local storage provisioner allowed in the cluster.", + "\t\t\t\t\t\t\t\tif snoSingleLocalStorageProvisionner == \"\" \u0026\u0026", + "\t\t\t\t\t\t\t\t\t(StorageClasses[j].Provisioner == localStorageProvisioner ||", + "\t\t\t\t\t\t\t\t\t\tStorageClasses[j].Provisioner == lvmProvisioner) {", + "\t\t\t\t\t\t\t\t\tsnoSingleLocalStorageProvisionner = StorageClasses[j].Provisioner", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == snoSingleLocalStorageProvisionner {", + "\t\t\t\t\t\t\t\t\tcheck.LogInfo(\"Pod %q: Local storage (no provisioner or lvms) is recommended for SNO clusters.\", put)", + "\t\t\t\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Local storage (no provisioner or lvms) is recommended for SNO clusters.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == localStorageProvisioner || StorageClasses[j].Provisioner == lvmProvisioner {", + "\t\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: A single type of local storage cluster is recommended for single node clusters. Use lvms or kubernetes noprovisioner, but not both.\", put)", + "\t\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\t\t\t\t\"A single type of local storage cluster is recommended for single node clusters. 
Use lvms or kubernetes noprovisioner, but not both.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: Non local storage not recommended in single node clusters.\", put)", + "\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Non local storage not recommended in single node clusters.\", false).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t} else {", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == localStorageProvisioner || StorageClasses[j].Provisioner == lvmProvisioner {", + "\t\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: Local storage provisioner (no provisioner or lvms) not recommended in multinode clusters.\", put)", + "\t\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Local storage provisioner (no provisioner or lvms) not recommended in multinode clusters.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + 
"\t\t\t\t\t\t\t\tcheck.LogInfo(\"Pod %q: Non local storage provisioner recommended in multinode clusters.\", put)", + "\t\t\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Non local storage provisioner recommended in multinode clusters.\", false).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\t// Save as compliant pod in case it's not using any of the existing PVC/StorageClasses of the cluster.", + "\t\t\t// Otherwise, in this cases the check will be marked as skipped.", + "\t\t\t// ToDo: improve this function.", + "\t\t\tif !usesPvcAndStorageClass {", + "\t\t\t\tcheck.LogInfo(\"Pod %q not configured to use local storage\", put)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod not configured to use local storage.\", true))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", 
common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "nameInDeploymentSkipList", + "qualifiedName": "nameInDeploymentSkipList", + "exported": false, + "signature": "func(string, string, []configuration.SkipScalingTestDeploymentsInfo)(bool)", + "doc": "nameInDeploymentSkipList Checks if a deployment is excluded from scaling tests\n\nThe function iterates through a slice of configuration entries that specify\ndeployments to skip, comparing the supplied name and namespace with each\nentry. If an exact match is found, it returns true indicating the deployment\nshould be omitted from further testing. Otherwise, it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:452", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + 
"\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = 
append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func nameInDeploymentSkipList(name, namespace string, list []configuration.SkipScalingTestDeploymentsInfo) bool {", + "\tfor _, l := range list {", + "\t\tif name == l.Name \u0026\u0026 namespace == l.Namespace {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "nameInStatefulSetSkipList", + "qualifiedName": "nameInStatefulSetSkipList", + "exported": false, + "signature": "func(string, string, []configuration.SkipScalingTestStatefulSetsInfo)(bool)", + "doc": "nameInStatefulSetSkipList checks if a StatefulSet should be excluded from scaling tests\n\nThe function iterates over a slice of configuration entries, each containing\na name and namespace pair. 
If the provided StatefulSet matches any entry in\nthe list, it returns true indicating that the test should skip this object;\notherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:467", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for 
statefulset scaling tests", + "\t\tns, name := statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func nameInStatefulSetSkipList(name, namespace string, list []configuration.SkipScalingTestStatefulSetsInfo) bool {", + "\tfor _, l := range list {", + "\t\tif name == l.Name \u0026\u0026 namespace == l.Namespace {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": 
"testAffinityRequiredPods", + "qualifiedName": "testAffinityRequiredPods", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testAffinityRequiredPods Verifies pod affinity compliance for pods requiring node selector or affinity rules\n\nThe routine iterates over all pods flagged as needing affinity, checks each\npod's affinity configuration, logs the outcome, and records compliant and\nnon‑compliant cases in report objects. It aggregates results and sets them\non the test check to summarize compliance status.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:889", + "calls": [ + { + "name": "GetAffinityRequiredPods", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsAffinityCompliant", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + 
"\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetAffinityRequiredPods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\t// Check if the pod is Affinity compliant.", + "\t\tresult, err := put.IsAffinityCompliant()", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Pod %q is not Affinity compliant, reason=%v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not Affinity compliant\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is Affinity compliant\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is Affinity compliant\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testCPUIsolation", + "qualifiedName": "testCPUIsolation", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testCPUIsolation Verifies CPU isolation compliance for guaranteed pods\n\nThe function iterates over all pods that request exclusive CPUs, checking\neach pod’s resource requests, limits, runtime class name, and annotations\nto ensure they meet the criteria for CPU isolation. 
For every pod it logs\nwhether the pod is isolated or not, creating a report object accordingly.\nAfter processing all pods, it sets the test result with lists of compliant\nand non‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:857", + "calls": [ + { + "name": "GetGuaranteedPodsWithExclusiveCPUs", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsCPUIsolationCompliant", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + 
"", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + 
"\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Individual requirements we are looking for:", + "\t// - CPU Requests and Limits must be in the form of whole units", + "\t// - Resource Requests and Limits must be provided and identical", + "", + "\t// Additional checks if the above pass", + "\t// - 'runtimeClassName' must be specified", + "\t// - Annotations must be provided disabling CPU and IRQ load-balancing.", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.GetGuaranteedPodsWithExclusiveCPUs() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tif !put.IsCPUIsolationCompliant() {", + "\t\t\tcheck.LogError(\"Pod %q is not CPU isolated\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not CPU isolated\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is CPU isolated\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is CPU isolated\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersImagePolicy", + "qualifiedName": "testContainersImagePolicy", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersImagePolicy Verifies that all containers use IfNotPresent image pull policy\n\nThe function iterates over each container in the test environment, logging\nits name and checking whether its ImagePullPolicy equals PullIfNotPresent.\nContainers not meeting this requirement are recorded as non‑compliant with\nan error log, while compliant ones are 
logged positively. Finally, it\naggregates the results into two report lists and sets them on the check.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:305", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + 
"\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.ImagePullPolicy != corev1.PullIfNotPresent {", + "\t\t\tcheck.LogError(\"Container %q is using %q as ImagePullPolicy (compliant containers must use %q)\", cut, cut.ImagePullPolicy, corev1.PullIfNotPresent)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not using IfNotPresent as ImagePullPolicy\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q is using %q as ImagePullPolicy\", cut, cut.ImagePullPolicy)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is using IfNotPresent as ImagePullPolicy\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersLivenessProbe", + "qualifiedName": "testContainersLivenessProbe", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersLivenessProbe Verifies that every container has a liveness probe defined\n\nThe function iterates over all containers in the test environment, logging\nwhether each container includes a liveness probe. Containers lacking this\nprobe are recorded as non‑compliant with an explanatory report object;\nthose that have it are marked compliant. 
After processing all containers, the\nresults are stored back into the check for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:350", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + 
"\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.LivenessProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have LivenessProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have LivenessProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has LivenessProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has LivenessProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersPostStart", + "qualifiedName": "testContainersPostStart", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersPostStart Verifies that each container has a postStart lifecycle hook defined\n\nThe function iterates over all containers in the test environment, logging\ninformation about each one. For containers missing a postStart hook it\nrecords a non‑compliant report object; otherwise it records a compliant\nreport with a note on a known upstream bug. 
Finally, it sets the check result\nwith the lists of compliant and non‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:279", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + 
"\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersPostStart(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\tif cut.Lifecycle == nil || (cut.Lifecycle != nil \u0026\u0026 cut.Lifecycle.PostStart == nil) {", + "\t\t\tcheck.LogError(\"Container %q does not have postStart defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have postStart defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has postStart defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has postStart defined.\"+", + "\t\t\t\t\"Attention: There is a known upstream bug where a pod with a still-running postStart lifecycle hook that is deleted may not be terminated even after \"+", + "\t\t\t\t\"the terminationGracePeriod k8s bug link: kubernetes/kubernetes#116032\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersPreStop", + "qualifiedName": "testContainersPreStop", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersPreStop Verifies that containers declare a preStop lifecycle hook\n\nThe routine iterates over all test environment containers, checking whether\neach has a defined preStop hook. Containers missing the hook are logged as\nerrors and recorded in a non‑compliant list; those with the hook are noted\nas compliant. 
Finally, the check result aggregates both lists for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:255", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + 
"\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\tif cut.Lifecycle == nil || (cut.Lifecycle != nil \u0026\u0026 cut.Lifecycle.PreStop == nil) {", + "\t\t\tcheck.LogError(\"Container %q does not have preStop defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have preStop defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has preStop defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has preStop defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersReadinessProbe", + "qualifiedName": "testContainersReadinessProbe", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersReadinessProbe Verifies that each container has a readiness probe\n\nThe routine iterates over all containers in the test environment, logging\nwhether each one defines a readiness probe. Containers lacking this\nconfiguration are recorded as non‑compliant, while those with a probe are\nmarked compliant. 
Finally, the check results are set for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:327", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + 
"\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.ReadinessProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have ReadinessProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have ReadinessProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has ReadinessProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has ReadinessProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersStartupProbe", + "qualifiedName": "testContainersStartupProbe", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersStartupProbe verifies that each container has a StartupProbe configured\n\nThe function walks through all containers in the test environment, logging\ninformation about each one. If a container lacks a StartupProbe it logs an\nerror and records a non‑compliant report object; otherwise it logs success\nand records a compliant object. 
Finally, it sets the check result with the\nlists of compliant and non‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:373", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + 
"\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.StartupProbe == nil {", + "\t\t\tcheck.LogError(\"Container %q does not have StartupProbe defined\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not have StartupProbe defined\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has StartupProbe defined\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has StartupProbe defined\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testDeploymentScaling", + "qualifiedName": "testDeploymentScaling", + "exported": false, + "signature": "func(*provider.TestEnvironment, time.Duration, *checksdb.Check)()", + "doc": "testDeploymentScaling Verifies deployment scalability via HPA or direct scaling\n\nIt iterates through all deployments in the test environment, skipping those\nmanaged by CRDs or listed in a configuration skip list. 
For each remaining\ndeployment, it checks if an associated horizontal pod autoscaler exists; if\nso, it runs the HPA scaling test, otherwise it performs a direct scale test.\nResults are logged and collected into compliant or non‑compliant report\nobjects for later reporting.\n\nnolint:dupl", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:486", + "calls": [ + { + "name": "SetNeedsRefresh", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "IsManaged", + "kind": "function", + "source": [ + "func IsManaged(podSetName string, managedPodSet []configuration.ManagedDeploymentsStatefulsets) bool {", + "\tfor _, ps := range managedPodSet {", + "\t\tif ps.Name == podSetName {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "CheckOwnerReference", + "kind": "function", + "source": [ + "func CheckOwnerReference(ownerReference []apiv1.OwnerReference, crdFilter []configuration.CrdFilter, crds []*apiextv1.CustomResourceDefinition) bool {", + "\tfor _, owner := range ownerReference {", + "\t\tfor _, aCrd := range crds {", + "\t\t\tif aCrd.Spec.Names.Kind == owner.Kind {", + "\t\t\t\tfor _, crdF := range crdFilter {", + "\t\t\t\t\tif strings.HasSuffix(aCrd.Name, crdF.NameSuffix) {", + "\t\t\t\t\t\treturn crdF.Scalable", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "GetOwnerReferences", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": 
"NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "nameInDeploymentSkipList", + "kind": "function", + "source": [ + "func nameInDeploymentSkipList(name, namespace string, list []configuration.SkipScalingTestDeploymentsInfo) bool {", + "\tfor _, l := range list {", + "\t\tif name == l.Name \u0026\u0026 namespace == l.Namespace {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "GetResourceHPA", + "kind": "function", + "source": [ + "func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespace, kind string) *scalingv1.HorizontalPodAutoscaler {", + "\tfor _, hpa := range hpaList {", + "\t\tif hpa.Spec.ScaleTargetRef.Kind == kind \u0026\u0026 hpa.Spec.ScaleTargetRef.Name == name \u0026\u0026 hpa.Namespace == namespace {", + "\t\t\treturn hpa", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHpaDeployment", + "kind": "function", + "source": [ + "func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(deployment.Namespace)", + "\tvar 
min int32", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t} else {", + "\t\tmin = 1", + "\t}", + "\treplicas := int32(1)", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, min, max)", + "\treturn scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, 
timeout, logger)", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleDeployment", + "kind": "function", + "source": [ + "func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlogger.Info(\"Deployment not using HPA: %s:%s\", deployment.Namespace, deployment.Name)", + "\tvar replicas int32", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t} else {", + "\t\treplicas = 1", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif 
!scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + 
"\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, 
deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testHighAvailability", + "qualifiedName": "testHighAvailability", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testHighAvailability Verifies high availability settings for deployments and statefulsets\n\nThe function iterates over all deployments and statefulsets in the test\nenvironment, checking that each has more than one replica and defines pod\nanti‑affinity rules unless an 
\"AffinityRequired\" label is present. It logs\ninformative messages for compliant objects and error messages for\nnon‑compliant ones, creating report entries accordingly. Finally it sets\nthe check result with lists of compliant and non‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:632", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, dp := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", dp.ToString())", + "\t\tif dp.Spec.Replicas == nil || *(dp.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (number of replicas must be greater than 1)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any AffinityRequired pods", + "\t\t//nolint:goconst", + "\t\tif dp.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping Deployment %q with affinity required\", dp.ToString())", + "\t\t\tcontinue", + 
"\t\t}", + "", + "\t\tif dp.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tdp.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {", + "\t\t\tcheck.LogError(\"Deployment %q found without valid high availability (PodAntiAffinity must be defined)\", dp.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q has valid high availability\", dp.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dp.Namespace, dp.Name, \"Deployment has valid high availability\", true))", + "\t\t}", + "\t}", + "\tfor _, st := range env.StatefulSets {", + "\t\tif st.Spec.Replicas == nil || *(st.Spec.Replicas) \u003c= 1 {", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (number of replicas must be greater than 1)\", st.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Skip any AffinityRequired pods", + "\t\tif st.Spec.Template.Labels[\"AffinityRequired\"] == \"true\" {", + "\t\t\tcheck.LogInfo(\"Skipping StatefulSet %q with affinity required\", st.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif st.Spec.Template.Spec.Affinity == nil ||", + "\t\t\tst.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {", + "\t\t\tcheck.LogError(\"StatefulSet %q found without valid high availability (PodAntiAffinity must be defined)\", st.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet found without valid high availability\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q has valid high availability\", 
st.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(st.Namespace, st.Name, \"StatefulSet has valid high availability\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodNodeSelectorAndAffinityBestPractices", + "qualifiedName": "testPodNodeSelectorAndAffinityBestPractices", + "exported": false, + "signature": "func([]*provider.Pod, *checksdb.Check)()", + "doc": "testPodNodeSelectorAndAffinityBestPractices Checks that pods do not use node selectors or affinity\n\nThe routine iterates over a list of pods, logging each one. It flags any pod\nthat specifies a node selector or node affinity as non‑compliant, creating\nreport objects for those cases. Pods lacking both fields are marked compliant\nand reported accordingly. Finally, it records the results in the supplied\ncheck object.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:421", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "HasNodeSelector", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, 
isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + 
"\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", 
+ "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check *checksdb.Check) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range testPods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcompliantPod := true", + "\t\tif put.HasNodeSelector() {", + "\t\t\tcheck.LogError(\"Pod %q has a node selector. 
Node selector: %v\", put, put.Spec.NodeSelector)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has node selector\", false))", + "\t\t\tcompliantPod = false", + "\t\t}", + "\t\tif put.Spec.Affinity != nil \u0026\u0026 put.Spec.Affinity.NodeAffinity != nil {", + "\t\t\tcheck.LogError(\"Pod %q has a node affinity clause. Node affinity: %v\", put, put.Spec.Affinity.NodeAffinity)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has node affinity\", false))", + "\t\t\tcompliantPod = false", + "\t\t}", + "", + "\t\tif compliantPod {", + "\t\t\tcheck.LogInfo(\"Pod %q has no node selector or affinity\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has no node selector or affinity\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodPersistentVolumeReclaimPolicy", + "qualifiedName": "testPodPersistentVolumeReclaimPolicy", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodPersistentVolumeReclaimPolicy Verifies that all pod volumes use persistent volumes with a DELETE reclaim policy\n\nThe function iterates over each pod in the test environment, examining every\nvolume attached to the pod. For volumes backed by a PersistentVolumeClaim, it\nchecks whether the corresponding PersistentVolume has a reclaim policy of\nDelete; non‑compliant cases are recorded with detailed fields. 
Finally,\ncompliant and non‑compliant results are aggregated into the check’s\nreport.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:813", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/volumes", + "name": "IsPodVolumeReclaimPolicyDelete", + "kind": "function", + "source": [ + "func IsPodVolumeReclaimPolicyDelete(vol *corev1.Volume, pvs []corev1.PersistentVolume, pvcs []corev1.PersistentVolumeClaim) bool {", + "\t// Check if the Volume is bound to a PVC.", + "\tif putPVC := getPVCFromSlice(pvcs, vol.PersistentVolumeClaim.ClaimName); putPVC != nil {", + "\t\t// Loop through the PersistentVolumes in the cluster, looking for bound PV/PVCs.", + "\t\tfor pvIndex := range pvs {", + "\t\t\t// Check to make sure its reclaim policy is DELETE.", + "\t\t\tif putPVC.Spec.VolumeName == pvs[pvIndex].Name \u0026\u0026 pvs[pvIndex].Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error 
{", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + 
"\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass 
test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Look through all of the pods, matching their persistent volumes to the list of overall cluster PVs and checking their reclaim status.", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcompliantPod := true", + "\t\t// Loop through all of the volumes attached to the pod.", + "\t\tfor pvIndex := range put.Spec.Volumes {", + "\t\t\t// Skip any volumes that do not have a PVC. 
No need to test them.", + "\t\t\tif put.Spec.Volumes[pvIndex].PersistentVolumeClaim == nil {", + "\t\t\t\tcheck.LogInfo(\"Pod %q does not have a PVC\", put)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// If the Pod Volume is not tied back to a PVC and corresponding PV that has a reclaim policy of DELETE.", + "\t\t\tif !volumes.IsPodVolumeReclaimPolicyDelete(\u0026put.Spec.Volumes[pvIndex], env.PersistentVolumes, env.PersistentVolumeClaims) {", + "\t\t\t\tcheck.LogError(\"Pod %q with volume %q has been found without a reclaim policy of DELETE.\", put, put.Spec.Volumes[pvIndex].Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod contains volume without a reclaim policy of DELETE\", false).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeName, put.Spec.Volumes[pvIndex].Name).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\tcompliantPod = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif compliantPod {", + "\t\t\tcheck.LogInfo(\"Pod %q complies with volume reclaim policy rules\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod complies with volume reclaim policy rules\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodTolerationBypass", + "qualifiedName": "testPodTolerationBypass", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodTolerationBypass Verifies that pod tolerations remain default\n\nThe routine iterates over each pod in the test environment, checking every\ntoleration against the Kubernetes default set and whether it has been altered\nfor the pod's QoS class. 
If a non‑default or modified toleration is found,\nit records the pod as non‑compliant and logs an error; otherwise it marks\nthe pod compliant. Finally, the check aggregates all compliant and\nnon‑compliant reports into the test result.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:915", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/tolerations", + "name": "IsTolerationModified", + "kind": "function", + "source": [ + "func IsTolerationModified(t corev1.Toleration, qosClass corev1.PodQOSClass) bool {", + "\tconst (", + "\t\tnotReadyStr = \"node.kubernetes.io/not-ready\"", + "\t\tunreachableStr = \"node.kubernetes.io/unreachable\"", + "\t\tmemoryPressureStr = \"node.kubernetes.io/memory-pressure\"", + "\t)", + "\t// Check each of the tolerations to make sure they are the default tolerations added by k8s:", + "\t// tolerations:", + "\t// - effect: NoExecute", + "\t// key: node.kubernetes.io/not-ready", + "\t// operator: Exists", + "\t// tolerationSeconds: 300", + "\t// - effect: NoExecute", + "\t// key: node.kubernetes.io/unreachable", + "\t// operator: Exists", + "\t// tolerationSeconds: 300", + "\t// # this last one, only if QoS class for the pod is different than BestEffort", + "\t// - effect: NoSchedule", + "\t// key: node.kubernetes.io/memory-pressure", + "\t// operator: Exists", + "", + "\t// Short circuit. 
Anything that is not 'node.kubernetes.io' is considered a modified toleration immediately.", + "\tif !IsTolerationDefault(t) {", + "\t\treturn true", + "\t}", + "", + "\tswitch t.Effect {", + "\tcase corev1.TaintEffectNoExecute:", + "\t\tif t.Key == notReadyStr || t.Key == unreachableStr {", + "\t\t\t// 300 seconds is the default, return false for not modified", + "\t\t\tif t.Operator == corev1.TolerationOpExists \u0026\u0026 t.TolerationSeconds != nil \u0026\u0026 *t.TolerationSeconds == int64(tolerationSecondsDefault) {", + "\t\t\t\treturn false", + "\t\t\t}", + "", + "\t\t\t// Toleration seconds has been modified, return true.", + "\t\t\treturn true", + "\t\t}", + "\tcase corev1.TaintEffectNoSchedule:", + "\t\t// If toleration is NoSchedule - node.kubernetes.io/memory-pressure - Exists and the QoS class for", + "\t\t// the pod is different than BestEffort, it is also a default toleration added by k8s", + "\t\tif (t.Key == memoryPressureStr) \u0026\u0026", + "\t\t\t(t.Operator == corev1.TolerationOpExists) \u0026\u0026", + "\t\t\t(qosClass != corev1.PodQOSBestEffort) {", + "\t\t\treturn false", + "\t\t}", + "\tcase corev1.TaintEffectPreferNoSchedule:", + "\t\t// PreferNoSchedule is not a default toleration added by k8s", + "\t\treturn true", + "\t}", + "", + "\t// Check through the list of non-compliant tolerations to see if anything snuck by the above short circuit", + "\tfor _, nct := range nonCompliantTolerations {", + "\t\tif t.Effect == nct {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant 
bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the 
lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + 
"", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tfor _, t := range put.Spec.Tolerations {", + "\t\t\t// Check if the tolerations fall outside the 'default' and are modified versions", + "\t\t\t// Take also into account the qosClass applied to the pod", + "\t\t\tif tolerations.IsTolerationModified(t, put.Status.QOSClass) {", + "\t\t\t\tcheck.LogError(\"Pod %q has been found with non-default toleration %s/%s which is not 
allowed.\", put, t.Key, t.Effect)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-default toleration\", false).", + "\t\t\t\t\tAddField(testhelper.TolerationKey, t.Key).", + "\t\t\t\t\tAddField(testhelper.TolerationEffect, string(t.Effect)))", + "\t\t\t\tpodIsCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has default toleration\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has default toleration\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodsOwnerReference", + "qualifiedName": "testPodsOwnerReference", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodsOwnerReference Verifies that each pod’s owner reference follows best‑practice rules\n\nThe function iterates over all pods in the test environment, creating an\nOwnerReference object for each one. It runs a compliance check on the\nreference; non‑compliant pods are logged and recorded as failures, while\ncompliant ones are noted as successes. 
Finally, it records the results with\nthe test framework.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:396", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/ownerreference", + "name": "NewOwnerReference", + "kind": "function", + "source": [ + "func NewOwnerReference(put *corev1.Pod) *OwnerReference {", + "\to := OwnerReference{", + "\t\tput: put,", + "\t\tresult: testhelper.ERROR,", + "\t}", + "\treturn \u0026o", + "}" + ] + }, + { + "name": "RunTest", + "kind": "function" + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "GetResults", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", 
+ "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\to := ownerreference.NewOwnerReference(put.Pod)", + "\t\to.RunTest(check.GetLogger())", + "\t\tif o.GetResults() != testhelper.SUCCESS {", + "\t\t\tcheck.LogError(\"Pod %q found with non-compliant owner reference\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-compliant owner reference\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has compliant owner reference\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has compliant owner reference\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodsRecreation", + "qualifiedName": "testPodsRecreation", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodsRecreation Verifies pods in deployments and statefulsets are recreated after node loss\n\nThe function drains each node used by pod sets, ensuring that pods belonging\nto deployments or statefulsets are rescheduled and become ready again. It\nfirst confirms all pod sets are initially ready, then iterates over nodes,\ncordoning them, counting affected pods, performing a safe drain, and finally\nuncordoning the node. 
Any failure in readiness or draining results in\nnon‑compliant reports; if all succeed, compliant objects are recorded.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:693", + "calls": [ + { + "name": "LogDebug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/postmortem", + "name": "Log", + "kind": "function", + "source": [ + "func Log() (out string) {", + "\t// Get current environment", + "\tenv := provider.GetTestEnvironment()", + "", + "\t// Set refresh", + "\tenv.SetNeedsRefresh()", + "", + "\t// Get up-to-date environment", + "\tenv = provider.GetTestEnvironment()", + "", + "\tout += \"\\nNode Status:\\n\"", + "\tfor _, n := range env.Nodes {", + "\t\tout += fmt.Sprintf(\"node name=%s taints=%+v\", n.Data.Name, n.Data.Spec.Taints) + \"\\n\"", + "\t}", + "\tout += \"\\nPending Pods:\\n\"", + "\tfor _, p := range env.AllPods {", + "\t\tif p.Status.Phase != corev1.PodSucceeded \u0026\u0026 p.Status.Phase != corev1.PodRunning {", + "\t\t\tout += p.String() + \"\\n\"", + "\t\t}", + "\t}", + "\tout += \"\\nAbnormal events:\\n\"", + "\tfor _, e := range env.AbnormalEvents {", + "\t\tout += e.String() + \"\\n\"", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "SetNeedsRefresh", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Duration", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForAllPodSetsReady", + "kind": "function", + "source": [ + "func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) (", + "\tnotReadyDeployments []*provider.Deployment,", + "\tnotReadyStatefulSets 
[]*provider.StatefulSet) {", + "\tconst queryInterval = 15 * time.Second", + "", + "\tdeploymentsToCheck := env.Deployments", + "\tstatefulSetsToCheck := env.StatefulSets", + "", + "\tlogger.Info(\"Waiting %s for %d podsets to be ready.\", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck))", + "\tfor startTime := time.Now(); time.Since(startTime) \u003c timeout; {", + "\t\tlogger.Info(\"Checking Deployments readiness of Deployments %v\", getDeploymentsInfo(deploymentsToCheck))", + "\t\tnotReadyDeployments = getNotReadyDeployments(deploymentsToCheck)", + "", + "\t\tlogger.Info(\"Checking StatefulSets readiness of StatefulSets %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "\t\tnotReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck)", + "", + "\t\tlogger.Info(\"Not ready Deployments: %v\", getDeploymentsInfo(notReadyDeployments))", + "\t\tlogger.Info(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(notReadyStatefulSets))", + "", + "\t\tdeploymentsToCheck = notReadyDeployments", + "\t\tstatefulSetsToCheck = notReadyStatefulSets", + "", + "\t\tif len(deploymentsToCheck) == 0 \u0026\u0026 len(statefulSetsToCheck) == 0 {", + "\t\t\t// No more podsets to check.", + "\t\t\tbreak", + "\t\t}", + "", + "\t\ttime.Sleep(queryInterval)", + "\t}", + "", + "\t// Here, either we reached the timeout or there's no more not-ready deployments or statefulsets.", + "\tlogger.Error(\"Not ready Deployments: %v\", getDeploymentsInfo(deploymentsToCheck))", + "\tlogger.Error(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "", + "\treturn deploymentsToCheck, statefulSetsToCheck", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + 
"source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "IsRuntimeClassNameSpecified", + "kind": "function" + }, + { + "name": "HasNodeSelector", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "GetAllNodesForAllPodSets", + "kind": "function", + "source": [ + "func GetAllNodesForAllPodSets(pods []*provider.Pod) (nodes map[string]bool) {", + "\tnodes = make(map[string]bool)", + "\tfor _, put := range pods 
{", + "\t\tfor _, or := range put.OwnerReferences {", + "\t\t\tif or.Kind != ReplicaSetString \u0026\u0026 or.Kind != StatefulsetString {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tnodes[put.Spec.NodeName] = true", + "\t\t\tbreak", + "\t\t}", + "\t}", + "\treturn nodes", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CordonCleanup", + "kind": "function", + "source": [ + "func CordonCleanup(node string, check *checksdb.Check) {", + "\terr := CordonHelper(node, Uncordon)", + "\tif err != nil {", + "\t\tcheck.Abort(fmt.Sprintf(\"cleanup: error uncordoning the node: %s, err=%s\", node, err))", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CordonHelper", + "kind": "function", + "source": [ + "func CordonHelper(name, operation string) error {", + "\tclients := clientsholder.GetClientsHolder()", + "", + "\tlog.Info(\"Performing %s operation on node %s\", operation, name)", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Fetch node object", + "\t\tnode, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tswitch operation {", + "\t\tcase Cordon:", + "\t\t\tnode.Spec.Unschedulable = true", + "\t\tcase Uncordon:", + "\t\t\tnode.Spec.Unschedulable = false", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"cordonHelper: Unsupported operation:%s\", operation)", + "\t\t}", + "\t\t// Update the node", + "\t\t_, err = clients.K8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})", + "\t\treturn err", + "\t})", + "\tif retryErr != nil {", + "\t\tlog.Error(\"can not %s node: %s, err=%v\", operation, name, retryErr)", + "\t}", + "\treturn retryErr", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": 
"function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CountPodsWithDelete", + "kind": "function", + "source": [ + "func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error) {", + "\tcount = 0", + "\tvar wg sync.WaitGroup", + "\tfor _, put := range pods {", + "\t\t_, isDeployment := put.Labels[\"pod-template-hash\"]", + "\t\t_, isStatefulset := put.Labels[\"controller-revision-hash\"]", + "\t\tif put.Spec.NodeName == nodeName \u0026\u0026", + "\t\t\t(isDeployment || isStatefulset) {", + "\t\t\tif skipDaemonPod(put.Pod) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tcount++", + "\t\t\tif mode == NoDelete {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\terr := deletePod(put.Pod, mode, \u0026wg)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error deleting %s\", put)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\twg.Wait()", + "\treturn count, nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Duration", + "kind": "function" + }, + { + "name": 
"LogDebug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CountPodsWithDelete", + "kind": "function", + "source": [ + "func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error) {", + "\tcount = 0", + "\tvar wg sync.WaitGroup", + "\tfor _, put := range pods {", + "\t\t_, isDeployment := put.Labels[\"pod-template-hash\"]", + "\t\t_, isStatefulset := put.Labels[\"controller-revision-hash\"]", + "\t\tif put.Spec.NodeName == nodeName \u0026\u0026", + "\t\t\t(isDeployment || isStatefulset) {", + "\t\t\tif skipDaemonPod(put.Pod) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tcount++", + "\t\t\tif mode == NoDelete {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\terr := deletePod(put.Pod, mode, \u0026wg)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error deleting %s\", put)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\twg.Wait()", + "\treturn count, nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForAllPodSetsReady", + "kind": "function", + "source": [ + "func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) (", + "\tnotReadyDeployments []*provider.Deployment,", + "\tnotReadyStatefulSets []*provider.StatefulSet) {", + "\tconst queryInterval = 15 * time.Second", + "", + "\tdeploymentsToCheck := env.Deployments", + "\tstatefulSetsToCheck := 
env.StatefulSets", + "", + "\tlogger.Info(\"Waiting %s for %d podsets to be ready.\", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck))", + "\tfor startTime := time.Now(); time.Since(startTime) \u003c timeout; {", + "\t\tlogger.Info(\"Checking Deployments readiness of Deployments %v\", getDeploymentsInfo(deploymentsToCheck))", + "\t\tnotReadyDeployments = getNotReadyDeployments(deploymentsToCheck)", + "", + "\t\tlogger.Info(\"Checking StatefulSets readiness of StatefulSets %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "\t\tnotReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck)", + "", + "\t\tlogger.Info(\"Not ready Deployments: %v\", getDeploymentsInfo(notReadyDeployments))", + "\t\tlogger.Info(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(notReadyStatefulSets))", + "", + "\t\tdeploymentsToCheck = notReadyDeployments", + "\t\tstatefulSetsToCheck = notReadyStatefulSets", + "", + "\t\tif len(deploymentsToCheck) == 0 \u0026\u0026 len(statefulSetsToCheck) == 0 {", + "\t\t\t// No more podsets to check.", + "\t\t\tbreak", + "\t\t}", + "", + "\t\ttime.Sleep(queryInterval)", + "\t}", + "", + "\t// Here, either we reached the timeout or there's no more not-ready deployments or statefulsets.", + "\tlogger.Error(\"Not ready Deployments: %v\", getDeploymentsInfo(deploymentsToCheck))", + "\tlogger.Error(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "", + "\treturn deploymentsToCheck, statefulSetsToCheck", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func 
NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CordonHelper", + "kind": "function", + "source": [ + "func CordonHelper(name, operation string) error {", + "\tclients := clientsholder.GetClientsHolder()", + "", + "\tlog.Info(\"Performing %s operation on node %s\", operation, name)", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Fetch node object", + "\t\tnode, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tswitch operation {", + "\t\tcase Cordon:", + "\t\t\tnode.Spec.Unschedulable = true", + "\t\tcase Uncordon:", + "\t\t\tnode.Spec.Unschedulable = false", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"cordonHelper: Unsupported operation:%s\", operation)", + "\t\t}", + "\t\t// Update the node", + "\t\t_, err = clients.K8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})", + "\t\treturn err", + "\t})", 
+ "\tif retryErr != nil {", + "\t\tlog.Error(\"can not %s node: %s, err=%v\", operation, name, retryErr)", + "\t}", + "\treturn retryErr", + "}" + ] + }, + { + "name": "LogFatal", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewDeploymentReportObject", + "kind": "function", + "source": [ + "func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, DeploymentType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(DeploymentName, aDeploymentName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { 
//nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. 
Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + }, + { + "name": "testScaleCrd", + "qualifiedName": "testScaleCrd", + "exported": false, + "signature": "func(*provider.TestEnvironment, time.Duration, *checksdb.Check)()", + "doc": "testScaleCrd Evaluates scaling behavior of custom resources\n\nThis function iterates over all custom resources scheduled for testing,\nchecks if an HPA exists for each, and runs the appropriate scaling test. It\nrecords compliant or non‑compliant results in report objects and logs\nerrors when scaling fails. 
Finally, it stores the outcome in the check\nresult.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:541", + "calls": [ + { + "name": "SetNeedsRefresh", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "GetResourceHPA", + "kind": "function", + "source": [ + "func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespace, kind string) *scalingv1.HorizontalPodAutoscaler {", + "\tfor _, hpa := range hpaList {", + "\t\tif hpa.Spec.ScaleTargetRef.Kind == kind \u0026\u0026 hpa.Spec.ScaleTargetRef.Name == name \u0026\u0026 hpa.Namespace == namespace {", + "\t\t\treturn hpa", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHPACrd", + "kind": "function", + "source": [ + "func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif cr == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\tnamespace := cr.GetNamespace()", + "", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", + "\treplicas := cr.Spec.Replicas", + "\tname := cr.GetName()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA 
%s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, min, hpa.Spec.MaxReplicas)", + "\treturn scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "GetName", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCrdReportObject", + "kind": "function", + "source": [ + "func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleCrd", + "kind": "function", + "source": [ + "func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif crScale == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\treplicas := crScale.Spec.Replicas", + "\tname := crScale.GetName()", + "\tnamespace := crScale.GetNamespace()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "GetName", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCrdReportObject", + "kind": "function", + "source": [ + "func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCrdReportObject", + "kind": "function", + "source": [ + "func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn 
nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range 
env.ScaleCrUnderTest {", + "\t\tgroupResourceSchema := env.ScaleCrUnderTest[i].GroupResourceSchema", + "\t\tscaleCr := env.ScaleCrUnderTest[i].Scale", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, scaleCr.Name, scaleCr.Namespace, scaleCr.Kind); hpa != nil {", + "\t\t\tif !scaling.TestScaleHPACrd(\u0026scaleCr, hpa, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"CR has failed the scaling test: %s\", scaleCr.GetName())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"cr has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !scaling.TestScaleCrd(\u0026scaleCr, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"CR has failed the non-HPA scale test: %s\", scaleCr.GetName())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"CR is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR is scalable\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testStatefulSetScaling", + "qualifiedName": "testStatefulSetScaling", + "exported": false, + "signature": "func(*provider.TestEnvironment, time.Duration, *checksdb.Check)()", + "doc": "testStatefulSetScaling Verifies scaling behavior of StatefulSets\n\nThis routine iterates over all StatefulSet resources in the test environment,\nskipping those that are managed by CRDs or configured to be excluded from\nscaling tests. For each remaining set it checks whether an HPA controls it;\nif so, it runs a dedicated HPA scaling test, otherwise it scales the\nStatefulSet directly. 
Results of compliant and non‑compliant objects are\ncollected and reported back to the check framework.\n\nnolint:dupl", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:576", + "calls": [ + { + "name": "SetNeedsRefresh", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "IsManaged", + "kind": "function", + "source": [ + "func IsManaged(podSetName string, managedPodSet []configuration.ManagedDeploymentsStatefulsets) bool {", + "\tfor _, ps := range managedPodSet {", + "\t\tif ps.Name == podSetName {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "CheckOwnerReference", + "kind": "function", + "source": [ + "func CheckOwnerReference(ownerReference []apiv1.OwnerReference, crdFilter []configuration.CrdFilter, crds []*apiextv1.CustomResourceDefinition) bool {", + "\tfor _, owner := range ownerReference {", + "\t\tfor _, aCrd := range crds {", + "\t\t\tif aCrd.Spec.Names.Kind == owner.Kind {", + "\t\t\t\tfor _, crdF := range crdFilter {", + "\t\t\t\t\tif strings.HasSuffix(aCrd.Name, crdF.NameSuffix) {", + "\t\t\t\t\t\treturn crdF.Scalable", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "GetOwnerReferences", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + 
"\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "nameInStatefulSetSkipList", + "kind": "function", + "source": [ + "func nameInStatefulSetSkipList(name, namespace string, list []configuration.SkipScalingTestStatefulSetsInfo) bool {", + "\tfor _, l := range list {", + "\t\tif name == l.Name \u0026\u0026 namespace == l.Namespace {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "GetResourceHPA", + "kind": "function", + "source": [ + "func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespace, kind string) *scalingv1.HorizontalPodAutoscaler {", + "\tfor _, hpa := range hpaList {", + "\t\tif hpa.Spec.ScaleTargetRef.Kind == kind \u0026\u0026 hpa.Spec.ScaleTargetRef.Name == name \u0026\u0026 hpa.Namespace == namespace {", + "\t\t\treturn hpa", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHpaStatefulSet", + "kind": "function", + "source": [ + "func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpaName := hpa.Name", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", 
+ "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, max)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, max)", + "\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, min, max, timeout, logger)", + "\treturn pass", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleStatefulSet", + "kind": "function", + "source": [ + "func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\tssClients := clients.K8sClient.AppsV1().StatefulSets(namespace)", + "\tlogger.Debug(\"Scale statefulset not using HPA %s:%s\", namespace, name)", + "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif 
!scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewStatefulSetReportObject", + "kind": "function", + "source": [ + "func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, StatefulSetType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(StatefulSetName, aStatefulSetName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + 
"name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", 
+ "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best 
practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) 
error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + 
"\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, name := statefulSet.Namespace, statefulSet.Name", + 
"\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testStorageProvisioner", + "qualifiedName": "testStorageProvisioner", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testStorageProvisioner Verifies pod storage provisioning compliance across cluster types\n\nThe function iterates over all pods, inspecting each volume that references a\npersistent volume claim. 
For every matched claim it looks up the associated\nstorage class to determine its provisioner type. Based on whether the\nenvironment is single‑node or multi‑node and whether local is used, it\nlogs compliance or non‑compliance and records the outcome in report\nobjects. Finally, it sets the check result with lists of compliant and\nnon‑compliant pods.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:954", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "IsSNO", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + 
}, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, 
aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.LifecycleTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.LifecycleTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Prestop test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPrestopIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPreStop(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Scale CRD test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoCrdsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\t// Note: We skip this test because 'testHighAvailability' in the lifecycle suite is already", + "\t\t\t// testing the replicas and antiaffinity rules that should already be in place for crd.", + "\t\t\ttestScaleCrd(\u0026env, 
timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Poststart test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPostStartIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersPostStart(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Image pull policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestImagePullPolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Readiness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReadinessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersReadinessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Liveness probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLivenessProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLivenessProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Startup probe test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStartupProbeIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersStartupProbe(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod owner reference test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDeploymentBestPracticesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsOwnerReference(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// High availability test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHighAvailabilityBestPractices)).", + "\t\tWithSkipCheckFn(testhelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHighAvailability(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Selector and affinity best practices test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodNodeSelectorAndAffinityBestPractices)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetPodsWithoutAffinityRequiredLabelSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodNodeSelectorAndAffinityBestPractices(env.GetPodsWithoutAffinityRequiredLabel(), c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod recreation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodRecreationIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle),", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodsRecreation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Deployment scaling test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDeploymentScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDeploymentScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Statefulset scaling test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStatefulSetScalingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNotIntrusiveSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNotEnoughWorkersSkipFn(\u0026env, minWorkerNodesForLifecycle)).", + "\t\tWithSkipCheckFn(skipIfNoPodSetsetsUnderTest).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStatefulSetScaling(\u0026env, timeout, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Persistent volume reclaim policy test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPersistentVolumeReclaimPolicyIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPersistentVolumesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodPersistentVolumeReclaimPolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// CPU Isolation test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUIsolationIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCPUIsolation(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Affinity required pods test", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAffinityRequiredPods)).", + "\t\tWithSkipCheckFn(testhelper.GetNoAffinityRequiredPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAffinityRequiredPods(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Pod toleration bypass test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodTolerationBypassIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodTolerationBypass(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Storage provisioner test", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestStorageProvisioner)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoStorageClassesSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPersistentVolumeClaimsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestStorageProvisioner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testStorageProvisioner(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst localStorageProvisioner = \"kubernetes.io/no-provisioner\"", + "\tconst lvmProvisioner = \"topolvm.io\"", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tvar StorageClasses = env.StorageClassList", + "\tvar Pvc = env.PersistentVolumeClaims", + "\tsnoSingleLocalStorageProvisionner := \"\"", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tusesPvcAndStorageClass := false", + "\t\tfor pvIndex := range put.Spec.Volumes {", + "\t\t\t// Skip any nil persistentClaims.", + "\t\t\tvolume := 
put.Spec.Volumes[pvIndex]", + "\t\t\tif volume.PersistentVolumeClaim == nil {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\t// We have the list of pods/volumes/claims.", + "\t\t\t// Look through the storageClass list for a match.", + "\t\t\tfor i := range Pvc {", + "\t\t\t\tif Pvc[i].Name == put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName \u0026\u0026 Pvc[i].Namespace == put.Namespace {", + "\t\t\t\t\tfor j := range StorageClasses {", + "\t\t\t\t\t\tif Pvc[i].Spec.StorageClassName != nil \u0026\u0026 StorageClasses[j].Name == *Pvc[i].Spec.StorageClassName {", + "\t\t\t\t\t\t\tusesPvcAndStorageClass = true", + "\t\t\t\t\t\t\tcheck.LogDebug(\"Pod %q pvc_name: %s, storageclass_name: %s, provisioner_name: %s\", put, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName,", + "\t\t\t\t\t\t\t\tStorageClasses[j].Name, StorageClasses[j].Provisioner)", + "", + "\t\t\t\t\t\t\tif env.IsSNO() {", + "\t\t\t\t\t\t\t\t// For SNO, only one local storage provisionner is allowed. The first local storage provisioner for this pod is assumed to be the only local storage provisioner allowed in the cluster.", + "\t\t\t\t\t\t\t\tif snoSingleLocalStorageProvisionner == \"\" \u0026\u0026", + "\t\t\t\t\t\t\t\t\t(StorageClasses[j].Provisioner == localStorageProvisioner ||", + "\t\t\t\t\t\t\t\t\t\tStorageClasses[j].Provisioner == lvmProvisioner) {", + "\t\t\t\t\t\t\t\t\tsnoSingleLocalStorageProvisionner = StorageClasses[j].Provisioner", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == snoSingleLocalStorageProvisionner {", + "\t\t\t\t\t\t\t\t\tcheck.LogInfo(\"Pod %q: Local storage (no provisioner or lvms) is recommended for SNO clusters.\", put)", + "\t\t\t\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Local storage (no provisioner or lvms) is recommended for SNO clusters.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + 
"\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == localStorageProvisioner || StorageClasses[j].Provisioner == lvmProvisioner {", + "\t\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: A single type of local storage cluster is recommended for single node clusters. Use lvms or kubernetes noprovisioner, but not both.\", put)", + "\t\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\t\t\t\t\"A single type of local storage cluster is recommended for single node clusters. Use lvms or kubernetes noprovisioner, but not both.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: Non local storage not recommended in single node clusters.\", put)", + "\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Non local storage not recommended in single node clusters.\", false).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t} else {", + "\t\t\t\t\t\t\t\tif StorageClasses[j].Provisioner == 
localStorageProvisioner || StorageClasses[j].Provisioner == lvmProvisioner {", + "\t\t\t\t\t\t\t\t\tcheck.LogError(\"Pod %q: Local storage provisioner (no provisioner or lvms) not recommended in multinode clusters.\", put)", + "\t\t\t\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Local storage provisioner (no provisioner or lvms) not recommended in multinode clusters.\", false).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t\t\tcontinue", + "\t\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t\tcheck.LogInfo(\"Pod %q: Non local storage provisioner recommended in multinode clusters.\", put)", + "\t\t\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Non local storage provisioner recommended in multinode clusters.\", false).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassName, StorageClasses[j].Name).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.StorageClassProvisioner, StorageClasses[j].Provisioner).", + "\t\t\t\t\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\t// Save as compliant pod in case it's not using any of the existing PVC/StorageClasses of the cluster.", + "\t\t\t// Otherwise, in this cases the check will be marked as skipped.", + "\t\t\t// ToDo: improve this function.", + "\t\t\tif !usesPvcAndStorageClass {", + "\t\t\t\tcheck.LogInfo(\"Pod %q not configured to use local storage\", put)", + "\t\t\t\tcompliantObjects = append(compliantObjects, 
testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod not configured to use local storage.\", true))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:52" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:50" + }, + { + "name": "skipIfNoPodSetsetsUnderTest", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:58" + } + ], + "consts": [ + { + "name": "intrusiveTcSkippedReason", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:46" + }, + { + "name": "localStorage", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:45" + }, + { + "name": "minWorkerNodesForLifecycle", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:43" + }, + { + "name": "statefulSet", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:44" + }, + { + "name": "timeout", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:40" + }, + { + "name": "timeoutPodRecreationPerPod", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:41" + }, + { + "name": "timeoutPodSetReady", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/suite.go:42" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/ownerreference", + "name": "ownerreference", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "k8s.io/api/core/v1" + 
], + "structs": [ + { + "name": "OwnerReference", + "exported": true, + "doc": "OwnerReference Tracks a pod's ownership status\n\nThis structure stores a reference to a pod and an integer indicating the test\noutcome. The RunTest method examines each owner reference of the pod, logging\ninformation or errors based on whether the kind matches expected values such\nas StatefulSet or ReplicaSet. If any mismatches are found, it records a\nfailure; otherwise, it marks success. GetResults simply returns the stored\nresult value.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/ownerreference/ownerreference.go:40", + "fields": { + "put": "*corev1.Pod", + "result": "int" + }, + "methodNames": [ + "GetResults", + "RunTest" + ], + "source": [ + "type OwnerReference struct {", + "\tput *corev1.Pod", + "\tresult int", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "NewOwnerReference", + "qualifiedName": "NewOwnerReference", + "exported": true, + "signature": "func(*corev1.Pod)(*OwnerReference)", + "doc": "NewOwnerReference Creates a new owner reference checker for a Pod\n\nThe function accepts a pointer to a Pod object and constructs an\nOwnerReference instance configured to evaluate the pod's owner references. It\nsets the initial result status to an error state, indicating that validation\nhas not yet succeeded. 
The constructed instance is returned as a pointer so\nit can be used for further testing or result retrieval.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/ownerreference/ownerreference.go:52", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsOwnerReference", + "kind": "function", + "source": [ + "func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\to := ownerreference.NewOwnerReference(put.Pod)", + "\t\to.RunTest(check.GetLogger())", + "\t\tif o.GetResults() != testhelper.SUCCESS {", + "\t\t\tcheck.LogError(\"Pod %q found with non-compliant owner reference\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-compliant owner reference\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has compliant owner reference\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has compliant owner reference\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewOwnerReference(put *corev1.Pod) *OwnerReference {", + "\to := OwnerReference{", + "\t\tput: put,", + "\t\tresult: testhelper.ERROR,", + "\t}", + "\treturn \u0026o", + "}" + ] + }, + { + "name": "GetResults", + "qualifiedName": "OwnerReference.GetResults", + "exported": true, + "receiver": "OwnerReference", + "signature": "func()(int)", + "doc": "OwnerReference.GetResults retrieves the stored result value\n\nThe method returns the integer stored in the OwnerReference instance’s\nresult field. 
It takes no arguments and simply accesses the private field to\nprovide its current value.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/ownerreference/ownerreference.go:84", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (o *OwnerReference) GetResults() int {", + "\treturn o.result", + "}" + ] + }, + { + "name": "RunTest", + "qualifiedName": "OwnerReference.RunTest", + "exported": true, + "receiver": "OwnerReference", + "signature": "func(*log.Logger)()", + "doc": "OwnerReference.RunTest verifies a pod’s owner references are either stateful set or replica set\n\nThe method iterates over all owner references attached to the pod. For each\nreference it logs the kind and marks the test as successful if the kind\nmatches one of the expected types; otherwise it logs an error, records\nfailure, and stops further checks.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/ownerreference/ownerreference.go:66", + "calls": [ + { + "name": "Info", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (o *OwnerReference) RunTest(logger *log.Logger) {", + "\tfor _, k := range o.put.OwnerReferences {", + "\t\tif k.Kind == statefulSet || k.Kind == replicaSet {", + "\t\t\tlogger.Info(\"Pod %q owner reference kind is %q\", o.put, k.Kind)", + "\t\t\to.result = testhelper.SUCCESS", + "\t\t} else {", + "\t\t\tlogger.Error(\"Pod %q has owner of type %q (%q or %q expected)\", o.put, k.Kind, replicaSet, statefulSet)", + "\t\t\to.result = testhelper.FAILURE", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "replicaSet", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/ownerreference/ownerreference.go:29" + }, + { + "name": "statefulSet", + "exported": false, + "position": 
"/Users/deliedit/dev/certsuite/tests/lifecycle/ownerreference/ownerreference.go:27" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "podrecreation", + "files": 1, + "imports": [ + "context", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "k8s.io/api/core/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/util/retry", + "sync", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "CordonCleanup", + "qualifiedName": "CordonCleanup", + "exported": true, + "signature": "func(string, *checksdb.Check)()", + "doc": "CordonCleanup Restores a node to schedulable state after draining\n\nThis routine attempts to uncordon the specified node by calling the helper\nfunction with an uncordon operation. If the uncordon fails, it aborts the\ncurrent check, logging the error and providing diagnostic information. 
The\nfunction is used as a cleanup step in tests that temporarily cordon nodes\nduring pod recreation scenarios.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:179", + "calls": [ + { + "name": "CordonHelper", + "kind": "function", + "source": [ + "func CordonHelper(name, operation string) error {", + "\tclients := clientsholder.GetClientsHolder()", + "", + "\tlog.Info(\"Performing %s operation on node %s\", operation, name)", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Fetch node object", + "\t\tnode, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tswitch operation {", + "\t\tcase Cordon:", + "\t\t\tnode.Spec.Unschedulable = true", + "\t\tcase Uncordon:", + "\t\t\tnode.Spec.Unschedulable = false", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"cordonHelper: Unsupported operation:%s\", operation)", + "\t\t}", + "\t\t// Update the node", + "\t\t_, err = clients.K8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})", + "\t\treturn err", + "\t})", + "\tif retryErr != nil {", + "\t\tlog.Error(\"can not %s node: %s, err=%v\", operation, name, retryErr)", + "\t}", + "\treturn retryErr", + "}" + ] + }, + { + "name": "Abort", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", 
postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CordonCleanup(node string, check *checksdb.Check) {", + "\terr := CordonHelper(node, Uncordon)", + "\tif err != nil {", + "\t\tcheck.Abort(fmt.Sprintf(\"cleanup: error uncordoning the node: %s, err=%s\", node, err))", + "\t}", + "}" + ] + }, + { + "name": "CordonHelper", + "qualifiedName": "CordonHelper", + "exported": true, + "signature": "func(string, string)(error)", + "doc": "CordonHelper Executes a cordon or uncordon operation on a node\n\nThe function retrieves the Kubernetes client holder, logs the requested\naction, and attempts to update the node’s unschedulable status using a\nretry loop that handles conflicts. 
It accepts a node name and an operation\nstring, applies the appropriate flag, and returns any error encountered\nduring retrieval or update.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:55", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "k8s.io/client-go/util/retry", + "name": "RetryOnConflict", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "Nodes", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "name": "Nodes", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node 
Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, 
podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", 
nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CordonCleanup", + "kind": "function", + "source": [ + "func CordonCleanup(node string, check *checksdb.Check) {", + "\terr := CordonHelper(node, Uncordon)", + "\tif err != nil {", + "\t\tcheck.Abort(fmt.Sprintf(\"cleanup: error uncordoning the node: %s, err=%s\", node, err))", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CordonHelper(name, operation string) error {", + "\tclients := clientsholder.GetClientsHolder()", + "", + "\tlog.Info(\"Performing %s operation on node %s\", operation, name)", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Fetch node object", + "\t\tnode, err := clients.K8sClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})", + "\t\tif err != nil {", + 
"\t\t\treturn err", + "\t\t}", + "\t\tswitch operation {", + "\t\tcase Cordon:", + "\t\t\tnode.Spec.Unschedulable = true", + "\t\tcase Uncordon:", + "\t\t\tnode.Spec.Unschedulable = false", + "\t\tdefault:", + "\t\t\treturn fmt.Errorf(\"cordonHelper: Unsupported operation:%s\", operation)", + "\t\t}", + "\t\t// Update the node", + "\t\t_, err = clients.K8sClient.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})", + "\t\treturn err", + "\t})", + "\tif retryErr != nil {", + "\t\tlog.Error(\"can not %s node: %s, err=%v\", operation, name, retryErr)", + "\t}", + "\treturn retryErr", + "}" + ] + }, + { + "name": "CountPodsWithDelete", + "qualifiedName": "CountPodsWithDelete", + "exported": true, + "signature": "func([]*provider.Pod, string, string)(int, error)", + "doc": "CountPodsWithDelete Counts pods scheduled on a node and optionally deletes them\n\nThe function iterates over all provided pods, selecting those belonging to\ndeployments or statefulsets that are running on the specified node and not\nmanaged by a DaemonSet. It increments a counter for each qualifying pod and,\nif deletion is requested, initiates the delete operation in either foreground\nor background mode while synchronizing with a wait group. 
Errors during\ndeletion are logged but do not abort the counting; the function returns the\ntotal count and any error encountered.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:92", + "calls": [ + { + "name": "skipDaemonPod", + "kind": "function", + "source": [ + "func skipDaemonPod(pod *corev1.Pod) bool {", + "\tfor _, or := range pod.OwnerReferences {", + "\t\tif or.Kind == DaemonSetString {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "deletePod", + "kind": "function", + "source": [ + "func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlog.Debug(\"deleting ns=%s pod=%s with %s mode\", pod.Namespace, pod.Name, mode)", + "\tgracePeriodSeconds := *pod.Spec.TerminationGracePeriodSeconds", + "\t// Create watcher before deleting pod", + "\twatcher, err := clients.K8sClient.CoreV1().Pods(pod.Namespace).Watch(context.TODO(), metav1.ListOptions{", + "\t\tFieldSelector: \"metadata.name=\" + pod.Name + \",metadata.namespace=\" + pod.Namespace,", + "\t})", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"waitPodDeleted ns=%s pod=%s, err=%s\", pod.Namespace, pod.Name, err)", + "\t}", + "\t// Actually deleting pod", + "\terr = clients.K8sClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{", + "\t\tGracePeriodSeconds: \u0026gracePeriodSeconds,", + "\t})", + "\tif err != nil {", + "\t\tlog.Error(\"Error deleting %s err: %v\", pod.String(), err)", + "\t\treturn err", + "\t}", + "\tif mode == DeleteBackground {", + "\t\treturn nil", + "\t}", + "\twg.Add(1)", + "\tpodName := pod.Name", + "\tnamespace := pod.Namespace", + "\tgo func() {", + "\t\tdefer wg.Done()", + "\t\twaitPodDeleted(namespace, podName, gracePeriodSeconds, watcher)", + "\t}()", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + 
"name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Wait", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. 
The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error) {", + "\tcount = 0", + "\tvar wg sync.WaitGroup", + "\tfor _, put := range pods {", + "\t\t_, isDeployment := put.Labels[\"pod-template-hash\"]", + "\t\t_, isStatefulset := put.Labels[\"controller-revision-hash\"]", + "\t\tif put.Spec.NodeName == nodeName \u0026\u0026", + "\t\t\t(isDeployment || isStatefulset) {", + "\t\t\tif skipDaemonPod(put.Pod) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tcount++", + "\t\t\tif mode == NoDelete {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\terr := deletePod(put.Pod, mode, \u0026wg)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error deleting %s\", put)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\twg.Wait()", + "\treturn count, nil", + "}" + ] + }, + { + "name": "deletePod", + "qualifiedName": "deletePod", + "exported": false, + "signature": "func(*corev1.Pod, string, *sync.WaitGroup)(error)", + "doc": "deletePod 
removes a pod and optionally waits for its deletion\n\nThe function initiates the deletion of a specified pod using the Kubernetes\nclient, applying the pod's configured termination grace period. It creates a\nwatch on the pod to monitor its removal from the cluster; if the mode is not\nbackground, it launches a goroutine that blocks until the pod is confirmed\ndeleted or a timeout occurs. Errors during watcher creation or deletion are\nreturned for handling by the caller.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:140", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Watch", + "kind": "function" + }, + { + "name": "Pods", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Delete", + "kind": "function" + }, + { + "name": "Pods", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": 
"function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "Done", + "kind": "function" + }, + { + "name": "waitPodDeleted", + "kind": "function", + "source": [ + "func waitPodDeleted(ns, podName string, timeout int64, watcher watch.Interface) {", + "\tlog.Debug(\"Entering waitPodDeleted ns=%s pod=%s\", ns, podName)", + "\tdefer watcher.Stop()", + "", + "\tfor {", + "\t\tselect {", + "\t\tcase event := \u003c-watcher.ResultChan():", + "\t\t\tif event.Type == watch.Deleted || event.Type == \"\" {", + "\t\t\t\tlog.Debug(\"ns=%s pod=%s deleted\", ns, podName)", + "\t\t\t\treturn", + "\t\t\t}", + "\t\tcase \u003c-time.After(time.Duration(timeout) * time.Second):", + "\t\t\tlog.Info(\"watch for pod deletion timedout after %d seconds\", timeout)", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CountPodsWithDelete", + "kind": "function", + "source": [ + "func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error) {", + "\tcount = 0", + "\tvar wg sync.WaitGroup", + "\tfor _, put := range pods {", + "\t\t_, isDeployment := put.Labels[\"pod-template-hash\"]", + "\t\t_, isStatefulset := put.Labels[\"controller-revision-hash\"]", + "\t\tif put.Spec.NodeName == nodeName \u0026\u0026", + "\t\t\t(isDeployment || isStatefulset) {", + "\t\t\tif skipDaemonPod(put.Pod) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tcount++", + "\t\t\tif mode == NoDelete {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\terr := deletePod(put.Pod, mode, \u0026wg)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error deleting %s\", put)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\twg.Wait()", + "\treturn count, nil", + "}" + ] + } + ], + 
"usesTypes": null, + "usesGlobals": null, + "source": [ + "func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlog.Debug(\"deleting ns=%s pod=%s with %s mode\", pod.Namespace, pod.Name, mode)", + "\tgracePeriodSeconds := *pod.Spec.TerminationGracePeriodSeconds", + "\t// Create watcher before deleting pod", + "\twatcher, err := clients.K8sClient.CoreV1().Pods(pod.Namespace).Watch(context.TODO(), metav1.ListOptions{", + "\t\tFieldSelector: \"metadata.name=\" + pod.Name + \",metadata.namespace=\" + pod.Namespace,", + "\t})", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"waitPodDeleted ns=%s pod=%s, err=%s\", pod.Namespace, pod.Name, err)", + "\t}", + "\t// Actually deleting pod", + "\terr = clients.K8sClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{", + "\t\tGracePeriodSeconds: \u0026gracePeriodSeconds,", + "\t})", + "\tif err != nil {", + "\t\tlog.Error(\"Error deleting %s err: %v\", pod.String(), err)", + "\t\treturn err", + "\t}", + "\tif mode == DeleteBackground {", + "\t\treturn nil", + "\t}", + "\twg.Add(1)", + "\tpodName := pod.Name", + "\tnamespace := pod.Namespace", + "\tgo func() {", + "\t\tdefer wg.Done()", + "\t\twaitPodDeleted(namespace, podName, gracePeriodSeconds, watcher)", + "\t}()", + "\treturn nil", + "}" + ] + }, + { + "name": "skipDaemonPod", + "qualifiedName": "skipDaemonPod", + "exported": false, + "signature": "func(*corev1.Pod)(bool)", + "doc": "skipDaemonPod identifies pods managed by a DaemonSet\n\nThis function examines the owner references of a pod and returns true if any\nreference is of kind DaemonSet. Pods owned by a DaemonSet are skipped from\ndeletion or recreation logic. 
Otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:123", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "CountPodsWithDelete", + "kind": "function", + "source": [ + "func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error) {", + "\tcount = 0", + "\tvar wg sync.WaitGroup", + "\tfor _, put := range pods {", + "\t\t_, isDeployment := put.Labels[\"pod-template-hash\"]", + "\t\t_, isStatefulset := put.Labels[\"controller-revision-hash\"]", + "\t\tif put.Spec.NodeName == nodeName \u0026\u0026", + "\t\t\t(isDeployment || isStatefulset) {", + "\t\t\tif skipDaemonPod(put.Pod) {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tcount++", + "\t\t\tif mode == NoDelete {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\terr := deletePod(put.Pod, mode, \u0026wg)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Error(\"Error deleting %s\", put)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\twg.Wait()", + "\treturn count, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func skipDaemonPod(pod *corev1.Pod) bool {", + "\tfor _, or := range pod.OwnerReferences {", + "\t\tif or.Kind == DaemonSetString {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "waitPodDeleted", + "qualifiedName": "waitPodDeleted", + "exported": false, + "signature": "func(string, string, int64, watch.Interface)()", + "doc": "waitPodDeleted waits for a pod to be deleted or times out\n\nThe function monitors the provided watcher until it receives a deletion event\nfor the specified pod, then stops the watch. If no deletion occurs within the\ntimeout period, it logs a timeout message and exits. 
It does not return a\nvalue but signals completion by stopping the watcher.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:192", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "Stop", + "kind": "function" + }, + { + "name": "ResultChan", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "time", + "name": "After", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Duration", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation", + "name": "deletePod", + "kind": "function", + "source": [ + "func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlog.Debug(\"deleting ns=%s pod=%s with %s mode\", pod.Namespace, pod.Name, mode)", + "\tgracePeriodSeconds := *pod.Spec.TerminationGracePeriodSeconds", + "\t// Create watcher before deleting pod", + "\twatcher, err := clients.K8sClient.CoreV1().Pods(pod.Namespace).Watch(context.TODO(), metav1.ListOptions{", + "\t\tFieldSelector: \"metadata.name=\" + pod.Name + \",metadata.namespace=\" + pod.Namespace,", + "\t})", + "\tif err != nil {", + "\t\treturn 
fmt.Errorf(\"waitPodDeleted ns=%s pod=%s, err=%s\", pod.Namespace, pod.Name, err)", + "\t}", + "\t// Actually deleting pod", + "\terr = clients.K8sClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{", + "\t\tGracePeriodSeconds: \u0026gracePeriodSeconds,", + "\t})", + "\tif err != nil {", + "\t\tlog.Error(\"Error deleting %s err: %v\", pod.String(), err)", + "\t\treturn err", + "\t}", + "\tif mode == DeleteBackground {", + "\t\treturn nil", + "\t}", + "\twg.Add(1)", + "\tpodName := pod.Name", + "\tnamespace := pod.Namespace", + "\tgo func() {", + "\t\tdefer wg.Done()", + "\t\twaitPodDeleted(namespace, podName, gracePeriodSeconds, watcher)", + "\t}()", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func waitPodDeleted(ns, podName string, timeout int64, watcher watch.Interface) {", + "\tlog.Debug(\"Entering waitPodDeleted ns=%s pod=%s\", ns, podName)", + "\tdefer watcher.Stop()", + "", + "\tfor {", + "\t\tselect {", + "\t\tcase event := \u003c-watcher.ResultChan():", + "\t\t\tif event.Type == watch.Deleted || event.Type == \"\" {", + "\t\t\t\tlog.Debug(\"ns=%s pod=%s deleted\", ns, podName)", + "\t\t\t\treturn", + "\t\t\t}", + "\t\tcase \u003c-time.After(time.Duration(timeout) * time.Second):", + "\t\t\tlog.Info(\"watch for pod deletion timedout after %d seconds\", timeout)", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "Cordon", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:41" + }, + { + "name": "DaemonSetString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:39" + }, + { + "name": "DefaultGracePeriodInSeconds", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:40" + }, + { + "name": "DeleteBackground", + "exported": true, + 
"position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:43" + }, + { + "name": "DeleteForeground", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:44" + }, + { + "name": "DeploymentString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:37" + }, + { + "name": "NoDelete", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:45" + }, + { + "name": "ReplicaSetString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:36" + }, + { + "name": "StatefulsetString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:38" + }, + { + "name": "Uncordon", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podrecreation/podrecreation.go:42" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "podsets", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "k8s.io/apimachinery/pkg/runtime/schema", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GetAllNodesForAllPodSets", + "qualifiedName": "GetAllNodesForAllPodSets", + "exported": true, + "signature": "func([]*provider.Pod)(map[string]bool)", + "doc": "GetAllNodesForAllPodSets Collects unique node names for pods owned by replicasets or statefulsets\n\nThe function iterates over each pod and inspects its owner references. When\nit finds an owner of kind ReplicaSet or StatefulSet, the pod’s node name is\nadded to a map that tracks distinct nodes. 
The resulting map contains one\nentry per node that hosts at least one such pod.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:272", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. 
The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetAllNodesForAllPodSets(pods []*provider.Pod) (nodes map[string]bool) {", + "\tnodes = make(map[string]bool)", + "\tfor _, put := range pods {", + "\t\tfor _, or := range put.OwnerReferences {", + "\t\t\tif or.Kind != ReplicaSetString \u0026\u0026 or.Kind != StatefulsetString {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tnodes[put.Spec.NodeName] = true", + "\t\t\tbreak", + "\t\t}", + "\t}", + "\treturn nodes", + "}" + ] + }, + { + "name": "WaitForAllPodSetsReady", + "qualifiedName": "WaitForAllPodSetsReady", + "exported": true, + "signature": "func(*provider.TestEnvironment, time.Duration, *log.Logger)([]*provider.Deployment, []*provider.StatefulSet)", + "doc": "WaitForAllPodSetsReady waits until all deployments and stateful sets are ready or a timeout occurs\n\nThe function polls the readiness status of every deployment and stateful set\nin the test environment at fixed intervals, logging each check. 
It stops\nearly if all podsets become ready before the specified duration; otherwise it\nreturns the remaining not‑ready objects after the timeout. The returned\nslices allow callers to report which resources failed to reach readiness.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:229", + "calls": [ + { + "name": "Info", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Since", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "getDeploymentsInfo", + "kind": "function", + "source": [ + "func getDeploymentsInfo(deployments []*provider.Deployment) []string {", + "\tdeps := []string{}", + "\tfor _, dep := range deployments {", + "\t\tdeps = append(deps, fmt.Sprintf(\"%s:%s\", dep.Namespace, dep.Name))", + "\t}", + "", + "\treturn deps", + "}" + ] + }, + { + "name": "getNotReadyDeployments", + "kind": "function", + "source": [ + "func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment {", + "\tnotReadyDeployments := []*provider.Deployment{}", + "\tfor _, dep := range deployments {", + "\t\tready, err := isDeploymentReady(dep.Name, dep.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", dep.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", dep.ToString())", + "\t\t} else {", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t}", + "\t}", + "", + "\treturn notReadyDeployments", + "}" + ] + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "getStatefulSetsInfo", + "kind": "function", + "source": [ + "func getStatefulSetsInfo(statefulSets 
[]*provider.StatefulSet) []string {", + "\tstsInfo := []string{}", + "\tfor _, sts := range statefulSets {", + "\t\tstsInfo = append(stsInfo, fmt.Sprintf(\"%s:%s\", sts.Namespace, sts.Name))", + "\t}", + "", + "\treturn stsInfo", + "}" + ] + }, + { + "name": "getNotReadyStatefulSets", + "kind": "function", + "source": [ + "func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet {", + "\tnotReadyStatefulSets := []*provider.StatefulSet{}", + "\tfor _, sts := range statefulSets {", + "\t\tready, err := isStatefulSetReady(sts.Name, sts.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", sts.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", sts.ToString())", + "\t\t} else {", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t}", + "\t}", + "", + "\treturn notReadyStatefulSets", + "}" + ] + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "getDeploymentsInfo", + "kind": "function", + "source": [ + "func getDeploymentsInfo(deployments []*provider.Deployment) []string {", + "\tdeps := []string{}", + "\tfor _, dep := range deployments {", + "\t\tdeps = append(deps, fmt.Sprintf(\"%s:%s\", dep.Namespace, dep.Name))", + "\t}", + "", + "\treturn deps", + "}" + ] + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "getStatefulSetsInfo", + "kind": "function", + "source": [ + "func getStatefulSetsInfo(statefulSets []*provider.StatefulSet) []string {", + "\tstsInfo := []string{}", + "\tfor _, sts := range statefulSets {", + "\t\tstsInfo = append(stsInfo, fmt.Sprintf(\"%s:%s\", sts.Namespace, sts.Name))", + "\t}", + "", + "\treturn stsInfo", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "time", + "name": 
"Sleep", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "getDeploymentsInfo", + "kind": "function", + "source": [ + "func getDeploymentsInfo(deployments []*provider.Deployment) []string {", + "\tdeps := []string{}", + "\tfor _, dep := range deployments {", + "\t\tdeps = append(deps, fmt.Sprintf(\"%s:%s\", dep.Namespace, dep.Name))", + "\t}", + "", + "\treturn deps", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "getStatefulSetsInfo", + "kind": "function", + "source": [ + "func getStatefulSetsInfo(statefulSets []*provider.StatefulSet) []string {", + "\tstsInfo := []string{}", + "\tfor _, sts := range statefulSets {", + "\t\tstsInfo = append(stsInfo, fmt.Sprintf(\"%s:%s\", sts.Namespace, sts.Name))", + "\t}", + "", + "\treturn stsInfo", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodsRecreation", + "kind": "function", + "source": [ + "func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tneedsPostMortemInfo := true", + "\tdefer func() {", + "\t\tif needsPostMortemInfo {", + "\t\t\tcheck.LogDebug(\"%s\", postmortem.Log())", + "\t\t}", + "\t\t// Since we are possible exiting early, we need to make sure we set the result at the end of the function.", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}()", + "\tcheck.LogInfo(\"Testing node draining effect of deployment\")", + "\tcheck.LogInfo(\"Testing initial state for deployments\")", + "\tdefer env.SetNeedsRefresh()", + "", + "\t// Before draining any node, wait until all podsets are ready. 
The timeout depends on the number of podsets to check.", + "\t// timeout = k-mins + (1min * (num-deployments + num-statefulsets))", + "\tallPodsetsReadyTimeout := timeoutPodSetReady + time.Minute*time.Duration(len(env.Deployments)+len(env.StatefulSets))", + "\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, allPodsetsReadyTimeout, check.GetLogger())", + "\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\tfor _, dep := range notReadyDeployments {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment was not ready before draining any node.\", false))", + "\t\t}", + "\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset was not ready before draining any node.\", false))", + "\t\t}", + "\t\treturn", + "\t}", + "", + "\t// Filter out pods with Node Assignments present and FAIL them.", + "\t// We run into problems with this test when there are nodeSelectors assigned affecting where", + "\t// pods are scheduled. Also, they are not allowed in general, see the node-selector test case.", + "\t// Skip the safeguard for any pods that are using a runtimeClassName. 
This is potentially", + "\t// because pods that are derived from a performance profile might have a built-in nodeSelector.", + "\tvar podsWithNodeAssignment []*provider.Pod", + "\tfor _, put := range env.Pods {", + "\t\tif !put.IsRuntimeClassNameSpecified() \u0026\u0026 put.HasNodeSelector() {", + "\t\t\tpodsWithNodeAssignment = append(podsWithNodeAssignment, put)", + "\t\t\tcheck.LogError(\"Pod %q has been found with node selector(s): %v\", put, put.Spec.NodeSelector)", + "\t\t}", + "\t}", + "\tif len(podsWithNodeAssignment) \u003e 0 {", + "\t\tcheck.LogError(\"Pod(s) have been found to contain a node assignment and cannot perform the pod-recreation test: %v\", podsWithNodeAssignment)", + "\t\tfor _, pod := range podsWithNodeAssignment {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has node assignment.\", false))", + "\t\t}", + "", + "\t\treturn", + "\t}", + "", + "\tfor nodeName := range podsets.GetAllNodesForAllPodSets(env.Pods) {", + "\t\tdefer podrecreation.CordonCleanup(nodeName, check) //nolint:gocritic // The defer in loop is intentional, calling the cleanup function once per node", + "\t\terr := podrecreation.CordonHelper(nodeName, podrecreation.Cordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Error cordoning the node: %s\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node cordoning failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tcheck.LogInfo(\"Draining and Cordoning node %s: \", nodeName)", + "\t\tcount, err := podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.NoDelete)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Getting pods list to drain failed, err=%v\", err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Getting pods list to drain failed\", false))", + "\t\t\treturn", + "\t\t}", + "\t\tnodeTimeout := 
timeoutPodSetReady + timeoutPodRecreationPerPod*time.Duration(count)", + "\t\tcheck.LogDebug(\"Draining node: %s with timeout: %s\", nodeName, nodeTimeout)", + "\t\t_, err = podrecreation.CountPodsWithDelete(env.Pods, nodeName, podrecreation.DeleteForeground)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Draining node %q failed, err=%v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Draining node failed\", false))", + "\t\t\treturn", + "\t\t}", + "", + "\t\tnotReadyDeployments, notReadyStatefulSets := podsets.WaitForAllPodSetsReady(env, nodeTimeout, check.GetLogger())", + "\t\tif len(notReadyDeployments) \u003e 0 || len(notReadyStatefulSets) \u003e 0 {", + "\t\t\tfor _, dep := range notReadyDeployments {", + "\t\t\t\tcheck.LogError(\"Deployment %q not ready after draining node %q\", dep.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\tfor _, sts := range notReadyStatefulSets {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q not ready after draining node %q\", sts.ToString(), nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset not ready after draining node \"+nodeName, false))", + "\t\t\t}", + "\t\t\treturn", + "\t\t}", + "", + "\t\terr = podrecreation.CordonHelper(nodeName, podrecreation.Uncordon)", + "\t\tif err != nil {", + "\t\t\tcheck.LogFatal(\"Error uncordoning the node: %s\", nodeName)", + "\t\t}", + "\t}", + "", + "\t// If everything went well for all nodes, the nonCompliantObjects should be empty. 
We need to", + "\t// manually add all the deps/sts into the compliant object lists so the check is marked as skipped.", + "\t// ToDo: Improve this.", + "\tif len(nonCompliantObjects) == 0 {", + "\t\tfor _, dep := range env.Deployments {", + "\t\t\tcheck.LogInfo(\"Deployment's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(dep.Namespace, dep.Name, \"Deployment's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "", + "\t\tfor _, sts := range env.StatefulSets {", + "\t\t\tcheck.LogInfo(\"Statefulset's pods successfully re-schedulled after node draining.\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(sts.Namespace, sts.Name, \"Statefulset's pods successfully re-schedulled after node draining.\", true))", + "\t\t}", + "\t}", + "", + "\tneedsPostMortemInfo = false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) (", + "\tnotReadyDeployments []*provider.Deployment,", + "\tnotReadyStatefulSets []*provider.StatefulSet) {", + "\tconst queryInterval = 15 * time.Second", + "", + "\tdeploymentsToCheck := env.Deployments", + "\tstatefulSetsToCheck := env.StatefulSets", + "", + "\tlogger.Info(\"Waiting %s for %d podsets to be ready.\", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck))", + "\tfor startTime := time.Now(); time.Since(startTime) \u003c timeout; {", + "\t\tlogger.Info(\"Checking Deployments readiness of Deployments %v\", getDeploymentsInfo(deploymentsToCheck))", + "\t\tnotReadyDeployments = getNotReadyDeployments(deploymentsToCheck)", + "", + "\t\tlogger.Info(\"Checking StatefulSets readiness of StatefulSets %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "\t\tnotReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck)", + "", + 
"\t\tlogger.Info(\"Not ready Deployments: %v\", getDeploymentsInfo(notReadyDeployments))", + "\t\tlogger.Info(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(notReadyStatefulSets))", + "", + "\t\tdeploymentsToCheck = notReadyDeployments", + "\t\tstatefulSetsToCheck = notReadyStatefulSets", + "", + "\t\tif len(deploymentsToCheck) == 0 \u0026\u0026 len(statefulSetsToCheck) == 0 {", + "\t\t\t// No more podsets to check.", + "\t\t\tbreak", + "\t\t}", + "", + "\t\ttime.Sleep(queryInterval)", + "\t}", + "", + "\t// Here, either we reached the timeout or there's no more not-ready deployments or statefulsets.", + "\tlogger.Error(\"Not ready Deployments: %v\", getDeploymentsInfo(deploymentsToCheck))", + "\tlogger.Error(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "", + "\treturn deploymentsToCheck, statefulSetsToCheck", + "}" + ] + }, + { + "name": "WaitForStatefulSetReady", + "qualifiedName": "WaitForStatefulSetReady", + "exported": true, + "signature": "func(string, string, time.Duration, *log.Logger)(bool)", + "doc": "WaitForStatefulSetReady waits until a StatefulSet reaches the ready state\n\nThe function polls the Kubernetes API at one‑second intervals, retrieving\nthe latest StatefulSet definition for the given namespace and name. It checks\nwhether all replicas are available and the update is complete; if so it logs\nsuccess and returns true. 
If the timeout expires before readiness, an error\nis logged and false is returned.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:83", + "calls": [ + { + "name": "Debug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Since", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetUpdatedStatefulset", + "kind": "function", + "source": [ + "func GetUpdatedStatefulset(ac appv1client.AppsV1Interface, namespace, name string) (*StatefulSet, error) {", + "\tresult, err := autodiscover.FindStatefulsetByNameByNamespace(ac, namespace, name)", + "\treturn \u0026StatefulSet{", + "\t\tresult,", + "\t}, err", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "IsStatefulSetReady", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Sleep", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "scaleHpaStatefulSetHelper", + "kind": "function", + "source": [ + "func scaleHpaStatefulSetHelper(hpscaler 
hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout, logger) {", + "\t\t\tlogger.Error(\"StatefulSet not ready after scale operation %s:%s\", namespace, statefulsetName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "scaleStatefulsetHelper", + "kind": "function", + "source": [ + "func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger) bool {", + "\tname := statefulset.Name", + "\tnamespace := statefulset.Namespace", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of statefulset before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tss, err := ssClient.Get(context.TODO(), name, 
v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of statefulset %s:%s with error %s\", namespace, name, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tss.Spec.Replicas = \u0026replicas", + "\t\t_, err = clients.K8sClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), ss, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update statefulset\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale statefulset %s:%s, err=%v\", namespace, name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log.Logger) bool {", + "\tlogger.Debug(\"Check if statefulset %s:%s is ready\", ns, name)", + "\tclients := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tss, err := provider.GetUpdatedStatefulset(clients.K8sClient.AppsV1(), ns, name)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Error while getting the %s, err: %v\", ss.ToString(), err)", + "\t\t} else if ss.IsStatefulSetReady() {", + "\t\t\tlogger.Info(\"%s is ready\", ss.ToString())", + "\t\t\treturn true", + "\t\t}", + "\t\ttime.Sleep(time.Second)", + "\t}", + "\tlogger.Error(\"Statefulset %s:%s is not ready\", ns, name)", + "\treturn false", + "}" + ] + }, + { + "name": "getDeploymentsInfo", + "qualifiedName": "getDeploymentsInfo", + "exported": false, + "signature": "func([]*provider.Deployment)([]string)", + "doc": "getDeploymentsInfo Collects deployment 
identifiers as namespace:name strings\n\nThe function iterates over a slice of deployment pointers, formatting each\ndeployment’s namespace and name into a string separated by a colon. It\nappends these formatted strings to a new slice, which is then returned. This\nhelper is used for logging or reporting purposes during test execution.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:141", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForAllPodSetsReady", + "kind": "function", + "source": [ + "func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) (", + "\tnotReadyDeployments []*provider.Deployment,", + "\tnotReadyStatefulSets []*provider.StatefulSet) {", + "\tconst queryInterval = 15 * time.Second", + "", + "\tdeploymentsToCheck := env.Deployments", + "\tstatefulSetsToCheck := env.StatefulSets", + "", + "\tlogger.Info(\"Waiting %s for %d podsets to be ready.\", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck))", + "\tfor startTime := time.Now(); time.Since(startTime) \u003c timeout; {", + "\t\tlogger.Info(\"Checking Deployments readiness of Deployments %v\", getDeploymentsInfo(deploymentsToCheck))", + "\t\tnotReadyDeployments = getNotReadyDeployments(deploymentsToCheck)", + "", + "\t\tlogger.Info(\"Checking StatefulSets readiness of StatefulSets %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "\t\tnotReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck)", + "", + "\t\tlogger.Info(\"Not ready Deployments: %v\", getDeploymentsInfo(notReadyDeployments))", + "\t\tlogger.Info(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(notReadyStatefulSets))", + "", + "\t\tdeploymentsToCheck = notReadyDeployments", + "\t\tstatefulSetsToCheck = 
notReadyStatefulSets", + "", + "\t\tif len(deploymentsToCheck) == 0 \u0026\u0026 len(statefulSetsToCheck) == 0 {", + "\t\t\t// No more podsets to check.", + "\t\t\tbreak", + "\t\t}", + "", + "\t\ttime.Sleep(queryInterval)", + "\t}", + "", + "\t// Here, either we reached the timeout or there's no more not-ready deployments or statefulsets.", + "\tlogger.Error(\"Not ready Deployments: %v\", getDeploymentsInfo(deploymentsToCheck))", + "\tlogger.Error(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "", + "\treturn deploymentsToCheck, statefulSetsToCheck", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getDeploymentsInfo(deployments []*provider.Deployment) []string {", + "\tdeps := []string{}", + "\tfor _, dep := range deployments {", + "\t\tdeps = append(deps, fmt.Sprintf(\"%s:%s\", dep.Namespace, dep.Name))", + "\t}", + "", + "\treturn deps", + "}" + ] + }, + { + "name": "getNotReadyDeployments", + "qualifiedName": "getNotReadyDeployments", + "exported": false, + "signature": "func([]*provider.Deployment)([]*provider.Deployment)", + "doc": "getNotReadyDeployments identifies deployments that are not yet ready\n\nThis helper inspects each deployment in the supplied slice, calling a\nreadiness check for its name and namespace. Deployments reported as ready are\nomitted from the result; any errors during the check also cause the\ndeployment to be considered not ready. 
The function returns a new slice\ncontaining only those deployments that failed the readiness test.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:173", + "calls": [ + { + "name": "isDeploymentReady", + "kind": "function", + "source": [ + "func isDeploymentReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tdep, err := provider.GetUpdatedDeployment(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn dep.IsDeploymentReady(), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForAllPodSetsReady", + "kind": "function", + "source": [ + "func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) (", + "\tnotReadyDeployments []*provider.Deployment,", + "\tnotReadyStatefulSets []*provider.StatefulSet) {", + "\tconst queryInterval = 15 * time.Second", + "", + "\tdeploymentsToCheck := env.Deployments", + "\tstatefulSetsToCheck := env.StatefulSets", + "", + "\tlogger.Info(\"Waiting %s for %d podsets to be ready.\", timeout, 
len(deploymentsToCheck)+len(statefulSetsToCheck))", + "\tfor startTime := time.Now(); time.Since(startTime) \u003c timeout; {", + "\t\tlogger.Info(\"Checking Deployments readiness of Deployments %v\", getDeploymentsInfo(deploymentsToCheck))", + "\t\tnotReadyDeployments = getNotReadyDeployments(deploymentsToCheck)", + "", + "\t\tlogger.Info(\"Checking StatefulSets readiness of StatefulSets %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "\t\tnotReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck)", + "", + "\t\tlogger.Info(\"Not ready Deployments: %v\", getDeploymentsInfo(notReadyDeployments))", + "\t\tlogger.Info(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(notReadyStatefulSets))", + "", + "\t\tdeploymentsToCheck = notReadyDeployments", + "\t\tstatefulSetsToCheck = notReadyStatefulSets", + "", + "\t\tif len(deploymentsToCheck) == 0 \u0026\u0026 len(statefulSetsToCheck) == 0 {", + "\t\t\t// No more podsets to check.", + "\t\t\tbreak", + "\t\t}", + "", + "\t\ttime.Sleep(queryInterval)", + "\t}", + "", + "\t// Here, either we reached the timeout or there's no more not-ready deployments or statefulsets.", + "\tlogger.Error(\"Not ready Deployments: %v\", getDeploymentsInfo(deploymentsToCheck))", + "\tlogger.Error(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "", + "\treturn deploymentsToCheck, statefulSetsToCheck", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment {", + "\tnotReadyDeployments := []*provider.Deployment{}", + "\tfor _, dep := range deployments {", + "\t\tready, err := isDeploymentReady(dep.Name, dep.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", dep.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + 
"\t\t\tlog.Debug(\"%s is ready.\", dep.ToString())", + "\t\t} else {", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t}", + "\t}", + "", + "\treturn notReadyDeployments", + "}" + ] + }, + { + "name": "getNotReadyStatefulSets", + "qualifiedName": "getNotReadyStatefulSets", + "exported": false, + "signature": "func([]*provider.StatefulSet)([]*provider.StatefulSet)", + "doc": "getNotReadyStatefulSets filters stateful sets that are not ready\n\nThe function iterates over a slice of stateful set objects, checking each\none's readiness status via an external helper. If the check fails or\nindicates the set is not ready, it records the set in a new slice. The\nresulting slice contains only those stateful sets that are considered not\nready, and this list is returned to the caller.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:201", + "calls": [ + { + "name": "isStatefulSetReady", + "kind": "function", + "source": [ + "func isStatefulSetReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tsts, err := provider.GetUpdatedStatefulset(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn sts.IsStatefulSetReady(), nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "ToString", + 
"kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForAllPodSetsReady", + "kind": "function", + "source": [ + "func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) (", + "\tnotReadyDeployments []*provider.Deployment,", + "\tnotReadyStatefulSets []*provider.StatefulSet) {", + "\tconst queryInterval = 15 * time.Second", + "", + "\tdeploymentsToCheck := env.Deployments", + "\tstatefulSetsToCheck := env.StatefulSets", + "", + "\tlogger.Info(\"Waiting %s for %d podsets to be ready.\", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck))", + "\tfor startTime := time.Now(); time.Since(startTime) \u003c timeout; {", + "\t\tlogger.Info(\"Checking Deployments readiness of Deployments %v\", getDeploymentsInfo(deploymentsToCheck))", + "\t\tnotReadyDeployments = getNotReadyDeployments(deploymentsToCheck)", + "", + "\t\tlogger.Info(\"Checking StatefulSets readiness of StatefulSets %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "\t\tnotReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck)", + "", + "\t\tlogger.Info(\"Not ready Deployments: %v\", getDeploymentsInfo(notReadyDeployments))", + "\t\tlogger.Info(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(notReadyStatefulSets))", + "", + "\t\tdeploymentsToCheck = notReadyDeployments", + "\t\tstatefulSetsToCheck = notReadyStatefulSets", + "", + "\t\tif len(deploymentsToCheck) == 0 \u0026\u0026 len(statefulSetsToCheck) == 0 {", + "\t\t\t// No more podsets to check.", + "\t\t\tbreak", + "\t\t}", + "", + "\t\ttime.Sleep(queryInterval)", + "\t}", + "", + "\t// Here, either we reached the timeout or there's no more not-ready deployments or statefulsets.", + "\tlogger.Error(\"Not ready Deployments: %v\", getDeploymentsInfo(deploymentsToCheck))", + "\tlogger.Error(\"Not ready StatefulSets: %v\", 
getStatefulSetsInfo(statefulSetsToCheck))", + "", + "\treturn deploymentsToCheck, statefulSetsToCheck", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet {", + "\tnotReadyStatefulSets := []*provider.StatefulSet{}", + "\tfor _, sts := range statefulSets {", + "\t\tready, err := isStatefulSetReady(sts.Name, sts.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", sts.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", sts.ToString())", + "\t\t} else {", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t}", + "\t}", + "", + "\treturn notReadyStatefulSets", + "}" + ] + }, + { + "name": "getStatefulSetsInfo", + "qualifiedName": "getStatefulSetsInfo", + "exported": false, + "signature": "func([]*provider.StatefulSet)([]string)", + "doc": "getStatefulSetsInfo creates a list of namespace:name strings for each StatefulSet\n\nThe function iterates over the supplied slice, formatting each element’s\nnamespace and name into a single string separated by a colon. These formatted\nstrings are collected in a new slice which is then returned. 
The resulting\nslice provides a concise representation of the StatefulSets for logging or\nreporting purposes.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:157", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForAllPodSetsReady", + "kind": "function", + "source": [ + "func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) (", + "\tnotReadyDeployments []*provider.Deployment,", + "\tnotReadyStatefulSets []*provider.StatefulSet) {", + "\tconst queryInterval = 15 * time.Second", + "", + "\tdeploymentsToCheck := env.Deployments", + "\tstatefulSetsToCheck := env.StatefulSets", + "", + "\tlogger.Info(\"Waiting %s for %d podsets to be ready.\", timeout, len(deploymentsToCheck)+len(statefulSetsToCheck))", + "\tfor startTime := time.Now(); time.Since(startTime) \u003c timeout; {", + "\t\tlogger.Info(\"Checking Deployments readiness of Deployments %v\", getDeploymentsInfo(deploymentsToCheck))", + "\t\tnotReadyDeployments = getNotReadyDeployments(deploymentsToCheck)", + "", + "\t\tlogger.Info(\"Checking StatefulSets readiness of StatefulSets %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "\t\tnotReadyStatefulSets = getNotReadyStatefulSets(statefulSetsToCheck)", + "", + "\t\tlogger.Info(\"Not ready Deployments: %v\", getDeploymentsInfo(notReadyDeployments))", + "\t\tlogger.Info(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(notReadyStatefulSets))", + "", + "\t\tdeploymentsToCheck = notReadyDeployments", + "\t\tstatefulSetsToCheck = notReadyStatefulSets", + "", + "\t\tif len(deploymentsToCheck) == 0 \u0026\u0026 len(statefulSetsToCheck) == 0 {", + "\t\t\t// No more podsets to check.", + "\t\t\tbreak", + "\t\t}", + "", + "\t\ttime.Sleep(queryInterval)", + "\t}", + "", + 
"\t// Here, either we reached the timeout or there's no more not-ready deployments or statefulsets.", + "\tlogger.Error(\"Not ready Deployments: %v\", getDeploymentsInfo(deploymentsToCheck))", + "\tlogger.Error(\"Not ready StatefulSets: %v\", getStatefulSetsInfo(statefulSetsToCheck))", + "", + "\treturn deploymentsToCheck, statefulSetsToCheck", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getStatefulSetsInfo(statefulSets []*provider.StatefulSet) []string {", + "\tstsInfo := []string{}", + "\tfor _, sts := range statefulSets {", + "\t\tstsInfo = append(stsInfo, fmt.Sprintf(\"%s:%s\", sts.Namespace, sts.Name))", + "\t}", + "", + "\treturn stsInfo", + "}" + ] + }, + { + "name": "isDeploymentReady", + "qualifiedName": "isDeploymentReady", + "exported": false, + "signature": "func(string, string)(bool, error)", + "doc": "isDeploymentReady checks if a deployment has finished rolling out\n\nThe function retrieves the current state of a deployment in a given namespace\nusing Kubernetes clients, then determines readiness by examining its status\nconditions. 
It returns true when all replicas are updated and available,\notherwise false, along with any error that occurred during retrieval.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:107", + "calls": [ + { + "name": "AppsV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetUpdatedDeployment", + "kind": "function", + "source": [ + "func GetUpdatedDeployment(ac appv1client.AppsV1Interface, namespace, name string) (*Deployment, error) {", + "\tresult, err := autodiscover.FindDeploymentByNameByNamespace(ac, namespace, name)", + "\treturn \u0026Deployment{", + "\t\tresult,", + "\t}, err", + "}" + ] + }, + { + "name": "IsDeploymentReady", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "getNotReadyDeployments", + "kind": "function", + "source": [ + "func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment {", + "\tnotReadyDeployments := []*provider.Deployment{}", + "\tfor _, dep := range deployments {", + "\t\tready, err := isDeploymentReady(dep.Name, dep.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", dep.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + 
"\t\t\tlog.Debug(\"%s is ready.\", dep.ToString())", + "\t\t} else {", + "\t\t\tnotReadyDeployments = append(notReadyDeployments, dep)", + "\t\t}", + "\t}", + "", + "\treturn notReadyDeployments", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isDeploymentReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tdep, err := provider.GetUpdatedDeployment(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn dep.IsDeploymentReady(), nil", + "}" + ] + }, + { + "name": "isStatefulSetReady", + "qualifiedName": "isStatefulSetReady", + "exported": false, + "signature": "func(string, string)(bool, error)", + "doc": "isStatefulSetReady determines if a StatefulSet is fully ready\n\nThe function retrieves the current state of a specified StatefulSet using\nKubernetes client APIs, then checks whether all its replicas are available.\nIt returns true when the StatefulSet meets readiness criteria or an error if\nretrieval fails.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:124", + "calls": [ + { + "name": "AppsV1", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetUpdatedStatefulset", + "kind": "function", + "source": [ + "func GetUpdatedStatefulset(ac appv1client.AppsV1Interface, namespace, name string) 
(*StatefulSet, error) {", + "\tresult, err := autodiscover.FindStatefulsetByNameByNamespace(ac, namespace, name)", + "\treturn \u0026StatefulSet{", + "\t\tresult,", + "\t}, err", + "}" + ] + }, + { + "name": "IsStatefulSetReady", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "getNotReadyStatefulSets", + "kind": "function", + "source": [ + "func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet {", + "\tnotReadyStatefulSets := []*provider.StatefulSet{}", + "\tfor _, sts := range statefulSets {", + "\t\tready, err := isStatefulSetReady(sts.Name, sts.Namespace)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to get %s: %v\", sts.ToString(), err)", + "\t\t\t// We'll mark it as not ready, anyways.", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif ready {", + "\t\t\tlog.Debug(\"%s is ready.\", sts.ToString())", + "\t\t} else {", + "\t\t\tnotReadyStatefulSets = append(notReadyStatefulSets, sts)", + "\t\t}", + "\t}", + "", + "\treturn notReadyStatefulSets", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isStatefulSetReady(name, namespace string) (bool, error) {", + "\tappsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1()", + "", + "\tsts, err := provider.GetUpdatedStatefulset(appsV1Api, namespace, name)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "", + "\treturn sts.IsStatefulSetReady(), nil", + "}" + ] + } + ], + "globals": [ + { + "name": "WaitForDeploymentSetReady", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:34" + }, + { + "name": "WaitForScalingToComplete", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:55" + } + ], + "consts": [ + { + "name": 
"ReplicaSetString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:30" + }, + { + "name": "StatefulsetString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/podsets/podsets.go:31" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "scaling", + "files": 4, + "imports": [ + "context", + "errors", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "k8s.io/api/apps/v1", + "k8s.io/api/autoscaling/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/client-go/kubernetes/typed/apps/v1", + "k8s.io/client-go/kubernetes/typed/autoscaling/v1", + "k8s.io/client-go/scale", + "k8s.io/client-go/util/retry", + "strings", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "CheckOwnerReference", + "qualifiedName": "CheckOwnerReference", + "exported": true, + "signature": "func([]apiv1.OwnerReference, []configuration.CrdFilter, []*apiextv1.CustomResourceDefinition)(bool)", + "doc": "CheckOwnerReference Determines if owner references match scalable CRD filters\n\nThe function iterates over each OwnerReference of a resource, comparing its\nkind to the kinds defined in available CustomResourceDefinitions. For\nmatching kinds it checks whether the CRD name ends with any configured\nsuffix; if so, it returns the corresponding scalability flag from that\nfilter. 
If no match is found, it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/scaling_helper.go:50", + "calls": [ + { + "pkgPath": "strings", + "name": "HasSuffix", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, 
deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range 
env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, name := statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA 
scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CheckOwnerReference(ownerReference []apiv1.OwnerReference, crdFilter []configuration.CrdFilter, crds []*apiextv1.CustomResourceDefinition) bool {", + "\tfor _, owner := range ownerReference {", + "\t\tfor _, aCrd := range crds {", + "\t\t\tif aCrd.Spec.Names.Kind == owner.Kind {", + "\t\t\t\tfor _, crdF := range crdFilter {", + "\t\t\t\t\tif strings.HasSuffix(aCrd.Name, crdF.NameSuffix) {", + "\t\t\t\t\t\treturn crdF.Scalable", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "GetResourceHPA", + "qualifiedName": "GetResourceHPA", + "exported": true, + "signature": "func([]*scalingv1.HorizontalPodAutoscaler, string, string, string)(*scalingv1.HorizontalPodAutoscaler)", + "doc": "GetResourceHPA Finds an HPA matching a resource name, namespace, and kind\n\nThe function iterates over a list of HorizontalPodAutoscaler objects,\nchecking each one's scale target reference for the specified kind, name, and\nnamespace. 
If a match is found, that HPA is returned; otherwise the function\nreturns nil to indicate no suitable HPA exists.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/scaling_helper.go:19", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, 
deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testScaleCrd", + "kind": "function", + "source": [ + "func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.ScaleCrUnderTest {", + 
"\t\tgroupResourceSchema := env.ScaleCrUnderTest[i].GroupResourceSchema", + "\t\tscaleCr := env.ScaleCrUnderTest[i].Scale", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, scaleCr.Name, scaleCr.Namespace, scaleCr.Kind); hpa != nil {", + "\t\t\tif !scaling.TestScaleHPACrd(\u0026scaleCr, hpa, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"CR has failed the scaling test: %s\", scaleCr.GetName())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"cr has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !scaling.TestScaleCrd(\u0026scaleCr, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"CR has failed the non-HPA scale test: %s\", scaleCr.GetName())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"CR is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR is scalable\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif 
!scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, name := statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif 
!scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespace, kind string) *scalingv1.HorizontalPodAutoscaler {", + "\tfor _, hpa := range hpaList {", + "\t\tif hpa.Spec.ScaleTargetRef.Kind == kind \u0026\u0026 hpa.Spec.ScaleTargetRef.Name == name \u0026\u0026 hpa.Namespace == namespace {", + "\t\t\treturn hpa", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "IsManaged", + "qualifiedName": "IsManaged", + "exported": true, + "signature": "func(string, []configuration.ManagedDeploymentsStatefulsets)(bool)", + "doc": "IsManaged Checks if a deployment or stateful set is listed as managed\n\nThe function iterates over the provided slice of managed pod sets, comparing\neach name with the supplied pod set name. If a match is found it returns\ntrue, indicating that the object should be considered under management for\nscaling tests. 
Otherwise, it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/scaling_helper.go:34", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, 
\"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + 
"\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, name := statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is 
not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsManaged(podSetName string, managedPodSet []configuration.ManagedDeploymentsStatefulsets) bool {", + "\tfor _, ps := range managedPodSet {", + "\t\tif ps.Name == podSetName {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "TestScaleCrd", + "qualifiedName": "TestScaleCrd", + "exported": true, + "signature": "func(*provider.CrScale, schema.GroupResource, time.Duration, *log.Logger)(bool)", + "doc": "TestScaleCrd Tests scaling of a custom resource by temporarily adjusting its replica count\n\nThe function receives a reference to a CR with desired replicas, a\ngroup‑resource schema, a timeout duration, and a logger. It retrieves\nKubernetes clients, then either increments or decrements the replica count\ndepending on whether the current value is one or more, calling an internal\nhelper to apply the change and wait for completion. 
Success of both\nscale‑up and scale‑down operations results in true; any failure logs an\nerror and returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/crd_scaling.go:48", + "calls": [ + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "GetName", + "kind": "function" + }, + { + "name": "GetNamespace", + "kind": "function" + }, + { + "name": "scaleCrHelper", + "kind": "function", + "source": [ + "func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Debug(\"Scale UP CRS to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Debug(\"Scale DOWN CRS to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tnamespace := autoscalerpram.GetNamespace()", + "\t\tname := autoscalerpram.GetName()", + "\t\tscalingObject, err := scalesGetter.Scales(namespace).Get(context.TODO(), rc, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tscalingObject.Spec.Replicas = replicas", + "\t\t_, err = scalesGetter.Scales(namespace).Update(context.TODO(), rc, scalingObject, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update DynamicClient, 
err=%v\", err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, name, timeout, rc, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Can notscale DynamicClient, err=%v\", retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "scaleCrHelper", + "kind": "function", + "source": [ + "func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Debug(\"Scale UP CRS to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Debug(\"Scale DOWN CRS to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tnamespace := autoscalerpram.GetNamespace()", + "\t\tname := autoscalerpram.GetName()", + "\t\tscalingObject, err := scalesGetter.Scales(namespace).Get(context.TODO(), rc, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tscalingObject.Spec.Replicas = replicas", + "\t\t_, err = scalesGetter.Scales(namespace).Update(context.TODO(), rc, scalingObject, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update DynamicClient, err=%v\", err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, name, timeout, rc, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Can notscale DynamicClient, 
err=%v\", retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "scaleCrHelper", + "kind": "function", + "source": [ + "func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Debug(\"Scale UP CRS to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Debug(\"Scale DOWN CRS to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tnamespace := autoscalerpram.GetNamespace()", + "\t\tname := autoscalerpram.GetName()", + "\t\tscalingObject, err := scalesGetter.Scales(namespace).Get(context.TODO(), rc, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tscalingObject.Spec.Replicas = replicas", + "\t\t_, err = scalesGetter.Scales(namespace).Update(context.TODO(), rc, scalingObject, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update DynamicClient, err=%v\", err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, name, timeout, rc, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Can notscale DynamicClient, err=%v\", retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "scaleCrHelper", + "kind": "function", + "source": [ + "func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration, logger 
*log.Logger) bool {", + "\tif up {", + "\t\tlogger.Debug(\"Scale UP CRS to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Debug(\"Scale DOWN CRS to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tnamespace := autoscalerpram.GetNamespace()", + "\t\tname := autoscalerpram.GetName()", + "\t\tscalingObject, err := scalesGetter.Scales(namespace).Get(context.TODO(), rc, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tscalingObject.Spec.Replicas = replicas", + "\t\t_, err = scalesGetter.Scales(namespace).Update(context.TODO(), rc, scalingObject, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update DynamicClient, err=%v\", err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, name, timeout, rc, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Can notscale DynamicClient, err=%v\", retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testScaleCrd", + "kind": "function", + "source": [ + "func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.ScaleCrUnderTest {", + "\t\tgroupResourceSchema := env.ScaleCrUnderTest[i].GroupResourceSchema", + "\t\tscaleCr := env.ScaleCrUnderTest[i].Scale", + "\t\tif hpa := 
scaling.GetResourceHPA(env.HorizontalScaler, scaleCr.Name, scaleCr.Namespace, scaleCr.Kind); hpa != nil {", + "\t\t\tif !scaling.TestScaleHPACrd(\u0026scaleCr, hpa, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"CR has failed the scaling test: %s\", scaleCr.GetName())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"cr has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !scaling.TestScaleCrd(\u0026scaleCr, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"CR has failed the non-HPA scale test: %s\", scaleCr.GetName())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"CR is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR is scalable\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif crScale == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\treplicas := crScale.Spec.Replicas", + "\tname := crScale.GetName()", + "\tnamespace := crScale.GetNamespace()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", 
+ "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "name": "TestScaleDeployment", + "qualifiedName": "TestScaleDeployment", + "exported": true, + "signature": "func(*appsv1.Deployment, time.Duration, *log.Logger)(bool)", + "doc": "TestScaleDeployment Tests scaling behavior of a Deployment without HPA\n\nThe function obtains Kubernetes clients, determines the current replica count\nor defaults to one, then performs a scale-up followed by a scale-down if the\ndeployment has fewer than two replicas; otherwise it scales down first and\nthen up. Each scaling operation is executed through a helper that retries on\nconflicts and waits for pods to become ready. 
It logs success or failure and\nreturns true only when both scaling steps complete successfully.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/deployment_scaling.go:48", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "scaleDeploymentHelper", + "kind": "function", + "source": [ + "func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Info(\"Scale UP deployment to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Info(\"Scale DOWN deployment to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of Deployment before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tdp, err := client.Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tdp.Spec.Replicas = \u0026replicas", + "\t\t_, err = client.Deployments(deployment.Namespace).Update(context.TODO(), dp, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", 
deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(deployment.Namespace, deployment.Name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn errors.New(\"can not update deployment\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale Deployment %s:%s, err=%v\", deployment.Namespace, deployment.Name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "scaleDeploymentHelper", + "kind": "function", + "source": [ + "func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Info(\"Scale UP deployment to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Info(\"Scale DOWN deployment to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of Deployment before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tdp, err := client.Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tdp.Spec.Replicas = \u0026replicas", + "\t\t_, err = client.Deployments(deployment.Namespace).Update(context.TODO(), dp, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif 
!podsets.WaitForDeploymentSetReady(deployment.Namespace, deployment.Name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn errors.New(\"can not update deployment\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale Deployment %s:%s, err=%v\", deployment.Namespace, deployment.Name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "scaleDeploymentHelper", + "kind": "function", + "source": [ + "func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Info(\"Scale UP deployment to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Info(\"Scale DOWN deployment to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of Deployment before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tdp, err := client.Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tdp.Spec.Replicas = \u0026replicas", + "\t\t_, err = client.Deployments(deployment.Namespace).Update(context.TODO(), dp, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(deployment.Namespace, deployment.Name, timeout, logger) 
{", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn errors.New(\"can not update deployment\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale Deployment %s:%s, err=%v\", deployment.Namespace, deployment.Name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "scaleDeploymentHelper", + "kind": "function", + "source": [ + "func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Info(\"Scale UP deployment to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Info(\"Scale DOWN deployment to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of Deployment before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tdp, err := client.Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tdp.Spec.Replicas = \u0026replicas", + "\t\t_, err = client.Deployments(deployment.Namespace).Update(context.TODO(), dp, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(deployment.Namespace, deployment.Name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, 
deployment.Name)", + "\t\t\treturn errors.New(\"can not update deployment\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale Deployment %s:%s, err=%v\", deployment.Namespace, deployment.Name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + 
"\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlogger.Info(\"Deployment not 
using HPA: %s:%s\", deployment.Namespace, deployment.Name)", + "\tvar replicas int32", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t} else {", + "\t\treplicas = 1", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "TestScaleHPACrd", + "qualifiedName": "TestScaleHPACrd", + "exported": true, + "signature": "func(*provider.CrScale, *scalingv1.HorizontalPodAutoscaler, schema.GroupResource, time.Duration, *log.Logger)(bool)", + "doc": "TestScaleHPACrd Validates HPA scaling for a custom resource\n\nThe function checks that an associated horizontal pod autoscaler can scale\nthe target CR up and down within a timeout, restoring original limits\nafterward. 
It updates the HPA spec to match the CR’s desired replica count,\nwaits for the CR to reach that state, then reverts to its original min/max\nsettings. If any step fails it logs an error and returns false; otherwise\ntrue.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/crd_scaling.go:137", + "calls": [ + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "GetNamespace", + "kind": "function" + }, + { + "name": "HorizontalPodAutoscalers", + "kind": "function" + }, + { + "name": "AutoscalingV1", + "kind": "function" + }, + { + "name": "int32", + "kind": "function" + }, + { + "name": "GetName", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaCRDHelper", + "kind": "function", + "source": [ + "func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{})", + "\t\tif err != nil 
{", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, crName)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaCRDHelper", + "kind": "function", + "source": [ + "func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, crName)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + 
] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaCRDHelper", + "kind": "function", + "source": [ + "func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, crName)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaCRDHelper", + "kind": "function", + "source": [ + "func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to 
scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, crName)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaCRDHelper", + "kind": "function", + "source": [ + "func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, crName)", + 
"\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testScaleCrd", + "kind": "function", + "source": [ + "func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.ScaleCrUnderTest {", + "\t\tgroupResourceSchema := env.ScaleCrUnderTest[i].GroupResourceSchema", + "\t\tscaleCr := env.ScaleCrUnderTest[i].Scale", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, scaleCr.Name, scaleCr.Namespace, scaleCr.Kind); hpa != nil {", + "\t\t\tif !scaling.TestScaleHPACrd(\u0026scaleCr, hpa, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"CR has failed the scaling test: %s\", scaleCr.GetName())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"cr has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\tif !scaling.TestScaleCrd(\u0026scaleCr, groupResourceSchema, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"CR has failed the non-HPA scale test: %s\", scaleCr.GetName())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"CR is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCrdReportObject(scaleCr.Namespace, scaleCr.Name, \"CR is scalable\", true))", + "\t\t}", + "\t}", + 
"\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif cr == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\tnamespace := cr.GetNamespace()", + "", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", + "\treplicas := cr.Spec.Replicas", + "\tname := cr.GetName()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = 
scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, min, hpa.Spec.MaxReplicas)", + "\treturn scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "}" + ] + }, + { + "name": "TestScaleHpaDeployment", + "qualifiedName": "TestScaleHpaDeployment", + "exported": true, + "signature": "func(*provider.Deployment, *v1autoscaling.HorizontalPodAutoscaler, time.Duration, *log.Logger)(bool)", + "doc": "TestScaleHpaDeployment Verifies that an HPA can scale a deployment up and down correctly\n\nThe function retrieves the Kubernetes client and determines the current\nreplica count of the deployment, as well as the min and max values from the\nHPA specification. It then performs a sequence of scaling operations: if\nreplicas are low it scales up to the minimum, restores to the original, or if\nhigh it scales down to one replica before restoring. After each adjustment it\ncalls a helper that updates the HPA and waits for the deployment to become\nready. 
If any step fails, false is returned; otherwise true indicates\nsuccessful round‑trip scaling.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/deployment_scaling.go:139", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "HorizontalPodAutoscalers", + "kind": "function" + }, + { + "name": "AutoscalingV1", + "kind": "function" + }, + { + "name": "int32", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaDeploymentHelper", + "kind": "function", + "source": [ + "func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s , err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(namespace, deploymentName, timeout, logger) {", + "\t\t\tlogger.Error(\"Deployment not 
ready after scale operation %s:%s\", namespace, deploymentName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s , err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaDeploymentHelper", + "kind": "function", + "source": [ + "func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s , err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(namespace, deploymentName, timeout, logger) {", + "\t\t\tlogger.Error(\"Deployment not ready after scale operation %s:%s\", namespace, deploymentName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s , err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaDeploymentHelper", + "kind": "function", + "source": [ + "func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool 
{", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s , err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(namespace, deploymentName, timeout, logger) {", + "\t\t\tlogger.Error(\"Deployment not ready after scale operation %s:%s\", namespace, deploymentName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s , err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaDeploymentHelper", + "kind": "function", + "source": [ + "func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s , err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", 
namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(namespace, deploymentName, timeout, logger) {", + "\t\t\tlogger.Error(\"Deployment not ready after scale operation %s:%s\", namespace, deploymentName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s , err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaDeploymentHelper", + "kind": "function", + "source": [ + "func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s , err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(namespace, deploymentName, timeout, logger) {", + "\t\t\tlogger.Error(\"Deployment not ready after scale operation %s:%s\", namespace, deploymentName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s , err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + 
"name": "testDeploymentScaling", + "kind": "function", + "source": [ + "func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, deployment := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", deployment.ToString())", + "\t\tif scaling.IsManaged(deployment.Name, env.Config.ManagedDeployments) {", + "\t\t\tif !scaling.CheckOwnerReference(deployment.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"Deployment %q scaling failed due to OwnerReferences that are not scalable\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Deployment %q scaling skipped due to scalable OwnerReferences, test will run on the CR scaling\", deployment.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip deployment if it is allowed by config", + "\t\tif nameInDeploymentSkipList(deployment.Name, deployment.Namespace, env.Config.SkipScalingTestDeployments) {", + "\t\t\tcheck.LogInfo(\"Deployment %q is being skipped due to configuration setting\", deployment.ToString())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TestDeploymentScaling test scaling of deployment", + "\t\t// This is the entry point for deployment scaling tests", + "\t\tns, name := deployment.Namespace, deployment.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"Deployment\"); hpa != nil {", + "\t\t\t// if the deployment is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the deployment", + "\t\t\tif !scaling.TestScaleHpaDeployment(deployment, hpa, timeout, 
check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"Deployment %q has failed the HPA scale test\", deployment.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the HPA scale test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the deployment is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleDeployment(deployment.Deployment, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"Deployment %q has failed the non-HPA scale test\", deployment.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Deployment %q is scalable\", deployment.ToString())", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewDeploymentReportObject(deployment.Namespace, deployment.Name, \"Deployment is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(deployment.Namespace)", + "\tvar min int32", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t} else {", + "\t\tmin = 1", + "\t}", + "\treplicas := int32(1)", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d 
max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, min, max)", + "\treturn scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger)", + "}" + ] + }, + { + "name": "TestScaleHpaStatefulSet", + "qualifiedName": "TestScaleHpaStatefulSet", + "exported": true, + "signature": "func(*appsv1.StatefulSet, *v1autoscaling.HorizontalPodAutoscaler, time.Duration, *log.Logger)(bool)", + "doc": "TestScaleHpaStatefulSet Verifies HPA scaling of a StatefulSet\n\nThe function obtains Kubernetes clients, then adjusts the HPA’s min and 
max\nreplica counts to test both up‑scaling and down‑scaling scenarios on the\ntarget StatefulSet. It calls a helper that updates the HPA, waits for the\nStatefulSet to become ready, and reports success or failure. Finally it\nrestores the original HPA settings and returns whether all scaling steps\nsucceeded.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/statefulset_scaling.go:134", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "HorizontalPodAutoscalers", + "kind": "function" + }, + { + "name": "AutoscalingV1", + "kind": "function" + }, + { + "name": "int32", + "kind": "function" + }, + { + "name": "int32", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaStatefulSetHelper", + "kind": "function", + "source": [ + "func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + 
"\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout, logger) {", + "\t\t\tlogger.Error(\"StatefulSet not ready after scale operation %s:%s\", namespace, statefulsetName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaStatefulSetHelper", + "kind": "function", + "source": [ + "func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout, logger) {", + "\t\t\tlogger.Error(\"StatefulSet not ready after scale operation %s:%s\", namespace, statefulsetName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": 
"function" + }, + { + "name": "scaleHpaStatefulSetHelper", + "kind": "function", + "source": [ + "func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout, logger) {", + "\t\t\tlogger.Error(\"StatefulSet not ready after scale operation %s:%s\", namespace, statefulsetName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaStatefulSetHelper", + "kind": "function", + "source": [ + "func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + 
"\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout, logger) {", + "\t\t\tlogger.Error(\"StatefulSet not ready after scale operation %s:%s\", namespace, statefulsetName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleHpaStatefulSetHelper", + "kind": "function", + "source": [ + "func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout, logger) {", + "\t\t\tlogger.Error(\"StatefulSet not ready after scale operation %s:%s\", namespace, statefulsetName)", + "\t\t}", + "\t\treturn nil", + 
"\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, name := 
statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpaName := hpa.Name", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\thpscaler := 
clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", + "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, max)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, max)", + "\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, min, max, timeout, logger)", + "\treturn pass", + "}" + ] + }, + { + "name": "TestScaleStatefulSet", + "qualifiedName": 
"TestScaleStatefulSet", + "exported": true, + "signature": "func(*appsv1.StatefulSet, time.Duration, *log.Logger)(bool)", + "doc": "TestScaleStatefulSet Tests scaling of a StatefulSet by adjusting replicas\n\nThe function retrieves Kubernetes clients, determines the current replica\ncount, and then performs an up‑then‑down or down‑then‑up scale\nsequence using a helper that retries on conflict. It logs each action and\nreturns false if any scaling step fails. A true value indicates both scale\noperations succeeded within the given timeout.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/statefulset_scaling.go:46", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "StatefulSets", + "kind": "function" + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "int32", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleStatefulsetHelper", + "kind": "function", + "source": [ + "func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger) bool {", + "\tname := statefulset.Name", + "\tnamespace := statefulset.Namespace", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of statefulset before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid 
exhausting the apiserver", + "\t\tss, err := ssClient.Get(context.TODO(), name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of statefulset %s:%s with error %s\", namespace, name, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tss.Spec.Replicas = \u0026replicas", + "\t\t_, err = clients.K8sClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), ss, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update statefulset\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale statefulset %s:%s, err=%v\", namespace, name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleStatefulsetHelper", + "kind": "function", + "source": [ + "func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger) bool {", + "\tname := statefulset.Name", + "\tnamespace := statefulset.Namespace", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of statefulset before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tss, err := ssClient.Get(context.TODO(), name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of statefulset %s:%s with error %s\", namespace, name, err)", + "\t\t\treturn err", + "\t\t}", + 
"\t\tss.Spec.Replicas = \u0026replicas", + "\t\t_, err = clients.K8sClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), ss, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update statefulset\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale statefulset %s:%s, err=%v\", namespace, name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleStatefulsetHelper", + "kind": "function", + "source": [ + "func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger) bool {", + "\tname := statefulset.Name", + "\tnamespace := statefulset.Namespace", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of statefulset before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tss, err := ssClient.Get(context.TODO(), name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of statefulset %s:%s with error %s\", namespace, name, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tss.Spec.Replicas = \u0026replicas", + "\t\t_, err = clients.K8sClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), ss, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn err", + 
"\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update statefulset\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale statefulset %s:%s, err=%v\", namespace, name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "scaleStatefulsetHelper", + "kind": "function", + "source": [ + "func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger) bool {", + "\tname := statefulset.Name", + "\tnamespace := statefulset.Namespace", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of statefulset before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tss, err := ssClient.Get(context.TODO(), name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of statefulset %s:%s with error %s\", namespace, name, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tss.Spec.Replicas = \u0026replicas", + "\t\t_, err = clients.K8sClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), ss, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update statefulset\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil 
{", + "\t\tlogger.Error(\"Cannot scale statefulset %s:%s, err=%v\", namespace, name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testStatefulSetScaling", + "kind": "function", + "source": [ + "func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) {", + "\tdefer env.SetNeedsRefresh()", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, statefulSet := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", statefulSet.ToString())", + "\t\tif scaling.IsManaged(statefulSet.Name, env.Config.ManagedStatefulsets) {", + "\t\t\tif !scaling.CheckOwnerReference(statefulSet.GetOwnerReferences(), env.Config.CrdFilters, env.Crds) {", + "\t\t\t\tcheck.LogError(\"StatefulSet %q scaling failed due to OwnerReferences that are not scalable\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has OwnerReferences that are not scalable\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"StatefulSet %q scaling skipped due to scalable OwnerReferences, test will run on te CR scaling\", statefulSet.ToString())", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// Skip statefulset if it is allowed by config", + "\t\tif nameInStatefulSetSkipList(statefulSet.Name, statefulSet.Namespace, env.Config.SkipScalingTestStatefulSets) {", + "\t\t\tcheck.LogInfo(\"StatefulSet %q is being skipped due to configuration setting\", statefulSet.String())", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// TeststatefulsetScaling test scaling of statefulset", + "\t\t// This is the entry point for statefulset scaling tests", + "\t\tns, 
name := statefulSet.Namespace, statefulSet.Name", + "\t\tif hpa := scaling.GetResourceHPA(env.HorizontalScaler, name, ns, \"StatefulSet\"); hpa != nil {", + "\t\t\t// if the statefulset is controller by", + "\t\t\t// horizontal scaler, then test that scaler", + "\t\t\t// can scale the statefulset", + "\t\t\tif !scaling.TestScaleHpaStatefulSet(statefulSet.StatefulSet, hpa, timeout, check.GetLogger()) {", + "\t\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %q\", statefulSet.ToString())", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the HPA scaling test\", false))", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "\t\t// if the statefulset is not controller by HPA", + "\t\t// scale it directly", + "\t\tif !scaling.TestScaleStatefulSet(statefulSet.StatefulSet, timeout, check.GetLogger()) {", + "\t\t\tcheck.LogError(\"StatefulSet has failed the scaling test: %s\", statefulSet.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet has failed the non-HPA scale test\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"StatefulSet is scalable\")", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewStatefulSetReportObject(statefulSet.Namespace, statefulSet.Name, \"StatefulSet is scalable\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\tssClients := clients.K8sClient.AppsV1().StatefulSets(namespace)", + "\tlogger.Debug(\"Scale statefulset not using HPA 
%s:%s\", namespace, name)", + "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "scaleCrHelper", + "qualifiedName": "scaleCrHelper", + "exported": false, + "signature": "func(scale.ScalesGetter, schema.GroupResource, *provider.CrScale, int32, bool, time.Duration, *log.Logger)(bool)", + "doc": "scaleCrHelper adjusts the replica count of a custom resource\n\nThe function takes a scaling client, a group-resource descriptor, a CR\nobject, desired replicas, direction flag, timeout, and logger. 
It updates the\nscaling specification for the CR, retries on conflict using exponential\nbackoff, waits for scaling to finish, logs errors if any, and returns true\nwhen successful.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/crd_scaling.go:95", + "calls": [ + { + "name": "Debug", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/util/retry", + "name": "RetryOnConflict", + "kind": "function" + }, + { + "name": "GetNamespace", + "kind": "function" + }, + { + "name": "GetName", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "Scales", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "name": "Scales", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForScalingToComplete", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleCrd", + "kind": "function", + "source": [ + "func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif crScale == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\treplicas := crScale.Spec.Replicas", + "\tname := crScale.GetName()", + "\tnamespace := crScale.GetNamespace()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif 
!scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, false, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleCrHelper(clients.ScalingClient, groupResourceSchema, crScale, replicas, true, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale CR %q in namespace %q\", name, namespace)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Debug(\"Scale UP CRS to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Debug(\"Scale DOWN CRS to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tnamespace := autoscalerpram.GetNamespace()", + "\t\tname := autoscalerpram.GetName()", + "\t\tscalingObject, err := scalesGetter.Scales(namespace).Get(context.TODO(), rc, name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\treturn err", + "\t\t}", + "\t\tscalingObject.Spec.Replicas = 
replicas", + "\t\t_, err = scalesGetter.Scales(namespace).Update(context.TODO(), rc, scalingObject, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update DynamicClient, err=%v\", err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, name, timeout, rc, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Can notscale DynamicClient, err=%v\", retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "scaleDeploymentHelper", + "qualifiedName": "scaleDeploymentHelper", + "exported": false, + "signature": "func(typedappsv1.AppsV1Interface, *appsv1.Deployment, int32, time.Duration, bool, *log.Logger)(bool)", + "doc": "scaleDeploymentHelper Adjusts a Deployment's replica count with conflict handling\n\nThis routine logs the scaling action, retrieves the current Deployment\nobject, updates its desired replica count, and applies the change using a\nretry loop to handle conflicts. After a successful update it waits for all\npods in the set to become ready within a specified timeout, reporting any\nerrors through logging. 
The function returns true if the scaling succeeds and\nfalse otherwise.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/deployment_scaling.go:95", + "calls": [ + { + "name": "Info", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "pkgPath": "k8s.io/client-go/util/retry", + "name": "RetryOnConflict", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "Deployments", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "name": "Deployments", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForDeploymentSetReady", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleDeployment", + "kind": "function", + "source": [ + "func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tlogger.Info(\"Deployment not using HPA: %s:%s\", deployment.Namespace, deployment.Name)", + "\tvar replicas int32", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t} else {", + "\t\treplicas = 1", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment 
%s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, false, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tif !scaleDeploymentHelper(clients.K8sClient.AppsV1(), deployment, replicas, timeout, true, logger) {", + "\t\t\tlogger.Error(\"Cannot scale Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool, logger *log.Logger) bool {", + "\tif up {", + "\t\tlogger.Info(\"Scale UP deployment to %d replicas\", replicas)", + "\t} else {", + "\t\tlogger.Info(\"Scale DOWN deployment to %d replicas\", replicas)", + "\t}", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of Deployment before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tdp, err := client.Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tdp.Spec.Replicas = 
\u0026replicas", + "\t\t_, err = client.Deployments(deployment.Namespace).Update(context.TODO(), dp, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(deployment.Namespace, deployment.Name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update Deployment %s:%s\", deployment.Namespace, deployment.Name)", + "\t\t\treturn errors.New(\"can not update deployment\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale Deployment %s:%s, err=%v\", deployment.Namespace, deployment.Name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "scaleHpaCRDHelper", + "qualifiedName": "scaleHpaCRDHelper", + "exported": false, + "signature": "func(hps.HorizontalPodAutoscalerInterface, string, string, string, int32, int32, time.Duration, schema.GroupResource, *log.Logger)(bool)", + "doc": "scaleHpaCRDHelper Attempts to scale an HPA by updating its replica bounds\n\nThe function retrieves the specified HorizontalPodAutoscaler, sets new\nminimum and maximum replica counts, and updates it in a retry loop that\nhandles conflicts. After a successful update, it waits for the associated\ncustom resource to reach the desired state within a timeout period. 
It logs\nany errors encountered and returns true on success or false if scaling fails.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/crd_scaling.go:196", + "calls": [ + { + "pkgPath": "k8s.io/client-go/util/retry", + "name": "RetryOnConflict", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForScalingToComplete", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHPACrd", + "kind": "function", + "source": [ + "func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool {", + "\tif cr == nil {", + "\t\tlogger.Error(\"CR object is nill\")", + "\t\treturn false", + "\t}", + "\tclients := clientsholder.GetClientsHolder()", + "\tnamespace := cr.GetNamespace()", + "", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", + "\treplicas := cr.Spec.Replicas", + "\tname := cr.GetName()", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := 
scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, replicas, replicas, timeout, groupResourceSchema, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpa.Name, min, hpa.Spec.MaxReplicas)", + "\treturn scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot 
update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, metav1.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, crName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForScalingToComplete(namespace, crName, timeout, groupResourceSchema, logger) {", + "\t\t\tlogger.Error(\"Cannot update CR %s:%s\", namespace, crName)", + "\t\t\treturn errors.New(\"can not update cr\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "scaleHpaDeploymentHelper", + "qualifiedName": "scaleHpaDeploymentHelper", + "exported": false, + "signature": "func(hps.HorizontalPodAutoscalerInterface, string, string, string, int32, int32, time.Duration, *log.Logger)(bool)", + "doc": "scaleHpaDeploymentHelper Adjusts the minimum and maximum replica counts for a horizontal pod autoscaler and waits for the deployment to stabilize\n\nThe helper updates an HPA's MinReplicas and MaxReplicas fields using retry\nlogic to handle conflicts, then triggers a wait until the associated\ndeployment is ready or times out. 
It logs any errors encountered during get,\nupdate, or readiness checks and returns true only when all operations\nsucceed.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/deployment_scaling.go:196", + "calls": [ + { + "pkgPath": "k8s.io/client-go/util/retry", + "name": "RetryOnConflict", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForDeploymentSetReady", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHpaDeployment", + "kind": "function", + "source": [ + "func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(deployment.Namespace)", + "\tvar min int32", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t} else {", + "\t\tmin = 1", + "\t}", + "\treplicas := int32(1)", + "\tif deployment.Spec.Replicas != nil {", + "\t\treplicas = *deployment.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, 
deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass := scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, replicas, replicas)", + "\t\tpass = scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", deployment.Namespace, hpa.Name, min, max)", + "\treturn scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s , err=%v\", 
namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, deploymentName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForDeploymentSetReady(namespace, deploymentName, timeout, logger) {", + "\t\t\tlogger.Error(\"Deployment not ready after scale operation %s:%s\", namespace, deploymentName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s , err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "scaleHpaStatefulSetHelper", + "qualifiedName": "scaleHpaStatefulSetHelper", + "exported": false, + "signature": "func(hps.HorizontalPodAutoscalerInterface, string, string, string, int32, int32, time.Duration, *log.Logger)(bool)", + "doc": "scaleHpaStatefulSetHelper updates HPA replica limits and waits for StatefulSet readiness\n\nThe function attempts to set the horizontal pod autoscaler's minimum and\nmaximum replicas, retrying on conflicts until success or timeout. After each\nupdate it polls the target StatefulSet to confirm it reaches a ready state\nwithin the given duration, logging errors if not. 
It returns true when both\nthe HPA update and readiness check succeed, otherwise false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/statefulset_scaling.go:192", + "calls": [ + { + "pkgPath": "k8s.io/client-go/util/retry", + "name": "RetryOnConflict", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForStatefulSetReady", + "kind": "function", + "source": [ + "func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log.Logger) bool {", + "\tlogger.Debug(\"Check if statefulset %s:%s is ready\", ns, name)", + "\tclients := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tss, err := provider.GetUpdatedStatefulset(clients.K8sClient.AppsV1(), ns, name)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Error while getting the %s, err: %v\", ss.ToString(), err)", + "\t\t} else if ss.IsStatefulSetReady() {", + "\t\t\tlogger.Info(\"%s is ready\", ss.ToString())", + "\t\t\treturn true", + "\t\t}", + "\t\ttime.Sleep(time.Second)", + "\t}", + "\tlogger.Error(\"Statefulset %s:%s is not ready\", ns, name)", + "\treturn false", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleHpaStatefulSet", + "kind": "function", + "source": [ + "func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout 
time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\thpaName := hpa.Name", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\thpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)", + "\tmin := int32(1)", + "\tif hpa.Spec.MinReplicas != nil {", + "\t\tmin = *hpa.Spec.MinReplicas", + "\t}", + "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "\tmax := hpa.Spec.MaxReplicas", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN HPA %s:%s to min=%d max=%d\", namespace, hpaName, replicas, replicas)", + "\t\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, max)", + "\t\tpass = scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, replicas, replicas, timeout, logger)", + "\t\tif !pass {", + "\t\t\treturn false", + "\t\t}", + "\t}", + "\t// back the min and the max value of the hpa", + "\tlogger.Debug(\"Back HPA %s:%s to min=%d max=%d\", namespace, hpaName, min, 
max)", + "\tpass := scaleHpaStatefulSetHelper(hpscaler, hpaName, name, namespace, min, max, timeout, logger)", + "\treturn pass", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool {", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\thpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\thpa.Spec.MinReplicas = \u0026min", + "\t\thpa.Spec.MaxReplicas = max", + "\t\t_, err = hpscaler.Update(context.TODO(), hpa, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update autoscaler to scale %s:%s, err=%v\", namespace, statefulsetName, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, statefulsetName, timeout, logger) {", + "\t\t\tlogger.Error(\"StatefulSet not ready after scale operation %s:%s\", namespace, statefulsetName)", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale hpa %s:%s, err=%v\", namespace, hpaName, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + }, + { + "name": "scaleStatefulsetHelper", + "qualifiedName": "scaleStatefulsetHelper", + "exported": false, + "signature": "func(*clientsholder.ClientsHolder, v1.StatefulSetInterface, *appsv1.StatefulSet, int32, time.Duration, *log.Logger)(bool)", + "doc": "scaleStatefulsetHelper updates a StatefulSet replica count and waits for readiness\n\nThe helper retrieves the current StatefulSet, sets its desired replicas, and\nupdates it using retry logic to handle conflicts. 
After each successful\nupdate it polls until the set reports ready or times out. It logs failures\nand returns a boolean indicating success.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/scaling/statefulset_scaling.go:95", + "calls": [ + { + "pkgPath": "k8s.io/client-go/util/retry", + "name": "RetryOnConflict", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Update", + "kind": "function" + }, + { + "name": "StatefulSets", + "kind": "function" + }, + { + "name": "AppsV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets", + "name": "WaitForStatefulSetReady", + "kind": "function", + "source": [ + "func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log.Logger) bool {", + "\tlogger.Debug(\"Check if statefulset %s:%s is ready\", ns, name)", + "\tclients := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tss, err := provider.GetUpdatedStatefulset(clients.K8sClient.AppsV1(), ns, name)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Error while getting the %s, err: %v\", ss.ToString(), err)", + "\t\t} else if ss.IsStatefulSetReady() {", + "\t\t\tlogger.Info(\"%s is ready\", ss.ToString())", + "\t\t\treturn true", + "\t\t}", + "\t\ttime.Sleep(time.Second)", + "\t}", + "\tlogger.Error(\"Statefulset %s:%s is not ready\", ns, name)", + "\treturn false", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling", + "name": "TestScaleStatefulSet", + "kind": "function", + "source": [ + "func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration, logger *log.Logger) bool {", + "\tclients := clientsholder.GetClientsHolder()", + "\tname, namespace := statefulset.Name, statefulset.Namespace", + "\tssClients := clients.K8sClient.AppsV1().StatefulSets(namespace)", + "\tlogger.Debug(\"Scale statefulset not using HPA %s:%s\", namespace, name)", + "\treplicas := int32(1)", + "\tif statefulset.Spec.Replicas != nil {", + "\t\treplicas = *statefulset.Spec.Replicas", + "\t}", + "", + "\tif replicas \u003c= 1 {", + "\t\t// scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t}", + "\t} else {", + "\t\t// scale down", + "\t\treplicas--", + "\t\tlogger.Debug(\"Scale DOWN statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn false", + "\t\t} // scale up", + "\t\treplicas++", + "\t\tlogger.Debug(\"Scale UP statefulset to %d replicas\", replicas)", + "\t\tif !scaleStatefulsetHelper(clients, ssClients, statefulset, replicas, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot scale statefulset = %s:%s\", namespace, name)", + "\t\t\treturn 
false", + "\t\t}", + "\t}", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger) bool {", + "\tname := statefulset.Name", + "\tnamespace := statefulset.Namespace", + "", + "\tretryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {", + "\t\t// Retrieve the latest version of statefulset before attempting update", + "\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver", + "\t\tss, err := ssClient.Get(context.TODO(), name, v1machinery.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get latest version of statefulset %s:%s with error %s\", namespace, name, err)", + "\t\t\treturn err", + "\t\t}", + "\t\tss.Spec.Replicas = \u0026replicas", + "\t\t_, err = clients.K8sClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), ss, v1machinery.UpdateOptions{})", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn err", + "\t\t}", + "\t\tif !podsets.WaitForStatefulSetReady(namespace, name, timeout, logger) {", + "\t\t\tlogger.Error(\"Cannot update statefulset %s:%s\", namespace, name)", + "\t\t\treturn errors.New(\"can not update statefulset\")", + "\t\t}", + "\t\treturn nil", + "\t})", + "\tif retryErr != nil {", + "\t\tlogger.Error(\"Cannot scale statefulset %s:%s, err=%v\", namespace, name, retryErr)", + "\t\treturn false", + "\t}", + "\treturn true", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/tolerations", + "name": "tolerations", + "files": 1, + "imports": [ + "k8s.io/api/core/v1", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "IsTolerationDefault", + 
"qualifiedName": "IsTolerationDefault", + "exported": true, + "signature": "func(corev1.Toleration)(bool)", + "doc": "IsTolerationDefault Determines whether a toleration is one of the default Kubernetes tolerations\n\nThis function examines the key field of a toleration and returns true if it\nincludes the substring \"node.kubernetes.io\", indicating that the toleration\noriginates from the default set added by Kubernetes. It performs this check\nusing a simple string containment test, which covers all standard node taint\nkeys such as not-ready, unreachable, and memory-pressure. The result is a\nboolean value signifying whether the toleration should be considered\nunmodified.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/tolerations/tolerations.go:110", + "calls": [ + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/tolerations", + "name": "IsTolerationModified", + "kind": "function", + "source": [ + "func IsTolerationModified(t corev1.Toleration, qosClass corev1.PodQOSClass) bool {", + "\tconst (", + "\t\tnotReadyStr = \"node.kubernetes.io/not-ready\"", + "\t\tunreachableStr = \"node.kubernetes.io/unreachable\"", + "\t\tmemoryPressureStr = \"node.kubernetes.io/memory-pressure\"", + "\t)", + "\t// Check each of the tolerations to make sure they are the default tolerations added by k8s:", + "\t// tolerations:", + "\t// - effect: NoExecute", + "\t// key: node.kubernetes.io/not-ready", + "\t// operator: Exists", + "\t// tolerationSeconds: 300", + "\t// - effect: NoExecute", + "\t// key: node.kubernetes.io/unreachable", + "\t// operator: Exists", + "\t// tolerationSeconds: 300", + "\t// # this last one, only if QoS class for the pod is different than BestEffort", + "\t// - effect: NoSchedule", + "\t// key: node.kubernetes.io/memory-pressure", + "\t// operator: Exists", + "", + "\t// Short circuit. 
Anything that is not 'node.kubernetes.io' is considered a modified toleration immediately.", + "\tif !IsTolerationDefault(t) {", + "\t\treturn true", + "\t}", + "", + "\tswitch t.Effect {", + "\tcase corev1.TaintEffectNoExecute:", + "\t\tif t.Key == notReadyStr || t.Key == unreachableStr {", + "\t\t\t// 300 seconds is the default, return false for not modified", + "\t\t\tif t.Operator == corev1.TolerationOpExists \u0026\u0026 t.TolerationSeconds != nil \u0026\u0026 *t.TolerationSeconds == int64(tolerationSecondsDefault) {", + "\t\t\t\treturn false", + "\t\t\t}", + "", + "\t\t\t// Toleration seconds has been modified, return true.", + "\t\t\treturn true", + "\t\t}", + "\tcase corev1.TaintEffectNoSchedule:", + "\t\t// If toleration is NoSchedule - node.kubernetes.io/memory-pressure - Exists and the QoS class for", + "\t\t// the pod is different than BestEffort, it is also a default toleration added by k8s", + "\t\tif (t.Key == memoryPressureStr) \u0026\u0026", + "\t\t\t(t.Operator == corev1.TolerationOpExists) \u0026\u0026", + "\t\t\t(qosClass != corev1.PodQOSBestEffort) {", + "\t\t\treturn false", + "\t\t}", + "\tcase corev1.TaintEffectPreferNoSchedule:", + "\t\t// PreferNoSchedule is not a default toleration added by k8s", + "\t\treturn true", + "\t}", + "", + "\t// Check through the list of non-compliant tolerations to see if anything snuck by the above short circuit", + "\tfor _, nct := range nonCompliantTolerations {", + "\t\tif t.Effect == nct {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsTolerationDefault(t corev1.Toleration) bool {", + "\treturn strings.Contains(t.Key, \"node.kubernetes.io\")", + "}" + ] + }, + { + "name": "IsTolerationModified", + "qualifiedName": "IsTolerationModified", + "exported": true, + "signature": "func(corev1.Toleration, corev1.PodQOSClass)(bool)", + "doc": "IsTolerationModified Determines if a pod toleration deviates 
from the Kubernetes defaults\n\nThe function examines a single toleration in conjunction with the pod's QoS\nclass to see whether it matches one of the three default tolerations that\nkubelet adds automatically. It first filters out any toleration whose key\ndoes not start with \"node.kubernetes.io\", then checks the effect, key,\noperator, and optional seconds value against the expected defaults for\nNoExecute and NoSchedule effects, considering the pod's QoS class for\nmemory‑pressure cases. If a toleration fails these checks or matches a\nknown non‑compliant set, the function returns true to indicate\nmodification; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/tolerations/tolerations.go:41", + "calls": [ + { + "name": "IsTolerationDefault", + "kind": "function", + "source": [ + "func IsTolerationDefault(t corev1.Toleration) bool {", + "\treturn strings.Contains(t.Key, \"node.kubernetes.io\")", + "}" + ] + }, + { + "name": "int64", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodTolerationBypass", + "kind": "function", + "source": [ + "func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tpodIsCompliant := true", + "\t\tfor _, t := range put.Spec.Tolerations {", + "\t\t\t// Check if the tolerations fall outside the 'default' and are modified versions", + "\t\t\t// Take also into account the qosClass applied to the pod", + "\t\t\tif tolerations.IsTolerationModified(t, put.Status.QOSClass) {", + "\t\t\t\tcheck.LogError(\"Pod %q has been found with non-default toleration %s/%s which is not allowed.\", put, t.Key, t.Effect)", + "\t\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has non-default toleration\", false).", + "\t\t\t\t\tAddField(testhelper.TolerationKey, t.Key).", + "\t\t\t\t\tAddField(testhelper.TolerationEffect, string(t.Effect)))", + "\t\t\t\tpodIsCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has default toleration\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has default toleration\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsTolerationModified(t corev1.Toleration, qosClass corev1.PodQOSClass) bool {", + "\tconst (", + "\t\tnotReadyStr = \"node.kubernetes.io/not-ready\"", + "\t\tunreachableStr = \"node.kubernetes.io/unreachable\"", + "\t\tmemoryPressureStr = \"node.kubernetes.io/memory-pressure\"", + "\t)", + "\t// Check each of the tolerations to make sure they are the default tolerations added by k8s:", + "\t// tolerations:", + "\t// - effect: NoExecute", + "\t// key: node.kubernetes.io/not-ready", + "\t// operator: Exists", + "\t// tolerationSeconds: 300", + "\t// - effect: NoExecute", + "\t// key: node.kubernetes.io/unreachable", + "\t// operator: Exists", + "\t// tolerationSeconds: 300", + "\t// # this last one, only if QoS class for the pod is different than BestEffort", + "\t// - effect: NoSchedule", + "\t// key: node.kubernetes.io/memory-pressure", + "\t// operator: Exists", + "", + "\t// Short circuit. 
Anything that is not 'node.kubernetes.io' is considered a modified toleration immediately.", + "\tif !IsTolerationDefault(t) {", + "\t\treturn true", + "\t}", + "", + "\tswitch t.Effect {", + "\tcase corev1.TaintEffectNoExecute:", + "\t\tif t.Key == notReadyStr || t.Key == unreachableStr {", + "\t\t\t// 300 seconds is the default, return false for not modified", + "\t\t\tif t.Operator == corev1.TolerationOpExists \u0026\u0026 t.TolerationSeconds != nil \u0026\u0026 *t.TolerationSeconds == int64(tolerationSecondsDefault) {", + "\t\t\t\treturn false", + "\t\t\t}", + "", + "\t\t\t// Toleration seconds has been modified, return true.", + "\t\t\treturn true", + "\t\t}", + "\tcase corev1.TaintEffectNoSchedule:", + "\t\t// If toleration is NoSchedule - node.kubernetes.io/memory-pressure - Exists and the QoS class for", + "\t\t// the pod is different than BestEffort, it is also a default toleration added by k8s", + "\t\tif (t.Key == memoryPressureStr) \u0026\u0026", + "\t\t\t(t.Operator == corev1.TolerationOpExists) \u0026\u0026", + "\t\t\t(qosClass != corev1.PodQOSBestEffort) {", + "\t\t\treturn false", + "\t\t}", + "\tcase corev1.TaintEffectPreferNoSchedule:", + "\t\t// PreferNoSchedule is not a default toleration added by k8s", + "\t\treturn true", + "\t}", + "", + "\t// Check through the list of non-compliant tolerations to see if anything snuck by the above short circuit", + "\tfor _, nct := range nonCompliantTolerations {", + "\t\tif t.Effect == nct {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "globals": [ + { + "name": "nonCompliantTolerations", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/tolerations/tolerations.go:26" + }, + { + "name": "tolerationSecondsDefault", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/tolerations/tolerations.go:27" + } + ], + "consts": null + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/volumes", + "name": "volumes", + "files": 1, + "imports": [ + "k8s.io/api/core/v1" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "IsPodVolumeReclaimPolicyDelete", + "qualifiedName": "IsPodVolumeReclaimPolicyDelete", + "exported": true, + "signature": "func(*corev1.Volume, []corev1.PersistentVolume, []corev1.PersistentVolumeClaim)(bool)", + "doc": "IsPodVolumeReclaimPolicyDelete Verifies that a pod volume’s reclaim policy is DELETE\n\nThe function receives a pod volume, the cluster's persistent volumes, and\npersistent volume claims. It first finds the claim referenced by the volume,\nthen checks if the corresponding persistent volume has a delete reclaim\npolicy. If both conditions are satisfied, it returns true; otherwise false.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/volumes/volumes.go:44", + "calls": [ + { + "name": "getPVCFromSlice", + "kind": "function", + "source": [ + "func getPVCFromSlice(pvcs []corev1.PersistentVolumeClaim, pvcName string) *corev1.PersistentVolumeClaim {", + "\tfor i := range pvcs {", + "\t\tif pvcs[i].Name == pvcName {", + "\t\t\treturn \u0026pvcs[i]", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle", + "name": "testPodPersistentVolumeReclaimPolicy", + "kind": "function", + "source": [ + "func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Look through all of the pods, matching their persistent volumes to the list of overall cluster PVs and checking their reclaim status.", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcompliantPod := true", + "\t\t// Loop through all of the volumes attached to 
the pod.", + "\t\tfor pvIndex := range put.Spec.Volumes {", + "\t\t\t// Skip any volumes that do not have a PVC. No need to test them.", + "\t\t\tif put.Spec.Volumes[pvIndex].PersistentVolumeClaim == nil {", + "\t\t\t\tcheck.LogInfo(\"Pod %q does not have a PVC\", put)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// If the Pod Volume is not tied back to a PVC and corresponding PV that has a reclaim policy of DELETE.", + "\t\t\tif !volumes.IsPodVolumeReclaimPolicyDelete(\u0026put.Spec.Volumes[pvIndex], env.PersistentVolumes, env.PersistentVolumeClaims) {", + "\t\t\t\tcheck.LogError(\"Pod %q with volume %q has been found without a reclaim policy of DELETE.\", put, put.Spec.Volumes[pvIndex].Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod contains volume without a reclaim policy of DELETE\", false).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeName, put.Spec.Volumes[pvIndex].Name).", + "\t\t\t\t\tAddField(testhelper.PersistentVolumeClaimName, put.Spec.Volumes[pvIndex].PersistentVolumeClaim.ClaimName))", + "\t\t\t\tcompliantPod = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif compliantPod {", + "\t\t\tcheck.LogInfo(\"Pod %q complies with volume reclaim policy rules\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod complies with volume reclaim policy rules\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsPodVolumeReclaimPolicyDelete(vol *corev1.Volume, pvs []corev1.PersistentVolume, pvcs []corev1.PersistentVolumeClaim) bool {", + "\t// Check if the Volume is bound to a PVC.", + "\tif putPVC := getPVCFromSlice(pvcs, vol.PersistentVolumeClaim.ClaimName); putPVC != nil {", + "\t\t// Loop through the PersistentVolumes in the cluster, looking for bound 
PV/PVCs.", + "\t\tfor pvIndex := range pvs {", + "\t\t\t// Check to make sure its reclaim policy is DELETE.", + "\t\t\tif putPVC.Spec.VolumeName == pvs[pvIndex].Name \u0026\u0026 pvs[pvIndex].Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "getPVCFromSlice", + "qualifiedName": "getPVCFromSlice", + "exported": false, + "signature": "func([]corev1.PersistentVolumeClaim, string)(*corev1.PersistentVolumeClaim)", + "doc": "getPVCFromSlice retrieves a PersistentVolumeClaim by name from a list\n\nThis function iterates over the provided slice of claims, comparing each\nclaim's name to the target name. If a match is found, it returns a pointer to\nthat claim; otherwise, it returns nil to indicate no matching claim was\npresent.", + "position": "/Users/deliedit/dev/certsuite/tests/lifecycle/volumes/volumes.go:29", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/volumes", + "name": "IsPodVolumeReclaimPolicyDelete", + "kind": "function", + "source": [ + "func IsPodVolumeReclaimPolicyDelete(vol *corev1.Volume, pvs []corev1.PersistentVolume, pvcs []corev1.PersistentVolumeClaim) bool {", + "\t// Check if the Volume is bound to a PVC.", + "\tif putPVC := getPVCFromSlice(pvcs, vol.PersistentVolumeClaim.ClaimName); putPVC != nil {", + "\t\t// Loop through the PersistentVolumes in the cluster, looking for bound PV/PVCs.", + "\t\tfor pvIndex := range pvs {", + "\t\t\t// Check to make sure its reclaim policy is DELETE.", + "\t\t\tif putPVC.Spec.VolumeName == pvs[pvIndex].Name \u0026\u0026 pvs[pvIndex].Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getPVCFromSlice(pvcs 
[]corev1.PersistentVolumeClaim, pvcName string) *corev1.PersistentVolumeClaim {", + "\tfor i := range pvcs {", + "\t\tif pvcs[i].Name == pvcName {", + "\t\t\treturn \u0026pvcs[i]", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "manageability", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Initializes the manageability checks group and registers test functions\n\nThe function creates a new checks group for manageability, logs the loading\naction, and adds two checks: one verifying container image tags and another\nvalidating container port naming conventions. Each check is configured with a\nskip condition that bypasses it if no containers are present and supplies the\nappropriate test logic via closures. 
The checks are then registered in the\nglobal checks database for execution during testing.", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:55", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": 
"function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testContainersImageTag", + "kind": "function", + "source": [ + "func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogDebug(\"Testing Container %q\", cut)", + "\t\tif cut.IsTagEmpty() {", + "\t\t\tcheck.LogError(\"Container %q is missing image tag(s)\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is missing image tag(s)\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q is tagged with %q\", cut, cut.ContainerImageIdentifier.Tag)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is tagged\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", 
check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testContainerPortNameFormat", + "kind": "function", + "source": [ + "func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfor _, newProtocol := range env.ValidProtocolNames {", + "\t\tallowedProtocolNames[newProtocol] = true", + "\t}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogDebug(\"Testing Container %q\", cut)", + "\t\tfor _, port := range cut.Ports {", + "\t\t\tif !containerPortNameFormatCheck(port.Name) {", + "\t\t\t\tcheck.LogError(\"Container %q declares port %q that does not follow the partner naming conventions\", cut, port.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort does not follow the partner naming conventions\", false).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Container %q declares port %q that does follow the partner naming conventions\", cut, port.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort follows the partner naming conventions\", true).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, 
nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "containerPortNameFormatCheck", + "qualifiedName": "containerPortNameFormatCheck", + "exported": false, + "signature": "func(string)(bool)", + "doc": "containerPortNameFormatCheck Verifies that a container port name starts with an allowed protocol\n\nThe function splits the provided name on hyphens, extracts the first segment,\nand checks whether this segment is present in the global map of permitted\nprotocols. 
It returns true if the protocol prefix is valid; otherwise it\nreturns false.", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:109", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "testContainerPortNameFormat", + "kind": "function", + "source": [ + "func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfor _, newProtocol := range env.ValidProtocolNames {", + "\t\tallowedProtocolNames[newProtocol] = true", + "\t}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogDebug(\"Testing Container %q\", cut)", + "\t\tfor _, port := range cut.Ports {", + "\t\t\tif !containerPortNameFormatCheck(port.Name) {", + "\t\t\t\tcheck.LogError(\"Container %q declares port %q that does not follow the partner naming conventions\", cut, port.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort does not follow the partner naming conventions\", false).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Container %q declares port %q that does follow the partner naming conventions\", cut, port.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort follows the partner naming conventions\", true).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func containerPortNameFormatCheck(portName string) bool 
{", + "\tres := strings.Split(portName, \"-\")", + "\treturn allowedProtocolNames[res[0]]", + "}" + ] + }, + { + "name": "testContainerPortNameFormat", + "qualifiedName": "testContainerPortNameFormat", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainerPortNameFormat Verifies that container port names match partner naming conventions\n\nThe function iterates over all containers in the test environment, checking\neach declared port name against a list of allowed protocol prefixes. It logs\nerrors for non‑compliant ports and records both compliant and\nnon‑compliant objects. Finally, it sets the check result with these lists.", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:120", + "calls": [ + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "containerPortNameFormatCheck", + "kind": "function", + "source": [ + "func containerPortNameFormatCheck(portName string) bool {", + "\tres := strings.Split(portName, \"-\")", + "\treturn allowedProtocolNames[res[0]]", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfor _, newProtocol := range env.ValidProtocolNames {", + "\t\tallowedProtocolNames[newProtocol] = true", + "\t}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range 
env.Containers {", + "\t\tcheck.LogDebug(\"Testing Container %q\", cut)", + "\t\tfor _, port := range cut.Ports {", + "\t\t\tif !containerPortNameFormatCheck(port.Name) {", + "\t\t\t\tcheck.LogError(\"Container %q declares port %q that does not follow the partner naming conventions\", cut, port.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort does not follow the partner naming conventions\", false).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Container %q declares port %q that does follow the partner naming conventions\", cut, port.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"ContainerPort follows the partner naming conventions\", true).", + "\t\t\t\t\tAddField(testhelper.ContainerPort, port.Name))", + "\t\t\t}", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersImageTag", + "qualifiedName": "testContainersImageTag", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersImageTag Verifies that each container has a non-empty image tag\n\nThe function iterates over all containers in the test environment, checking\nwhether their image tags are present. Containers lacking tags are logged as\nerrors and added to a non‑compliant list; those with tags are logged as\ninfo and added to a compliant list. 
Finally, it records both lists as the\nresult of the compliance check.", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:83", + "calls": [ + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "IsTagEmpty", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/manageability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ManageabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ManageabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainersImageTag)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersImageTag(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestContainerPortNameFormat)).", + "\t\tWithSkipCheckFn(skipIfNoContainersFn).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainerPortNameFormat(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogDebug(\"Testing Container %q\", cut)", + "\t\tif cut.IsTagEmpty() {", + "\t\t\tcheck.LogError(\"Container %q is missing image tag(s)\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is missing image tag(s)\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q is tagged with %q\", cut, cut.ContainerImageIdentifier.Tag)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is tagged\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "allowedProtocolNames", + "exported": false, + "type": "", + "doc": "The name field in the ContainerPort section must be of the form \u003cprotocol\u003e[-\u003csuffix\u003e] where \u003cprotocol\u003e is one of the following,\nand the optional \u003csuffix\u003e can be chosen by the application. 
Allowed protocol names: grpc, grpc-web, http, http2, tcp, udp.", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:101" + }, + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:33" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:31" + }, + { + "name": "skipIfNoContainersFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/manageability/suite.go:38" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "networking", + "files": 2, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/policies", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "k8s.io/api/networking/v1", + "strconv" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Registers networking test checks in the internal database\n\nThis function logs that networking tests are being loaded, creates a check\ngroup for 
networking, and adds multiple specific checks such as ICMP\nconnectivity for IPv4/IPv6, port usage validation, reserved port checks,\nservice dual‑stack verification, network policy compliance,\npartner‑specific ports, DPDK CPU pinning probe restrictions, SRIOV restart\nlabels, and MTU configuration. Each check is configured with appropriate skip\nconditions based on the current test environment and a function that performs\nthe actual test logic.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:69", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + 
"\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNetworkConnectivity", + "kind": "function", + "source": [ + "func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) {", + "\tnetsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger())", + "\treport, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger())", + "\tif skip {", + "\t\tcheck.LogInfo(\"There are no %q networks to test with at least 2 pods, skipping test\", aIPVersion)", + "\t}", + "\tcheck.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", 
+ "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNetworkConnectivity", + "kind": "function", + "source": [ + "func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) {", + "\tnetsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger())", + "\treport, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger())", + "\tif skip {", + "\t\tcheck.LogInfo(\"There are no %q networks to test with at least 2 pods, skipping test\", aIPVersion)", + "\t}", + "\tcheck.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", 
+ "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNetworkConnectivity", + "kind": "function", + "source": [ + "func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) {", + "\tnetsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger())", + "\treport, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger())", + "\tif skip {", + "\t\tcheck.LogInfo(\"There are no %q networks to test with at least 2 pods, skipping test\", aIPVersion)", + "\t}", + "\tcheck.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", 
+ "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNetworkConnectivity", + "kind": "function", + "source": [ + "func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) {", + "\tnetsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger())", + "\treport, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger())", + "\tif skip {", + "\t\tcheck.LogInfo(\"There are no %q networks to test with at least 2 pods, skipping test\", aIPVersion)", + "\t}", + "\tcheck.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", 
+ "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testUndeclaredContainerPortsUsage", + "kind": "function", + "source": [ + "func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tvar portInfo netutil.PortInfo", + "\tfor _, put := range env.Pods {", + "\t\t// First get the ports declared in the Pod's containers spec", + "\t\tdeclaredPorts := make(map[netutil.PortInfo]bool)", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tfor _, port := range cut.Ports {", + "\t\t\t\tportInfo.PortNumber = port.ContainerPort", + "\t\t\t\tportInfo.Protocol = string(port.Protocol)", + "\t\t\t\tdeclaredPorts[portInfo] = true", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Then check the actual ports that the containers are listening on", + "\t\tfirstPodContainer := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(firstPodContainer)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get container %q listening ports, err: %v\", firstPodContainer, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Failed to get the container's listening ports, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(listeningPorts) == 0 {", + 
"\t\t\tcheck.LogInfo(\"None of the containers of %q have any listening port.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"None of the containers have any listening ports\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Verify that all the listening ports have been declared in the container spec", + "\t\tfailedPod := false", + "\t\tfor listeningPort := range listeningPorts {", + "\t\t\tif put.ContainsIstioProxy() \u0026\u0026 netcommons.ReservedIstioPorts[listeningPort.PortNumber] {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on port %d protocol %q, but the pod also contains istio-proxy. Ignoring.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif ok := declaredPorts[listeningPort]; !ok {", + "\t\t\t\tcheck.LogError(\"%q is listening on port %d protocol %q, but that port was not declared in any container spec.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tfailedPod = true", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in no container spec\", false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on declared port %d protocol %q\", put, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in container spec\", true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + 
"\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif failedPod {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"At least one port was listening but not declared in any container specs\", false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"All listening were declared in containers specs\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + 
"kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOCPReservedPortsUsage", + "kind": "function", + "source": [ + "func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// List of all ports reserved by OpenShift", + "\tOCPReservedPorts := map[int32]bool{", + "\t\t22623: true,", + "\t\t22624: true}", + "\tcompliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, OCPReservedPorts, \"OCP\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoServicesUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoServicesUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Services) == 0 {", + "\t\t\treturn true, \"no services to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testDualStackServices", + "kind": "function", + "source": [ + "func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing Service %q\", s.Name)", + "\t\tserviceIPVersion, err := services.GetServiceIPVersion(s)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get IP version from Service %q, err=%v\", s.Name, err)", + "\t\t\tnonCompliantObjects 
= append(nonCompliantObjects, testhelper.NewReportObject(\"Could not get IP Version from service\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name))", + "\t\t}", + "\t\tif serviceIPVersion == netcommons.Undefined || serviceIPVersion == netcommons.IPv4 {", + "\t\t\tcheck.LogError(\"Service %q (ns: %q) only supports IPv4\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service supports only IPv4\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Service %q (ns: %q) supports IPv6 or is dual stack\", s.Name, s.Namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service supports IPv6 or is dual stack\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + 
"", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNetworkPolicyDenyAll", + "kind": "function", + "source": [ + "func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the pods, looking for corresponding entries within a deny-all network policy (both ingress and egress).", + "\t// This ensures that each pod is accounted for that we are tasked with testing and excludes any pods that are not marked", + "\t// for testing (via the labels).", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tdenyAllEgressFound := false", + "\t\tdenyAllIngressFound := false", + "", + "\t\t// Look through all of the network policies for a matching namespace.", + "\t\tfor index := range env.NetworkPolicies {", + "\t\t\tnetworkPolicy := env.NetworkPolicies[index]", + "\t\t\tcheck.LogInfo(\"Testing Network policy %q against pod %q\", networkPolicy.Name, put)", + "", + "\t\t\t// Skip any 
network policies that don't match the namespace of the pod we are testing.", + "\t\t\tif networkPolicy.Namespace != put.Namespace {", + "\t\t\t\tcheck.LogInfo(\"Skipping Network policy %q (namespace %q does not match Pod namespace %q)\", networkPolicy.Name, networkPolicy.Namespace, put.Namespace)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Match the pod namespace with the network policy namespace.", + "\t\t\tif policies.LabelsMatch(networkPolicy.Spec.PodSelector, put.Labels) {", + "\t\t\t\tvar reason string", + "\t\t\t\tif !denyAllEgressFound {", + "\t\t\t\t\tdenyAllEgressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeEgress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tif !denyAllIngressFound {", + "\t\t\t\t\tdenyAllIngressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeIngress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Network policy has not been found that contains a deny-all rule for both ingress and egress.", + "\t\tpodIsCompliant := true", + "\t\tif !denyAllIngressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default ingress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default ingress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif !denyAllEgressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default egress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default egress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has a default ingress/egress deny-all network policy\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a default ingress/egress deny-all network policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() 
(bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPartnerSpecificTCPPorts", + "kind": "function", + "source": [ + "func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// List of all of the ports reserved by partner", + "\tReservedPorts := map[int32]bool{", + "\t\t15443: true,", + "\t\t15090: true,", + "\t\t15021: true,", + "\t\t15020: true,", + "\t\t15014: true,", + "\t\t15008: true,", + "\t\t15006: true,", + "\t\t15001: true,", + "\t\t15000: true,", + "\t}", + "\tcompliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, ReservedPorts, \"Partner\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + 
"\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoCPUPinningPodsSkipFn", + "kind": "function", + "source": [ + "func GetNoCPUPinningPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetCPUPinningPodsWithDpdk()) == 0 {", + "\t\t\treturn true, \"no CPU pinning pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetCPUPinningPodsWithDpdk", + "kind": "function" + }, + { + "name": "testExecProbDenyAtCPUPinning", + "kind": "function", + "source": [ + "func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cpuPinnedPod := range dpdkPods {", + "\t\texecProbeFound := false", + "\t\tfor _, cut := range cpuPinnedPod.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tif cut.HasExecProbes() {", + "\t\t\t\tcheck.LogError(\"Container %q defines an exec probe\", cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, \"Exec prob is not allowed\", false))", + "\t\t\t\texecProbeFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !execProbeFound {", + "\t\t\tcheck.LogInfo(\"Pod %q does not define any exec probe\", cpuPinnedPod)", + 
"\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, \"Exec prob is allowed\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoSRIOVPodsSkipFn", + "kind": "function", + "source": [ + "func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tpods, err := env.GetPodsUsingSRIOV()", + "\t\tif err != nil {", + "\t\t\treturn true, fmt.Sprintf(\"failed to get SRIOV pods: %v\", err)", + "\t\t}", + "", + "\t\tif len(pods) == 0 {", + "\t\t\treturn true, \"no SRIOV pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + 
"name": "GetPodsUsingSRIOV", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "testRestartOnRebootLabelOnPodsUsingSriov", + "kind": "function", + "source": [ + "func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods []*provider.Pod) {", + "\tconst (", + "\t\trestartOnRebootLabel = \"restart-on-reboot\"", + "\t)", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, pod := range sriovPods {", + "\t\tcheck.LogInfo(\"Testing SRIOV Pod %q\", pod)", + "", + "\t\tlabelValue, exist := pod.GetLabels()[restartOnRebootLabel]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the label %q was not found.\", pod, restartOnRebootLabel)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV but the label %s was not found\", restartOnRebootLabel), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif labelValue != \"true\" {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the %q label value is not true.\", pod, restartOnRebootLabel)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV but the label %s is not set to true\", restartOnRebootLabel), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Pod %q uses SRIOV and the %q label is set to true\", pod, restartOnRebootLabel)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV and the label %s is set to true\", restartOnRebootLabel), true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + 
"kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoSRIOVPodsSkipFn", + "kind": "function", + "source": [ + "func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tpods, err := env.GetPodsUsingSRIOV()", + "\t\tif err != nil {", + "\t\t\treturn true, fmt.Sprintf(\"failed to get SRIOV pods: %v\", err)", + "\t\t}", + "", + "\t\tif len(pods) == 0 {", + "\t\t\treturn true, \"no SRIOV pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "GetPodsUsingSRIOV", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "testNetworkAttachmentDefinitionSRIOVUsingMTU", + "kind": "function", + "source": [ + "func testNetworkAttachmentDefinitionSRIOVUsingMTU(check *checksdb.Check, sriovPods []*provider.Pod) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, pod := range sriovPods {", + "\t\tresult, err := pod.IsUsingSRIOVWithMTU()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to check if pod %q uses SRIOV with MTU, err: %v\", pod, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Failed to check if pod uses SRIOV with MTU\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif result {", + "\t\t\tcheck.LogInfo(\"Pod %q uses SRIOV with MTU\", pod)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod uses SRIOV with MTU\", true))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the MTU is not set explicitly\", pod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod uses SRIOV but the MTU is not set explicitly\", false))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + 
"name": "testDualStackServices", + "qualifiedName": "testDualStackServices", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testDualStackServices Verifies that each Service supports IPv6 or dual stack\n\nThe function iterates over all services in the test environment, determines\ntheir IP version using a helper, and logs whether they are compliant.\nServices that only support IPv4 or cannot be evaluated produce\nnon‑compliant report objects; otherwise compliant ones are recorded.\nFinally, it sets the check result with lists of compliant and non‑compliant\nreports.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:355", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "GetServiceIPVersion", + "kind": "function", + "source": [ + "func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) {", + "\tipver, err := netcommons.GetIPVersion(aService.Spec.ClusterIP)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s cannot get aService clusterIP version\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif aService.Spec.IPFamilyPolicy == nil {", + "\t\terr = fmt.Errorf(\"%s does not have a IPFamilyPolicy configured\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv6 {", + "\t\tlog.Debug(\"%s is single stack ipv6\", ToString(aService))", + "\t\treturn netcommons.IPv6, nil", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv4 {", + "\t\tlog.Debug(\"%s is single stack ipv4\", ToString(aService))", + "\t\treturn netcommons.IPv4, nil", + "\t}", + "\tif (*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyPreferDualStack ||", + 
"\t\t*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyRequireDualStack) \u0026\u0026", + "\t\tlen(aService.Spec.ClusterIPs) \u003c 2 {", + "\t\terr = fmt.Errorf(\"%s is dual stack but has only zero or one ClusterIPs\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "", + "\tres, err := isClusterIPsDualStack(aService.Spec.ClusterIPs)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s, err:%s\", ToString(aService), err)", + "\t\treturn result, err", + "\t}", + "\tif res {", + "\t\tlog.Debug(\"%s is dual-stack\", ToString(aService))", + "\t\treturn netcommons.IPv4v6, nil", + "\t}", + "", + "\terr = fmt.Errorf(\"%s is not compliant, it is not single stack ipv6 or dual stack\", ToString(aService))", + "\treturn result, err", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + 
"\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", 
+ "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), 
testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing Service %q\", s.Name)", + "\t\tserviceIPVersion, err := services.GetServiceIPVersion(s)", + "\t\tif err != nil {", + 
"\t\t\tcheck.LogError(\"Could not get IP version from Service %q, err=%v\", s.Name, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Could not get IP Version from service\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name))", + "\t\t}", + "\t\tif serviceIPVersion == netcommons.Undefined || serviceIPVersion == netcommons.IPv4 {", + "\t\t\tcheck.LogError(\"Service %q (ns: %q) only supports IPv4\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service supports only IPv4\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Service %q (ns: %q) supports IPv6 or is dual stack\", s.Name, s.Namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service supports IPv6 or is dual stack\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testExecProbDenyAtCPUPinning", + "qualifiedName": "testExecProbDenyAtCPUPinning", + "exported": false, + "signature": "func(*checksdb.Check, []*provider.Pod)()", + "doc": "testExecProbDenyAtCPUPinning verifies that pods pinned to CPUs do not use exec probes\n\nThe routine iterates over each CPU‑pinned pod, inspecting all containers\nfor defined exec probes. 
If any container contains an exec probe, it records\nthe pod as non‑compliant and logs an error; otherwise it marks the pod\ncompliant and logs informational output. Finally, it sets the check result\nwith lists of compliant and non‑compliant report objects.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:189", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "HasExecProbes", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default 
interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + 
"usesTypes": null, + "usesGlobals": null, + "source": [ + "func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cpuPinnedPod := range dpdkPods {", + "\t\texecProbeFound := false", + "\t\tfor _, cut := range cpuPinnedPod.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tif cut.HasExecProbes() {", + "\t\t\t\tcheck.LogError(\"Container %q defines an exec probe\", cut)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, \"Exec prob is not allowed\", false))", + "\t\t\t\texecProbeFound = true", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !execProbeFound {", + "\t\t\tcheck.LogInfo(\"Pod %q does not define any exec probe\", cpuPinnedPod)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, \"Exec prob is allowed\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNetworkAttachmentDefinitionSRIOVUsingMTU", + "qualifiedName": "testNetworkAttachmentDefinitionSRIOVUsingMTU", + "exported": false, + "signature": "func(*checksdb.Check, []*provider.Pod)()", + "doc": "testNetworkAttachmentDefinitionSRIOVUsingMTU evaluates SRIOV pods for explicit MTU configuration\n\nThe function iterates over a list of SRIOV-enabled pods, checking whether\neach pod’s network attachment definition includes an explicitly set MTU\nvalue. 
It logs informational messages for compliant pods and error messages\nfor non‑compliant or failed checks, creating report objects accordingly.\nFinally, it aggregates the results into the check result sets for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:501", + "calls": [ + { + "name": "IsUsingSRIOVWithMTU", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + 
"name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNetworkAttachmentDefinitionSRIOVUsingMTU(check *checksdb.Check, sriovPods []*provider.Pod) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, pod := range sriovPods {", + "\t\tresult, err := pod.IsUsingSRIOVWithMTU()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to check if pod %q uses SRIOV with MTU, err: %v\", pod, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Failed to check if pod uses SRIOV with MTU\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif result {", + "\t\t\tcheck.LogInfo(\"Pod %q uses SRIOV with MTU\", pod)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod uses SRIOV with MTU\", true))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the MTU is not set explicitly\", pod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod uses SRIOV but the MTU is not set explicitly\", false))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNetworkConnectivity", + "qualifiedName": "testNetworkConnectivity", + "exported": false, + "signature": "func(*provider.TestEnvironment, netcommons.IPVersion, netcommons.IFType, *checksdb.Check)()", + "doc": "testNetworkConnectivity establishes 
ICMP connectivity between pods\n\nThe function builds a test context for the specified IP version and interface\ntype, then runs ping tests across all eligible pod pairs. It records both\nsuccessful and failed pings into compliant or non‑compliant report objects.\nIf no network has enough pods to test, it logs that the test is skipped.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:300", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "BuildNetTestContext", + "kind": "function", + "source": [ + "func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest map[string]netcommons.NetTestContext) {", + "\tnetsUnderTest = make(map[string]netcommons.NetTestContext)", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tif put.SkipNetTests {", + "\t\t\tlogger.Info(\"Skipping %q because it is excluded from all connectivity tests\", put)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif aType == netcommons.MULTUS {", + "\t\t\tif put.SkipMultusNetTests {", + "\t\t\t\tlogger.Info(\"Skipping pod %q because it is excluded from %q connectivity tests only\", put.Name, aType)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tfor netKey, multusNetworkInterface := range put.MultusNetworkInterfaces {", + "\t\t\t\t// The first container is used to get the network namespace", + "\t\t\t\tprocessContainerIpsPerNet(put.Containers[0], netKey, multusNetworkInterface.IPs, multusNetworkInterface.Interface, netsUnderTest, aIPVersion, logger)", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tconst defaultNetKey = \"default\"", + "\t\tdefaultIPAddress := put.Status.PodIPs", + "\t\t// The first container is used to get the network namespace", + "\t\tprocessContainerIpsPerNet(put.Containers[0], defaultNetKey, netcommons.PodIPsToStringList(defaultIPAddress), \"\", netsUnderTest, aIPVersion, 
logger)", + "\t}", + "\treturn netsUnderTest", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "RunNetworkingTests", + "kind": "function", + "source": [ + "func RunNetworkingTests( //nolint:funlen", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\tcount int,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) (report testhelper.FailureReasonOut, skip bool) {", + "\tlogger.Debug(\"%s\", netcommons.PrintNetTestContextMap(netsUnderTest))", + "\tskip = false", + "\tif len(netsUnderTest) == 0 {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t\treturn report, skip", + "\t}", + "\t// if no network can be tested, then we need to skip the test entirely.", + "\t// If at least one network can be tested (e.g. \u003e 2 IPs/ interfaces present), then we do not skip the test", + "\tatLeastOneNetworkTested := false", + "\tcompliantNets := map[string]int{}", + "\tnonCompliantNets := map[string]int{}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tcompliantNets[netName] = 0", + "\t\tnonCompliantNets[netName] = 0", + "\t\tif len(netUnderTest.DestTargets) == 0 {", + "\t\t\tlogger.Debug(\"There are no containers to ping for %q network %q. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test\", aIPVersion, netName)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneNetworkTested = true", + "\t\tlogger.Debug(\"%q Ping tests on network %q. 
Number of target IPs: %d\", aIPVersion, netName, len(netUnderTest.DestTargets))", + "", + "\t\tfor _, aDestIP := range netUnderTest.DestTargets {", + "\t\t\tlogger.Debug(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q )\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP)", + "\t\t\tresult, err := TestPing(netUnderTest.TesterSource.ContainerIdentifier, aDestIP, count)", + "\t\t\tlogger.Debug(\"Ping results: %q\", result)", + "\t\t\tlogger.Info(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q ) result: %q\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP, result)", + "\t\t\tif err != nil {", + "\t\t\t\tlogger.Debug(\"Ping failed, err=%v\", err)", + "\t\t\t}", + "\t\t\tif result.outcome != testhelper.SUCCESS {", + "\t\t\t\tlogger.Error(\"Ping from %q (srcip: %q) to %q (dstip: %q) failed\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tnonCompliantNets[netName]++", + "\t\t\t\tnonCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Failed\", false).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + 
"\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, nonCompliantObject)", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"Ping from %q (srcip: %q) to %q (dstip: %q) succeeded\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tcompliantNets[netName]++", + "\t\t\t\tCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Succeeded\", true).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + "\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, CompliantObject)", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantNets[netName] != 0 {", + "\t\t\tlogger.Error(\"ICMP tests failed for %d IP source/destination in this network\", nonCompliantNets[netName])", + "\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP 
tests failed for %d IP source/destination in this network\", nonCompliantNets[netName]), testhelper.NetworkType, false).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t\tif compliantNets[netName] != 0 {", + "\t\t\tlogger.Info(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName])", + "\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName]), testhelper.NetworkType, true).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t}", + "\tif !atLeastOneNetworkTested {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t}", + "", + "\treturn report, skip", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP 
IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) {", + "\tnetsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger())", + "\treport, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger())", + "\tif skip {", + "\t\tcheck.LogInfo(\"There are no %q networks to test with at least 2 pods, skipping test\", aIPVersion)", + "\t}", + 
"\tcheck.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut)", + "}" + ] + }, + { + "name": "testNetworkPolicyDenyAll", + "qualifiedName": "testNetworkPolicyDenyAll", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNetworkPolicyDenyAll Verifies that each pod has default deny-all ingress and egress policies\n\nThe routine iterates over all pods in the test environment, checking for\nmatching network policies within the same namespace. It confirms that a\npolicy with empty rules exists for both ingress and egress, indicating a\ndeny‑all configuration. Pods lacking either rule are logged as\nnon‑compliant, while compliant pods are recorded accordingly.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:392", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/policies", + "name": "LabelsMatch", + "kind": "function", + "source": [ + "func LabelsMatch(podSelectorLabels v1.LabelSelector, podLabels map[string]string) bool {", + "\tlabelMatch := false", + "", + "\t// When the pod selector label is empty, it will always match the pod", + "\tif podSelectorLabels.Size() == 0 {", + "\t\treturn true", + "\t}", + "", + "\tfor psLabelKey, psLabelValue := range podSelectorLabels.MatchLabels {", + "\t\tfor podLabelKey, podLabelValue := range podLabels {", + "\t\t\tif psLabelKey == podLabelKey \u0026\u0026 psLabelValue == podLabelValue {", + "\t\t\t\tlabelMatch = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif labelMatch {", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn labelMatch", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/policies", + "name": "IsNetworkPolicyCompliant", + "kind": "function", + "source": [ + "func 
IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) (bool, string) {", + "\t// As long as we have decided above that there is no pod selector,", + "\t// we just have to make sure that the policy type is either Ingress or Egress (or both) we can return true.", + "\t// For more information about deny-all policies, there are some good examples on:", + "\t// https://kubernetes.io/docs/concepts/services-networking/network-policies/", + "", + "\tif len(np.Spec.PolicyTypes) == 0 {", + "\t\treturn false, \"empty policy types\"", + "\t}", + "", + "\t// Ingress and Egress rules should be \"empty\" if it is a default rule.", + "\tif policyType == networkingv1.PolicyTypeEgress {", + "\t\tif np.Spec.Egress != nil || len(np.Spec.Egress) \u003e 0 {", + "\t\t\treturn false, \"egress spec not empty for default egress rule\"", + "\t\t}", + "\t}", + "", + "\tif policyType == networkingv1.PolicyTypeIngress {", + "\t\tif np.Spec.Ingress != nil || len(np.Spec.Ingress) \u003e 0 {", + "\t\t\treturn false, \"ingress spec not empty for default ingress rule\"", + "\t\t}", + "\t}", + "", + "\tpolicyTypeFound := false", + "\t// Look through the returned policies to see if they match the desired policyType", + "\tfor _, p := range np.Spec.PolicyTypes {", + "\t\tif p == policyType {", + "\t\t\tpolicyTypeFound = true", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn policyTypeFound, \"\"", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/policies", + "name": "IsNetworkPolicyCompliant", + "kind": "function", + "source": [ + "func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) (bool, string) {", + "\t// As long as we have decided above that there is no pod selector,", + "\t// we just have to make sure that the policy type is either Ingress or Egress (or both) we can return true.", + "\t// For more 
information about deny-all policies, there are some good examples on:", + "\t// https://kubernetes.io/docs/concepts/services-networking/network-policies/", + "", + "\tif len(np.Spec.PolicyTypes) == 0 {", + "\t\treturn false, \"empty policy types\"", + "\t}", + "", + "\t// Ingress and Egress rules should be \"empty\" if it is a default rule.", + "\tif policyType == networkingv1.PolicyTypeEgress {", + "\t\tif np.Spec.Egress != nil || len(np.Spec.Egress) \u003e 0 {", + "\t\t\treturn false, \"egress spec not empty for default egress rule\"", + "\t\t}", + "\t}", + "", + "\tif policyType == networkingv1.PolicyTypeIngress {", + "\t\tif np.Spec.Ingress != nil || len(np.Spec.Ingress) \u003e 0 {", + "\t\t\treturn false, \"ingress spec not empty for default ingress rule\"", + "\t\t}", + "\t}", + "", + "\tpolicyTypeFound := false", + "\t// Look through the returned policies to see if they match the desired policyType", + "\tfor _, p := range np.Spec.PolicyTypes {", + "\t\tif p == policyType {", + "\t\t\tpolicyTypeFound = true", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn policyTypeFound, \"\"", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func 
NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + 
"\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the pods, looking for corresponding entries within a deny-all network policy (both ingress and egress).", + "\t// This ensures that each pod is accounted for that we are tasked with testing and excludes any pods that are not marked", + "\t// for testing (via the labels).", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tdenyAllEgressFound := false", + "\t\tdenyAllIngressFound := false", + "", + "\t\t// Look 
through all of the network policies for a matching namespace.", + "\t\tfor index := range env.NetworkPolicies {", + "\t\t\tnetworkPolicy := env.NetworkPolicies[index]", + "\t\t\tcheck.LogInfo(\"Testing Network policy %q against pod %q\", networkPolicy.Name, put)", + "", + "\t\t\t// Skip any network policies that don't match the namespace of the pod we are testing.", + "\t\t\tif networkPolicy.Namespace != put.Namespace {", + "\t\t\t\tcheck.LogInfo(\"Skipping Network policy %q (namespace %q does not match Pod namespace %q)\", networkPolicy.Name, networkPolicy.Namespace, put.Namespace)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Match the pod namespace with the network policy namespace.", + "\t\t\tif policies.LabelsMatch(networkPolicy.Spec.PodSelector, put.Labels) {", + "\t\t\t\tvar reason string", + "\t\t\t\tif !denyAllEgressFound {", + "\t\t\t\t\tdenyAllEgressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeEgress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tif !denyAllIngressFound {", + "\t\t\t\t\tdenyAllIngressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeIngress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Network policy has not been found that contains a deny-all rule for both ingress and egress.", + "\t\tpodIsCompliant := true", + "\t\tif !denyAllIngressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default ingress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default ingress deny-all network 
policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif !denyAllEgressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default egress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default egress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has a default ingress/egress deny-all network policy\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a default ingress/egress deny-all network policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOCPReservedPortsUsage", + "qualifiedName": "testOCPReservedPortsUsage", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOCPReservedPortsUsage Verifies pods do not listen on OpenShift reserved ports\n\nThe function builds a map of ports that OpenShift reserves, then calls a\nshared routine to scan all running pods for listeners on those ports. 
It\ncollects compliant and non‑compliant findings, passing them to the test\nframework through the check object’s result setter.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:315", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "TestReservedPortsUsage", + "kind": "function", + "source": [ + "func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRoguePodsListeningToPorts(env.Pods, reservedPorts, portsOrigin, logger)", + "\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", 
+ "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// List of all ports reserved by OpenShift", + "\tOCPReservedPorts := map[int32]bool{", + "\t\t22623: true,", + "\t\t22624: true}", + "\tcompliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, OCPReservedPorts, \"OCP\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPartnerSpecificTCPPorts", + "qualifiedName": 
"testPartnerSpecificTCPPorts", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPartnerSpecificTCPPorts Verifies that pods do not listen on partner‑reserved TCP ports\n\nThis routine defines a set of TCP ports reserved by the partner and checks\nall pods in the environment to ensure none are listening on those ports. It\ncalls a common test helper to identify compliant and non‑compliant objects,\nthen records the results in the provided check object.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:330", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "TestReservedPortsUsage", + "kind": "function", + "source": [ + "func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRoguePodsListeningToPorts(env.Pods, reservedPorts, portsOrigin, logger)", + "\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + 
"usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// List of all of the ports reserved by partner", + "\tReservedPorts := map[int32]bool{", + "\t\t15443: true,", + "\t\t15090: true,", + "\t\t15021: true,", + "\t\t15020: true,", + "\t\t15014: true,", + "\t\t15008: true,", + "\t\t15006: true,", + "\t\t15001: true,", + "\t\t15000: true,", + "\t}", + "\tcompliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, ReservedPorts, \"Partner\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testRestartOnRebootLabelOnPodsUsingSriov", + "qualifiedName": "testRestartOnRebootLabelOnPodsUsingSriov", + "exported": false, + "signature": "func(*checksdb.Check, []*provider.Pod)()", + "doc": "testRestartOnRebootLabelOnPodsUsingSriov Verifies SRIOV pods have a restart-on-reboot label set to true\n\nThe function iterates over each pod that uses SRIOV, checking for the\npresence of the restart‑on‑reboot label. If the label is missing or its\nvalue is not \"true\", it records a non‑compliant report object and logs an\nerror; otherwise it records a compliant object and logs success. 
After\nprocessing all pods, it sets the check result with the lists of compliant and\nnon‑compliant objects.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:464", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "GetLabels", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + 
"\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), 
testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", 
+ "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test 
case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods []*provider.Pod) {", + "\tconst (", + "\t\trestartOnRebootLabel = \"restart-on-reboot\"", + "\t)", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, pod := range sriovPods {", + "\t\tcheck.LogInfo(\"Testing SRIOV Pod %q\", pod)", + "", + "\t\tlabelValue, exist := pod.GetLabels()[restartOnRebootLabel]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the label %q was not found.\", pod, restartOnRebootLabel)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV but the label %s was not found\", restartOnRebootLabel), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif labelValue != \"true\" {", + "\t\t\tcheck.LogError(\"Pod %q uses SRIOV but the %q label value is not true.\", pod, restartOnRebootLabel)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV but the label %s is not set to true\", restartOnRebootLabel), false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Pod %q uses SRIOV and the %q label is set to true\", pod, restartOnRebootLabel)", + "\t\tcompliantObjects = 
append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, fmt.Sprintf(\"Pod uses SRIOV and the label %s is set to true\", restartOnRebootLabel), true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testUndeclaredContainerPortsUsage", + "qualifiedName": "testUndeclaredContainerPortsUsage", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testUndeclaredContainerPortsUsage Verifies that every port a pod’s containers actually listen on is declared in the container specification\n\nThe function iterates over all pods, collecting the ports defined in each\ncontainer’s spec and then retrieving the actual listening ports via a\nsystem call. It compares these two sets, ignoring Istio proxy reserved ports,\nand records any mismatches as non‑compliant objects. Finally, it reports\ncompliant or non‑compliant results for each pod based on whether all\nlistening ports were properly declared.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:222", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "GetListeningPorts", + "kind": "function", + "source": [ + "func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) {", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s on %s, err: %v\", getListeningPortsCmd, cut, err)", + "\t}", + "", + "\treturn parseListeningPorts(outStr)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "ContainsIstioProxy", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, 
+ { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "LoadChecks", + "kind": "function", + 
"source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.NetworkingTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.NetworkingTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\t// Default interface ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv4 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv4ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv4, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Default interface ICMP IPv6 test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.DEFAULT, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Multus interfaces ICMP IPv6 test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestICMPv6ConnectivityMultusIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkConnectivity(\u0026env, netcommons.IPv6, netcommons.MULTUS, c)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Undeclared container ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUndeclaredContainerPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUndeclaredContainerPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// OCP reserved ports usage test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env), testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPReservedPortsUsage(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Dual stack services test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceDualStackIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoServicesUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestDualStackServices(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Network policy deny all test case", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNetworkPolicyDenyAllIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNetworkPolicyDenyAll(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Extended partner ports test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestReservedExtendedPartnerPorts)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env), testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPartnerSpecificTCPPorts(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// DPDK CPU pinning exec probe test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tdpdkPods := env.GetCPUPinningPodsWithDpdk()", + "\t\t\ttestExecProbDenyAtCPUPinning(c, dpdkPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// Restart on reboot label test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)).", + "\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestRestartOnRebootLabelOnPodsUsingSriov(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\t// SRIOV MTU test case", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(", + "\t\tidentifiers.TestNetworkAttachmentDefinitionSRIOVUsingMTU)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\tsriovPods, err := env.GetPodsUsingSRIOV()", + "\t\t\tif err != nil {", + "\t\t\t\treturn fmt.Errorf(\"failure getting pods using SRIOV: %v\", err)", + "\t\t\t}", + "\t\t\ttestNetworkAttachmentDefinitionSRIOVUsingMTU(c, sriovPods)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tvar portInfo netutil.PortInfo", + "\tfor _, put := range env.Pods {", + "\t\t// First get the ports declared in the Pod's containers spec", + "\t\tdeclaredPorts := make(map[netutil.PortInfo]bool)", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tfor _, port := range cut.Ports {", + "\t\t\t\tportInfo.PortNumber = port.ContainerPort", + "\t\t\t\tportInfo.Protocol = string(port.Protocol)", + "\t\t\t\tdeclaredPorts[portInfo] = true", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Then check the actual ports that the containers are listening on", + "\t\tfirstPodContainer := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(firstPodContainer)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get container %q listening ports, err: %v\", firstPodContainer, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Failed to get the container's listening ports, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(listeningPorts) == 0 {", + "\t\t\tcheck.LogInfo(\"None of the containers of %q have any listening port.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects,", + 
"\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"None of the containers have any listening ports\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Verify that all the listening ports have been declared in the container spec", + "\t\tfailedPod := false", + "\t\tfor listeningPort := range listeningPorts {", + "\t\t\tif put.ContainsIstioProxy() \u0026\u0026 netcommons.ReservedIstioPorts[listeningPort.PortNumber] {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on port %d protocol %q, but the pod also contains istio-proxy. Ignoring.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif ok := declaredPorts[listeningPort]; !ok {", + "\t\t\t\tcheck.LogError(\"%q is listening on port %d protocol %q, but that port was not declared in any container spec.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tfailedPod = true", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in no container spec\", false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on declared port %d protocol %q\", put, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in container spec\", true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t}", + 
"\t\t}", + "\t\tif failedPod {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"At least one port was listening but not declared in any container specs\", false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"All listening were declared in containers specs\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:51" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:49" + } + ], + "consts": [ + { + "name": "defaultNumPings", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:38" + }, + { + "name": "nodePort", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/networking/suite.go:39" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "icmp", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "regexp", + "strconv" + ], + "structs": [ + { + "name": "PingResults", + "exported": true, + "doc": "PingResults represents the outcome of a ping test\n\nThe structure holds counts for transmitted, received, and error packets along\nwith an outcome code that indicates success, failure, or error. 
It is used by\nparsing functions to convert raw command output into structured data, and it\nprovides a string representation summarizing these metrics.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:45", + "fields": { + "errors": "int", + "outcome": "int", + "received": "int", + "transmitted": "int" + }, + "methodNames": [ + "String" + ], + "source": [ + "type PingResults struct {", + "\toutcome int", + "\ttransmitted int", + "\treceived int", + "\terrors int", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "BuildNetTestContext", + "qualifiedName": "BuildNetTestContext", + "exported": true, + "signature": "func([]*provider.Pod, netcommons.IPVersion, netcommons.IFType, *log.Logger)(map[string]netcommons.NetTestContext)", + "doc": "BuildNetTestContext Creates a map of network test contexts for pods\n\nThe function iterates over provided pods, filtering out those excluded from\ntests. For each pod it collects IP addresses based on the requested interface\n, selects one container to represent the namespace, and builds a context that\ndesignates a tester source and destination targets. 
The resulting map is\nkeyed by network identifiers and returned for use in connectivity checks.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:69", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "processContainerIpsPerNet", + "kind": "function", + "source": [ + "func processContainerIpsPerNet(containerID *provider.Container,", + "\tnetKey string,", + "\tipAddresses []string,", + "\tifName string,", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) {", + "\tipAddressesFiltered := netcommons.FilterIPListByIPVersion(ipAddresses, aIPVersion)", + "\tif len(ipAddressesFiltered) == 0 {", + "\t\t// if no multus addresses found, skip this container", + "\t\tlogger.Debug(\"Skipping %q, Network %q because no multus IPs are present\", containerID, netKey)", + "\t\treturn", + "\t}", + "\t// Create an entry at \"key\" if it is not present", + "\tif _, ok := netsUnderTest[netKey]; !ok {", + "\t\tnetsUnderTest[netKey] = netcommons.NetTestContext{}", + "\t}", + "\t// get a copy of the content", + "\tentry := netsUnderTest[netKey]", + "\t// Then modify the copy", + "\tfirstIPIndex := 0", + "\tif entry.TesterSource.ContainerIdentifier == nil {", + "\t\tlogger.Debug(\"%q selected to initiate ping tests\", containerID)", + "\t\tentry.TesterSource.ContainerIdentifier = containerID", + "\t\t// if multiple interfaces are present for this network on this container/pod, pick the first one as the tester source ip", + "\t\tentry.TesterSource.IP = ipAddressesFiltered[firstIPIndex]", + "\t\tif ifName != \"\" {", + "\t\t\tentry.TesterSource.InterfaceName = ifName", + "\t\t}", + "\t\t// do no include tester's IP in the list of destination IPs to ping", + "\t\tfirstIPIndex++", + "\t}", + "", + "\tfor _, aIP := range 
ipAddressesFiltered[firstIPIndex:] {", + "\t\tipDestEntry := netcommons.ContainerIP{}", + "\t\tipDestEntry.ContainerIdentifier = containerID", + "\t\tipDestEntry.IP = aIP", + "\t\t// if the interface name is not empty, then add it to the destination entry", + "\t\tif ifName != \"\" {", + "\t\t\tipDestEntry.InterfaceName = ifName", + "\t\t}", + "\t\tentry.DestTargets = append(entry.DestTargets, ipDestEntry)", + "\t}", + "", + "\t// Then reassign map entry", + "\tnetsUnderTest[netKey] = entry", + "}" + ] + }, + { + "name": "processContainerIpsPerNet", + "kind": "function", + "source": [ + "func processContainerIpsPerNet(containerID *provider.Container,", + "\tnetKey string,", + "\tipAddresses []string,", + "\tifName string,", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) {", + "\tipAddressesFiltered := netcommons.FilterIPListByIPVersion(ipAddresses, aIPVersion)", + "\tif len(ipAddressesFiltered) == 0 {", + "\t\t// if no multus addresses found, skip this container", + "\t\tlogger.Debug(\"Skipping %q, Network %q because no multus IPs are present\", containerID, netKey)", + "\t\treturn", + "\t}", + "\t// Create an entry at \"key\" if it is not present", + "\tif _, ok := netsUnderTest[netKey]; !ok {", + "\t\tnetsUnderTest[netKey] = netcommons.NetTestContext{}", + "\t}", + "\t// get a copy of the content", + "\tentry := netsUnderTest[netKey]", + "\t// Then modify the copy", + "\tfirstIPIndex := 0", + "\tif entry.TesterSource.ContainerIdentifier == nil {", + "\t\tlogger.Debug(\"%q selected to initiate ping tests\", containerID)", + "\t\tentry.TesterSource.ContainerIdentifier = containerID", + "\t\t// if multiple interfaces are present for this network on this container/pod, pick the first one as the tester source ip", + "\t\tentry.TesterSource.IP = ipAddressesFiltered[firstIPIndex]", + "\t\tif ifName != \"\" {", + "\t\t\tentry.TesterSource.InterfaceName = ifName", + "\t\t}", + "\t\t// do no 
include tester's IP in the list of destination IPs to ping", + "\t\tfirstIPIndex++", + "\t}", + "", + "\tfor _, aIP := range ipAddressesFiltered[firstIPIndex:] {", + "\t\tipDestEntry := netcommons.ContainerIP{}", + "\t\tipDestEntry.ContainerIdentifier = containerID", + "\t\tipDestEntry.IP = aIP", + "\t\t// if the interface name is not empty, then add it to the destination entry", + "\t\tif ifName != \"\" {", + "\t\t\tipDestEntry.InterfaceName = ifName", + "\t\t}", + "\t\tentry.DestTargets = append(entry.DestTargets, ipDestEntry)", + "\t}", + "", + "\t// Then reassign map entry", + "\tnetsUnderTest[netKey] = entry", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "PodIPsToStringList", + "kind": "function", + "source": [ + "func PodIPsToStringList(ips []corev1.PodIP) (ipList []string) {", + "\tfor _, ip := range ips {", + "\t\tipList = append(ipList, ip.IP)", + "\t}", + "\treturn ipList", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testNetworkConnectivity", + "kind": "function", + "source": [ + "func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) {", + "\tnetsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger())", + "\treport, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger())", + "\tif skip {", + "\t\tcheck.LogInfo(\"There are no %q networks to test with at least 2 pods, skipping test\", aIPVersion)", + "\t}", + "\tcheck.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest 
map[string]netcommons.NetTestContext) {", + "\tnetsUnderTest = make(map[string]netcommons.NetTestContext)", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tif put.SkipNetTests {", + "\t\t\tlogger.Info(\"Skipping %q because it is excluded from all connectivity tests\", put)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif aType == netcommons.MULTUS {", + "\t\t\tif put.SkipMultusNetTests {", + "\t\t\t\tlogger.Info(\"Skipping pod %q because it is excluded from %q connectivity tests only\", put.Name, aType)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tfor netKey, multusNetworkInterface := range put.MultusNetworkInterfaces {", + "\t\t\t\t// The first container is used to get the network namespace", + "\t\t\t\tprocessContainerIpsPerNet(put.Containers[0], netKey, multusNetworkInterface.IPs, multusNetworkInterface.Interface, netsUnderTest, aIPVersion, logger)", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tconst defaultNetKey = \"default\"", + "\t\tdefaultIPAddress := put.Status.PodIPs", + "\t\t// The first container is used to get the network namespace", + "\t\tprocessContainerIpsPerNet(put.Containers[0], defaultNetKey, netcommons.PodIPsToStringList(defaultIPAddress), \"\", netsUnderTest, aIPVersion, logger)", + "\t}", + "\treturn netsUnderTest", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "PingResults.String", + "exported": true, + "receiver": "PingResults", + "signature": "func()(string)", + "doc": "PingResults.String Provides a formatted string representation of ping results\n\nThe method formats the outcome, transmitted count, received count, and error\ncount into a readable string. 
It converts the numeric result code to a\nhuman‑readable word using a helper function before embedding all values in\nthe output.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:58", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "ResultToString", + "kind": "function", + "source": [ + "func ResultToString(result int) (str string) {", + "\tswitch result {", + "\tcase SUCCESS:", + "\t\treturn \"SUCCESS\"", + "\tcase FAILURE:", + "\t\treturn \"FAILURE\"", + "\tcase ERROR:", + "\t\treturn \"ERROR\"", + "\t}", + "\treturn \"\"", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (results PingResults) String() string {", + "\treturn fmt.Sprintf(\"outcome: %s transmitted: %d received: %d errors: %d\", testhelper.ResultToString(results.outcome), results.transmitted, results.received, results.errors)", + "}" + ] + }, + { + "name": "RunNetworkingTests", + "qualifiedName": "RunNetworkingTests", + "exported": true, + "signature": "func(map[string]netcommons.NetTestContext, int, netcommons.IPVersion, *log.Logger)(testhelper.FailureReasonOut, bool)", + "doc": "RunNetworkingTests Executes ICMP ping tests across multiple network attachments\n\nThe function receives a map of networking contexts, a ping count, IP version,\nand logger. It iterates over each network, performing pings from a source\ncontainer to all destination containers, recording successes and failures in\nreport objects. 
If no networks or destinations are available, the test is\nskipped.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:160", + "calls": [ + { + "name": "Debug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "PrintNetTestContextMap", + "kind": "function", + "source": [ + "func PrintNetTestContextMap(netsUnderTest map[string]NetTestContext) string {", + "\tvar sb strings.Builder", + "\tif len(netsUnderTest) == 0 {", + "\t\tsb.WriteString(\"No networks to test.\\n\")", + "\t}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tsb.WriteString(fmt.Sprintf(\"***Test for Network attachment: %s\\n\", netName))", + "\t\tsb.WriteString(fmt.Sprintf(\"%s\\n\", netUnderTest.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "TestPing", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func 
NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + 
"\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testNetworkConnectivity", + "kind": "function", + "source": [ + "func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) {", + "\tnetsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger())", + "\treport, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger())", + "\tif skip {", + "\t\tcheck.LogInfo(\"There are no %q networks to test with at least 2 pods, skipping test\", aIPVersion)", + "\t}", + "\tcheck.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func RunNetworkingTests( //nolint:funlen", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\tcount int,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) (report testhelper.FailureReasonOut, skip bool) {", + "\tlogger.Debug(\"%s\", 
netcommons.PrintNetTestContextMap(netsUnderTest))", + "\tskip = false", + "\tif len(netsUnderTest) == 0 {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t\treturn report, skip", + "\t}", + "\t// if no network can be tested, then we need to skip the test entirely.", + "\t// If at least one network can be tested (e.g. \u003e 2 IPs/ interfaces present), then we do not skip the test", + "\tatLeastOneNetworkTested := false", + "\tcompliantNets := map[string]int{}", + "\tnonCompliantNets := map[string]int{}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tcompliantNets[netName] = 0", + "\t\tnonCompliantNets[netName] = 0", + "\t\tif len(netUnderTest.DestTargets) == 0 {", + "\t\t\tlogger.Debug(\"There are no containers to ping for %q network %q. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test\", aIPVersion, netName)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneNetworkTested = true", + "\t\tlogger.Debug(\"%q Ping tests on network %q. 
Number of target IPs: %d\", aIPVersion, netName, len(netUnderTest.DestTargets))", + "", + "\t\tfor _, aDestIP := range netUnderTest.DestTargets {", + "\t\t\tlogger.Debug(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q )\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP)", + "\t\t\tresult, err := TestPing(netUnderTest.TesterSource.ContainerIdentifier, aDestIP, count)", + "\t\t\tlogger.Debug(\"Ping results: %q\", result)", + "\t\t\tlogger.Info(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q ) result: %q\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP, result)", + "\t\t\tif err != nil {", + "\t\t\t\tlogger.Debug(\"Ping failed, err=%v\", err)", + "\t\t\t}", + "\t\t\tif result.outcome != testhelper.SUCCESS {", + "\t\t\t\tlogger.Error(\"Ping from %q (srcip: %q) to %q (dstip: %q) failed\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tnonCompliantNets[netName]++", + "\t\t\t\tnonCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Failed\", false).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + 
"\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, nonCompliantObject)", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"Ping from %q (srcip: %q) to %q (dstip: %q) succeeded\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tcompliantNets[netName]++", + "\t\t\t\tCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Succeeded\", true).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + "\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, CompliantObject)", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantNets[netName] != 0 {", + "\t\t\tlogger.Error(\"ICMP tests failed for %d IP source/destination in this network\", nonCompliantNets[netName])", + "\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP 
tests failed for %d IP source/destination in this network\", nonCompliantNets[netName]), testhelper.NetworkType, false).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t\tif compliantNets[netName] != 0 {", + "\t\t\tlogger.Info(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName])", + "\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName]), testhelper.NetworkType, true).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t}", + "\tif !atLeastOneNetworkTested {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t}", + "", + "\treturn report, skip", + "}" + ] + }, + { + "name": "parsePingResult", + "qualifiedName": "parsePingResult", + "exported": false, + "signature": "func(string, string)(PingResults, error)", + "doc": "parsePingResult Parses ping command output to determine success, failure, or error\n\nThe function examines the standard output for patterns indicating invalid\narguments or successful execution. It extracts transmitted, received, and\nerror counts from the output using regular expressions and converts them to\nintegers. 
Based on these metrics, it sets an outcome flag and returns the\nresults along with any parsing errors.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:282", + "calls": [ + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func parsePingResult(stdout, stderr string) (results PingResults, err error) {", + "\tre := regexp.MustCompile(ConnectInvalidArgumentRegex)", + "\tmatched := re.FindStringSubmatch(stdout)", + "\t// If we find a error log we fail", + "\tif matched != nil {", + "\t\tresults.outcome = testhelper.ERROR", + "\t\treturn results, fmt.Errorf(\"ping failed with invalid arguments, stdout: %s, stderr: %s\", stdout, stderr)", + "\t}", + "\tre = regexp.MustCompile(SuccessfulOutputRegex)", + "\tmatched = re.FindStringSubmatch(stdout)", + "\t// If we do not find a successful log, we fail", + "\tif matched == nil {", + "\t\tresults.outcome = testhelper.FAILURE", + "\t\treturn results, fmt.Errorf(\"ping output did not match successful regex, stdout: %s, stderr: %s\", stdout, stderr)", + "\t}", + "\t// Ignore errors in converting matches to decimal integers.", + "\t// Regular expression `stat` is required to underwrite this assumption.", + "\tresults.transmitted, _ = strconv.Atoi(matched[1])", + "\tresults.received, _ = strconv.Atoi(matched[2])", + "\tresults.errors, _ = 
strconv.Atoi(matched[4])", + "\tswitch {", + "\tcase results.transmitted == 0 || results.errors \u003e 0:", + "\t\tresults.outcome = testhelper.ERROR", + "\tcase results.received \u003e 0 \u0026\u0026 (results.transmitted-results.received) \u003c= 1:", + "\t\tresults.outcome = testhelper.SUCCESS", + "\tdefault:", + "\t\tresults.outcome = testhelper.FAILURE", + "\t}", + "\treturn results, nil", + "}" + ] + }, + { + "name": "processContainerIpsPerNet", + "qualifiedName": "processContainerIpsPerNet", + "exported": false, + "signature": "func(*provider.Container, string, []string, string, map[string]netcommons.NetTestContext, netcommons.IPVersion, *log.Logger)()", + "doc": "processContainerIpsPerNet collects container IPs for a network to set up ping tests\n\nThis routine filters the supplied IP addresses by the desired IP version,\nthen records them in a shared map keyed by network name. The first IP found\nis designated as the test initiator and stored as the source of pings;\nsubsequent IPs become destination targets. 
If no suitable IPs exist, the\ncontainer is skipped and the function exits early.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:105", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "FilterIPListByIPVersion", + "kind": "function", + "source": [ + "func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string {", + "\tvar filteredIPList []string", + "\tfor _, aIP := range ipList {", + "\t\tif ver, _ := GetIPVersion(aIP); aIPVersion == ver {", + "\t\t\tfilteredIPList = append(filteredIPList, aIP)", + "\t\t}", + "\t}", + "\treturn filteredIPList", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "BuildNetTestContext", + "kind": "function", + "source": [ + "func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest map[string]netcommons.NetTestContext) {", + "\tnetsUnderTest = make(map[string]netcommons.NetTestContext)", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tif put.SkipNetTests {", + "\t\t\tlogger.Info(\"Skipping %q because it is excluded from all connectivity tests\", put)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif aType == netcommons.MULTUS {", + "\t\t\tif put.SkipMultusNetTests {", + "\t\t\t\tlogger.Info(\"Skipping pod %q because it is excluded from %q connectivity tests only\", put.Name, aType)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tfor netKey, multusNetworkInterface := range put.MultusNetworkInterfaces {", + "\t\t\t\t// The first container is used to get the network namespace", + 
"\t\t\t\tprocessContainerIpsPerNet(put.Containers[0], netKey, multusNetworkInterface.IPs, multusNetworkInterface.Interface, netsUnderTest, aIPVersion, logger)", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tconst defaultNetKey = \"default\"", + "\t\tdefaultIPAddress := put.Status.PodIPs", + "\t\t// The first container is used to get the network namespace", + "\t\tprocessContainerIpsPerNet(put.Containers[0], defaultNetKey, netcommons.PodIPsToStringList(defaultIPAddress), \"\", netsUnderTest, aIPVersion, logger)", + "\t}", + "\treturn netsUnderTest", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func processContainerIpsPerNet(containerID *provider.Container,", + "\tnetKey string,", + "\tipAddresses []string,", + "\tifName string,", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) {", + "\tipAddressesFiltered := netcommons.FilterIPListByIPVersion(ipAddresses, aIPVersion)", + "\tif len(ipAddressesFiltered) == 0 {", + "\t\t// if no multus addresses found, skip this container", + "\t\tlogger.Debug(\"Skipping %q, Network %q because no multus IPs are present\", containerID, netKey)", + "\t\treturn", + "\t}", + "\t// Create an entry at \"key\" if it is not present", + "\tif _, ok := netsUnderTest[netKey]; !ok {", + "\t\tnetsUnderTest[netKey] = netcommons.NetTestContext{}", + "\t}", + "\t// get a copy of the content", + "\tentry := netsUnderTest[netKey]", + "\t// Then modify the copy", + "\tfirstIPIndex := 0", + "\tif entry.TesterSource.ContainerIdentifier == nil {", + "\t\tlogger.Debug(\"%q selected to initiate ping tests\", containerID)", + "\t\tentry.TesterSource.ContainerIdentifier = containerID", + "\t\t// if multiple interfaces are present for this network on this container/pod, pick the first one as the tester source ip", + "\t\tentry.TesterSource.IP = ipAddressesFiltered[firstIPIndex]", + "\t\tif ifName != \"\" {", + 
"\t\t\tentry.TesterSource.InterfaceName = ifName", + "\t\t}", + "\t\t// do no include tester's IP in the list of destination IPs to ping", + "\t\tfirstIPIndex++", + "\t}", + "", + "\tfor _, aIP := range ipAddressesFiltered[firstIPIndex:] {", + "\t\tipDestEntry := netcommons.ContainerIP{}", + "\t\tipDestEntry.ContainerIdentifier = containerID", + "\t\tipDestEntry.IP = aIP", + "\t\t// if the interface name is not empty, then add it to the destination entry", + "\t\tif ifName != \"\" {", + "\t\t\tipDestEntry.InterfaceName = ifName", + "\t\t}", + "\t\tentry.DestTargets = append(entry.DestTargets, ipDestEntry)", + "\t}", + "", + "\t// Then reassign map entry", + "\tnetsUnderTest[netKey] = entry", + "}" + ] + } + ], + "globals": [ + { + "name": "TestPing", + "exported": true, + "type": "", + "doc": "TestPing Initiates a ping test between a source container and network (1 ip) and a destination container and network (1 ip)", + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:259" + } + ], + "consts": [ + { + "name": "ConnectInvalidArgumentRegex", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:33" + }, + { + "name": "SuccessfulOutputRegex", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/icmp/icmp.go:36" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "netcommons", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "k8s.io/api/core/v1", + "net", + "strconv", + "strings" + ], + "structs": [ + { + "name": "ContainerIP", + "exported": true, + "doc": "ContainerIP Formats a container's IP address with its identifier\n\nThis method 
returns a human‑readable representation that combines the\ncontainer’s IP address and a long form of its identifier. It concatenates\nthe two strings with parentheses to clearly separate the network address from\nthe container details. The output is useful for logging or debugging\nnetworking tests.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:94", + "fields": { + "ContainerIdentifier": "*provider.Container", + "IP": "string", + "InterfaceName": "string" + }, + "methodNames": [ + "String" + ], + "source": [ + "type ContainerIP struct {", + "\t// ip address of the target container", + "\tIP string", + "\t// targetContainerIdentifier container identifier including namespace, pod name, container name, node name, and container UID", + "\tContainerIdentifier *provider.Container", + "\t// interfaceName is the interface we want to target for the ping test", + "\tInterfaceName string", + "}" + ] + }, + { + "name": "NetTestContext", + "exported": true, + "doc": "NetTestContext Describes a network test setup for a subnet\n\nThis structure holds information about which container initiates ping tests\non a given network, the node it runs on, and the list of target containers to\nbe pinged. The tester source is chosen randomly from available containers. 
It\nprovides a string representation that lists the initiating container followed\nby all destination targets.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:78", + "fields": { + "DestTargets": "[]ContainerIP", + "TesterContainerNodeName": "string", + "TesterSource": "ContainerIP" + }, + "methodNames": [ + "String" + ], + "source": [ + "type NetTestContext struct {", + "\t// testerContainerNodeOc session context to access the node running the container selected to initiate tests", + "\tTesterContainerNodeName string", + "\t// testerSource is the container select to initiate the ping tests on this given network", + "\tTesterSource ContainerIP", + "\t// ipDestTargets List of containers to be pinged by the testerSource on this given network", + "\tDestTargets []ContainerIP", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "String", + "qualifiedName": "ContainerIP.String", + "exported": true, + "receiver": "ContainerIP", + "signature": "func()(string)", + "doc": "ContainerIP.String Formats the container IP address with its identifier\n\nThis method constructs a string that shows the IP address followed by the\nlong form of the container’s identifier in parentheses. 
It uses formatting\nutilities to combine the two pieces into a single readable representation,\nwhich is returned as a string.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:128", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "StringLong", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (cip *ContainerIP) String() string {", + "\treturn fmt.Sprintf(\"%s ( %s )\",", + "\t\tcip.IP,", + "\t\tcip.ContainerIdentifier.StringLong(),", + "\t)", + "}" + ] + }, + { + "name": "FilterIPListByIPVersion", + "qualifiedName": "FilterIPListByIPVersion", + "exported": true, + "signature": "func([]string, IPVersion)([]string)", + "doc": "FilterIPListByIPVersion Selects addresses matching a specified IP version\n\nThe function receives a slice of string IPs and an IP version to filter by.\nIt iterates over the list, determines each address’s version, and keeps\nonly those that match the requested type. 
The resulting slice contains only\nIPv4 or IPv6 addresses as requested.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:193", + "calls": [ + { + "name": "GetIPVersion", + "kind": "function", + "source": [ + "func GetIPVersion(aIP string) (IPVersion, error) {", + "\tip := net.ParseIP(aIP)", + "\tif ip == nil {", + "\t\treturn Undefined, fmt.Errorf(\"%s is Not an IPv4 or an IPv6\", aIP)", + "\t}", + "\tif ip.To4() != nil {", + "\t\treturn IPv4, nil", + "\t}", + "\treturn IPv6, nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "processContainerIpsPerNet", + "kind": "function", + "source": [ + "func processContainerIpsPerNet(containerID *provider.Container,", + "\tnetKey string,", + "\tipAddresses []string,", + "\tifName string,", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) {", + "\tipAddressesFiltered := netcommons.FilterIPListByIPVersion(ipAddresses, aIPVersion)", + "\tif len(ipAddressesFiltered) == 0 {", + "\t\t// if no multus addresses found, skip this container", + "\t\tlogger.Debug(\"Skipping %q, Network %q because no multus IPs are present\", containerID, netKey)", + "\t\treturn", + "\t}", + "\t// Create an entry at \"key\" if it is not present", + "\tif _, ok := netsUnderTest[netKey]; !ok {", + "\t\tnetsUnderTest[netKey] = netcommons.NetTestContext{}", + "\t}", + "\t// get a copy of the content", + "\tentry := netsUnderTest[netKey]", + "\t// Then modify the copy", + "\tfirstIPIndex := 0", + "\tif entry.TesterSource.ContainerIdentifier == nil {", + "\t\tlogger.Debug(\"%q selected to initiate ping tests\", containerID)", + "\t\tentry.TesterSource.ContainerIdentifier = containerID", + "\t\t// if multiple interfaces are present for this network on this container/pod, pick the first one as the tester 
source ip", + "\t\tentry.TesterSource.IP = ipAddressesFiltered[firstIPIndex]", + "\t\tif ifName != \"\" {", + "\t\t\tentry.TesterSource.InterfaceName = ifName", + "\t\t}", + "\t\t// do no include tester's IP in the list of destination IPs to ping", + "\t\tfirstIPIndex++", + "\t}", + "", + "\tfor _, aIP := range ipAddressesFiltered[firstIPIndex:] {", + "\t\tipDestEntry := netcommons.ContainerIP{}", + "\t\tipDestEntry.ContainerIdentifier = containerID", + "\t\tipDestEntry.IP = aIP", + "\t\t// if the interface name is not empty, then add it to the destination entry", + "\t\tif ifName != \"\" {", + "\t\t\tipDestEntry.InterfaceName = ifName", + "\t\t}", + "\t\tentry.DestTargets = append(entry.DestTargets, ipDestEntry)", + "\t}", + "", + "\t// Then reassign map entry", + "\tnetsUnderTest[netKey] = entry", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string {", + "\tvar filteredIPList []string", + "\tfor _, aIP := range ipList {", + "\t\tif ver, _ := GetIPVersion(aIP); aIPVersion == ver {", + "\t\t\tfilteredIPList = append(filteredIPList, aIP)", + "\t\t}", + "\t}", + "\treturn filteredIPList", + "}" + ] + }, + { + "name": "GetIPVersion", + "qualifiedName": "GetIPVersion", + "exported": true, + "signature": "func(string)(IPVersion, error)", + "doc": "GetIPVersion determines whether a string represents an IPv4 or IPv6 address\n\nThe function attempts to parse the input as an IP address using the standard\nlibrary. If parsing fails, it reports that the string is not a valid IP. 
It\nthen distinguishes between IPv4 and IPv6 by checking if the parsed address\ncan be converted to a four‑byte form; the result is returned along with any\nerror.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:176", + "calls": [ + { + "pkgPath": "net", + "name": "ParseIP", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "To4", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "FilterIPListByIPVersion", + "kind": "function", + "source": [ + "func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string {", + "\tvar filteredIPList []string", + "\tfor _, aIP := range ipList {", + "\t\tif ver, _ := GetIPVersion(aIP); aIPVersion == ver {", + "\t\t\tfilteredIPList = append(filteredIPList, aIP)", + "\t\t}", + "\t}", + "\treturn filteredIPList", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "GetServiceIPVersion", + "kind": "function", + "source": [ + "func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) {", + "\tipver, err := netcommons.GetIPVersion(aService.Spec.ClusterIP)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s cannot get aService clusterIP version\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif aService.Spec.IPFamilyPolicy == nil {", + "\t\terr = fmt.Errorf(\"%s does not have a IPFamilyPolicy configured\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv6 {", + "\t\tlog.Debug(\"%s is single stack ipv6\", ToString(aService))", + "\t\treturn netcommons.IPv6, nil", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == 
netcommons.IPv4 {", + "\t\tlog.Debug(\"%s is single stack ipv4\", ToString(aService))", + "\t\treturn netcommons.IPv4, nil", + "\t}", + "\tif (*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyPreferDualStack ||", + "\t\t*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyRequireDualStack) \u0026\u0026", + "\t\tlen(aService.Spec.ClusterIPs) \u003c 2 {", + "\t\terr = fmt.Errorf(\"%s is dual stack but has only zero or one ClusterIPs\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "", + "\tres, err := isClusterIPsDualStack(aService.Spec.ClusterIPs)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s, err:%s\", ToString(aService), err)", + "\t\treturn result, err", + "\t}", + "\tif res {", + "\t\tlog.Debug(\"%s is dual-stack\", ToString(aService))", + "\t\treturn netcommons.IPv4v6, nil", + "\t}", + "", + "\terr = fmt.Errorf(\"%s is not compliant, it is not single stack ipv6 or dual stack\", ToString(aService))", + "\treturn result, err", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "isClusterIPsDualStack", + "kind": "function", + "source": [ + "func isClusterIPsDualStack(ips []string) (result bool, err error) {", + "\tvar hasIPv4, hasIPv6 bool", + "\tfor _, ip := range ips {", + "\t\tipver, err := netcommons.GetIPVersion(ip)", + "\t\tif err != nil {", + "\t\t\treturn result, fmt.Errorf(\"cannot get aService ClusterIPs (%s) version - err: %v\", ip, err)", + "\t\t}", + "\t\tswitch ipver {", + "\t\tcase netcommons.IPv4:", + "\t\t\thasIPv4 = true", + "\t\tcase netcommons.IPv6:", + "\t\t\thasIPv6 = true", + "\t\tcase netcommons.IPv4v6, netcommons.Undefined:", + "\t\t}", + "\t}", + "\tif hasIPv4 \u0026\u0026 hasIPv6 {", + "\t\treturn true, nil", + "\t}", + "\treturn false, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetIPVersion(aIP string) (IPVersion, error) {", + "\tip := net.ParseIP(aIP)", + "\tif ip == nil {", + "\t\treturn 
Undefined, fmt.Errorf(\"%s is Not an IPv4 or an IPv6\", aIP)", + "\t}", + "\tif ip.To4() != nil {", + "\t\treturn IPv4, nil", + "\t}", + "\treturn IPv6, nil", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "IPVersion.String", + "exported": true, + "receiver": "IPVersion", + "signature": "func()(string)", + "doc": "IPVersion.String Returns the textual form of an IP version\n\nThe method examines the value of the receiver and maps each predefined\nconstant to its corresponding string. It covers IPv4, IPv6, combined\nIPv4/IPv6, and an undefined case. If none match, it defaults to the undefined\nstring.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:57", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (version IPVersion) String() string {", + "\tswitch version {", + "\tcase IPv4:", + "\t\treturn IPv4String", + "\tcase IPv6:", + "\t\treturn IPv6String", + "\tcase IPv4v6:", + "\t\treturn IPv4v6String", + "\tcase Undefined:", + "\t\treturn UndefinedString", + "\t}", + "\treturn UndefinedString", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "NetTestContext.String", + "exported": true, + "receiver": "NetTestContext", + "signature": "func()(string)", + "doc": "NetTestContext.String Formats the network test context for display\n\nThis method builds a multi-line string describing the container that\ninitiates the tests and each target container it will communicate with. It\nfirst writes the source container, then lists all destination containers or\nindicates when none are present. 
The resulting string is returned for logging\nor debugging purposes.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:110", + "calls": [ + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "NetTestContext.String", + "kind": "function", + "source": [ + "func (testContext *NetTestContext) String() string {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"From initiating container: %s\\n\", testContext.TesterSource.String()))", + "\tif len(testContext.DestTargets) == 0 {", + "\t\tsb.WriteString(\"--\u003e No target containers to test for this network\")", + "\t}", + "\tfor _, target := range testContext.DestTargets {", + "\t\tsb.WriteString(fmt.Sprintf(\"--\u003e To target container: %s\\n\", target.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "NetTestContext.String", + "kind": "function", + "source": [ + "func (testContext *NetTestContext) String() string {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"From initiating container: %s\\n\", testContext.TesterSource.String()))", + "\tif len(testContext.DestTargets) == 0 {", + "\t\tsb.WriteString(\"--\u003e No target containers to test for this network\")", + "\t}", + "\tfor _, target := range testContext.DestTargets {", + "\t\tsb.WriteString(fmt.Sprintf(\"--\u003e To target container: %s\\n\", target.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "name": "NetTestContext.String", + "kind": "function", + "source": [ + "func (testContext *NetTestContext) String() string {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"From initiating container: %s\\n\", 
testContext.TesterSource.String()))", + "\tif len(testContext.DestTargets) == 0 {", + "\t\tsb.WriteString(\"--\u003e No target containers to test for this network\")", + "\t}", + "\tfor _, target := range testContext.DestTargets {", + "\t\tsb.WriteString(fmt.Sprintf(\"--\u003e To target container: %s\\n\", target.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "NetTestContext.String", + "kind": "function", + "source": [ + "func (testContext *NetTestContext) String() string {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"From initiating container: %s\\n\", testContext.TesterSource.String()))", + "\tif len(testContext.DestTargets) == 0 {", + "\t\tsb.WriteString(\"--\u003e No target containers to test for this network\")", + "\t}", + "\tfor _, target := range testContext.DestTargets {", + "\t\tsb.WriteString(fmt.Sprintf(\"--\u003e To target container: %s\\n\", target.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "PrintNetTestContextMap", + "kind": "function", + "source": [ + "func PrintNetTestContextMap(netsUnderTest map[string]NetTestContext) string {", + "\tvar sb strings.Builder", + "\tif len(netsUnderTest) == 0 {", + "\t\tsb.WriteString(\"No networks to test.\\n\")", + "\t}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tsb.WriteString(fmt.Sprintf(\"***Test for Network attachment: %s\\n\", netName))", + "\t\tsb.WriteString(fmt.Sprintf(\"%s\\n\", netUnderTest.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (testContext *NetTestContext) String() string {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"From initiating container: %s\\n\", 
testContext.TesterSource.String()))", + "\tif len(testContext.DestTargets) == 0 {", + "\t\tsb.WriteString(\"--\u003e No target containers to test for this network\")", + "\t}", + "\tfor _, target := range testContext.DestTargets {", + "\t\tsb.WriteString(fmt.Sprintf(\"--\u003e To target container: %s\\n\", target.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "name": "PodIPsToStringList", + "qualifiedName": "PodIPsToStringList", + "exported": true, + "signature": "func([]corev1.PodIP)([]string)", + "doc": "PodIPsToStringList Transforms a slice of PodIP structures into plain IP address strings\n\nThe function iterates over each corev1.PodIP element, extracts the IP string\nfield, and appends it to a new slice. It returns this list of string\naddresses for use elsewhere in the package. The operation is linear in the\nnumber of input items and requires no additional dependencies beyond standard\nGo append.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:162", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "BuildNetTestContext", + "kind": "function", + "source": [ + "func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest map[string]netcommons.NetTestContext) {", + "\tnetsUnderTest = make(map[string]netcommons.NetTestContext)", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tif put.SkipNetTests {", + "\t\t\tlogger.Info(\"Skipping %q because it is excluded from all connectivity tests\", put)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif aType == netcommons.MULTUS {", + "\t\t\tif put.SkipMultusNetTests {", + "\t\t\t\tlogger.Info(\"Skipping pod %q because it is excluded from %q connectivity tests only\", put.Name, aType)", + "\t\t\t\tcontinue", + 
"\t\t\t}", + "\t\t\tfor netKey, multusNetworkInterface := range put.MultusNetworkInterfaces {", + "\t\t\t\t// The first container is used to get the network namespace", + "\t\t\t\tprocessContainerIpsPerNet(put.Containers[0], netKey, multusNetworkInterface.IPs, multusNetworkInterface.Interface, netsUnderTest, aIPVersion, logger)", + "\t\t\t}", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tconst defaultNetKey = \"default\"", + "\t\tdefaultIPAddress := put.Status.PodIPs", + "\t\t// The first container is used to get the network namespace", + "\t\tprocessContainerIpsPerNet(put.Containers[0], defaultNetKey, netcommons.PodIPsToStringList(defaultIPAddress), \"\", netsUnderTest, aIPVersion, logger)", + "\t}", + "\treturn netsUnderTest", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PodIPsToStringList(ips []corev1.PodIP) (ipList []string) {", + "\tfor _, ip := range ips {", + "\t\tipList = append(ipList, ip.IP)", + "\t}", + "\treturn ipList", + "}" + ] + }, + { + "name": "PrintNetTestContextMap", + "qualifiedName": "PrintNetTestContextMap", + "exported": true, + "signature": "func(map[string]NetTestContext)(string)", + "doc": "PrintNetTestContextMap Formats a map of network test contexts into a readable string\n\nThis function iterates over a mapping from network names to NetTestContext\nobjects, building a multi-line string that begins with a header for each\nnetwork and then includes the detailed output of the context’s String\nmethod. If no networks are provided it returns a short message indicating\nthere is nothing to test. 
The resulting string is used by other parts of the\ntest suite to log or display current test conditions.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:143", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "NetTestContext.String", + "kind": "function", + "source": [ + "func (testContext *NetTestContext) String() string {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"From initiating container: %s\\n\", testContext.TesterSource.String()))", + "\tif len(testContext.DestTargets) == 0 {", + "\t\tsb.WriteString(\"--\u003e No target containers to test for this network\")", + "\t}", + "\tfor _, target := range testContext.DestTargets {", + "\t\tsb.WriteString(fmt.Sprintf(\"--\u003e To target container: %s\\n\", target.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "name": "NetTestContext.String", + "kind": "function", + "source": [ + "func (testContext *NetTestContext) String() string {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"From initiating container: %s\\n\", testContext.TesterSource.String()))", + "\tif len(testContext.DestTargets) == 0 {", + "\t\tsb.WriteString(\"--\u003e No target containers to test for this network\")", + "\t}", + "\tfor _, target := range testContext.DestTargets {", + "\t\tsb.WriteString(fmt.Sprintf(\"--\u003e To target container: %s\\n\", target.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp", + "name": "RunNetworkingTests", + "kind": "function", + "source": [ + "func RunNetworkingTests( 
//nolint:funlen", + "\tnetsUnderTest map[string]netcommons.NetTestContext,", + "\tcount int,", + "\taIPVersion netcommons.IPVersion,", + "\tlogger *log.Logger) (report testhelper.FailureReasonOut, skip bool) {", + "\tlogger.Debug(\"%s\", netcommons.PrintNetTestContextMap(netsUnderTest))", + "\tskip = false", + "\tif len(netsUnderTest) == 0 {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t\treturn report, skip", + "\t}", + "\t// if no network can be tested, then we need to skip the test entirely.", + "\t// If at least one network can be tested (e.g. \u003e 2 IPs/ interfaces present), then we do not skip the test", + "\tatLeastOneNetworkTested := false", + "\tcompliantNets := map[string]int{}", + "\tnonCompliantNets := map[string]int{}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tcompliantNets[netName] = 0", + "\t\tnonCompliantNets[netName] = 0", + "\t\tif len(netUnderTest.DestTargets) == 0 {", + "\t\t\tlogger.Debug(\"There are no containers to ping for %q network %q. A minimum of 2 containers is needed to run a ping test (a source and a destination) Skipping test\", aIPVersion, netName)", + "\t\t\tcontinue", + "\t\t}", + "\t\tatLeastOneNetworkTested = true", + "\t\tlogger.Debug(\"%q Ping tests on network %q. 
Number of target IPs: %d\", aIPVersion, netName, len(netUnderTest.DestTargets))", + "", + "\t\tfor _, aDestIP := range netUnderTest.DestTargets {", + "\t\t\tlogger.Debug(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q )\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP)", + "\t\t\tresult, err := TestPing(netUnderTest.TesterSource.ContainerIdentifier, aDestIP, count)", + "\t\t\tlogger.Debug(\"Ping results: %q\", result)", + "\t\t\tlogger.Info(\"%q ping test on network %q from ( %q srcip: %q ) to ( %q dstip: %q ) result: %q\",", + "\t\t\t\taIPVersion, netName,", + "\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier, netUnderTest.TesterSource.IP,", + "\t\t\t\taDestIP.ContainerIdentifier, aDestIP.IP, result)", + "\t\t\tif err != nil {", + "\t\t\t\tlogger.Debug(\"Ping failed, err=%v\", err)", + "\t\t\t}", + "\t\t\tif result.outcome != testhelper.SUCCESS {", + "\t\t\t\tlogger.Error(\"Ping from %q (srcip: %q) to %q (dstip: %q) failed\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tnonCompliantNets[netName]++", + "\t\t\t\tnonCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Failed\", false).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + 
"\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, nonCompliantObject)", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"Ping from %q (srcip: %q) to %q (dstip: %q) succeeded\",", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier,", + "\t\t\t\t\tnetUnderTest.TesterSource.IP,", + "\t\t\t\t\taDestIP.ContainerIdentifier,", + "\t\t\t\t\taDestIP.IP)", + "\t\t\t\tcompliantNets[netName]++", + "\t\t\t\tCompliantObject := testhelper.NewContainerReportObject(netUnderTest.TesterSource.ContainerIdentifier.Namespace,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Podname,", + "\t\t\t\t\tnetUnderTest.TesterSource.ContainerIdentifier.Name, \"Pinging destination container/IP from source container (identified by Namespace/Pod Name/Container Name) Succeeded\", true).", + "\t\t\t\t\tSetType(testhelper.ICMPResultType).", + "\t\t\t\t\tAddField(testhelper.NetworkName, netName).", + "\t\t\t\t\tAddField(testhelper.SourceIP, netUnderTest.TesterSource.IP).", + "\t\t\t\t\tAddField(testhelper.DestinationNamespace, aDestIP.ContainerIdentifier.Namespace).", + "\t\t\t\t\tAddField(testhelper.DestinationPodName, aDestIP.ContainerIdentifier.Podname).", + "\t\t\t\t\tAddField(testhelper.DestinationContainerName, aDestIP.ContainerIdentifier.Name).", + "\t\t\t\t\tAddField(testhelper.DestinationIP, aDestIP.IP)", + "\t\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, CompliantObject)", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantNets[netName] != 0 {", + "\t\t\tlogger.Error(\"ICMP tests failed for %d IP source/destination in this network\", nonCompliantNets[netName])", + "\t\t\treport.NonCompliantObjectsOut = append(report.NonCompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP 
tests failed for %d IP source/destination in this network\", nonCompliantNets[netName]), testhelper.NetworkType, false).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t\tif compliantNets[netName] != 0 {", + "\t\t\tlogger.Info(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName])", + "\t\t\treport.CompliantObjectsOut = append(report.CompliantObjectsOut, testhelper.NewReportObject(fmt.Sprintf(\"ICMP tests were successful for all %d IP source/destination in this network\", compliantNets[netName]), testhelper.NetworkType, true).", + "\t\t\t\tAddField(testhelper.NetworkName, netName))", + "\t\t}", + "\t}", + "\tif !atLeastOneNetworkTested {", + "\t\tlogger.Debug(\"There are no %q networks to test, skipping test\", aIPVersion)", + "\t\tskip = true", + "\t}", + "", + "\treturn report, skip", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PrintNetTestContextMap(netsUnderTest map[string]NetTestContext) string {", + "\tvar sb strings.Builder", + "\tif len(netsUnderTest) == 0 {", + "\t\tsb.WriteString(\"No networks to test.\\n\")", + "\t}", + "\tfor netName, netUnderTest := range netsUnderTest {", + "\t\tsb.WriteString(fmt.Sprintf(\"***Test for Network attachment: %s\\n\", netName))", + "\t\tsb.WriteString(fmt.Sprintf(\"%s\\n\", netUnderTest.String()))", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "name": "TestReservedPortsUsage", + "qualifiedName": "TestReservedPortsUsage", + "exported": true, + "signature": "func(*provider.TestEnvironment, map[int32]bool, string, *log.Logger)([]*testhelper.ReportObject)", + "doc": "TestReservedPortsUsage checks pods for listening on or declaring reserved ports\n\nThe function receives a test environment, a map of port numbers that are\nconsidered reserved, an origin label for those ports, and a logger. 
It scans\nall pods in the environment to find any containers listening on or declaring\nthese ports, excluding known Istio proxy exceptions. The result is two slices\nof report objects indicating compliant and non‑compliant findings.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:320", + "calls": [ + { + "name": "findRoguePodsListeningToPorts", + "kind": "function", + "source": [ + "func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRogueContainersDeclaringPorts(put.Containers, portsToTest, portsOrigin, logger)", + "\t\tnonCompliantPortFound := len(nonCompliantObjectsEntries) \u003e 0", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "\t\tcut := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get the listening ports on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Failed to get the listening ports on pod, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor port := range listeningPorts {", + "\t\t\tif ok := portsToTest[port.PortNumber]; ok {", + "\t\t\t\t// If pod contains an \"istio-proxy\" container, we need to make sure that the ports returned", + "\t\t\t\t// overlap with the known istio ports", + "\t\t\t\tif put.ContainsIstioProxy() \u0026\u0026 ReservedIstioPorts[port.PortNumber] {", + "\t\t\t\t\tlogger.Info(\"%q was found to be listening to port %d due to istio-proxy 
being present. Ignoring.\", put, port.PortNumber)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlogger.Error(\"%q has one container (%q) listening on port %d (%s) that has been reserved\", put, cut.Name, port.PortNumber, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t\tnonCompliantPortFound = true", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q listens in %s unreserved port %d (%s)\", put, portsOrigin, port.PortNumber, port.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to port not in %s reserved port %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantPortFound {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Pod listens to or its containers declares some %s reserved port in %v\", portsOrigin, portsToTest), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcompliantObjects = append(compliantObjects,", + "\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\tfmt.Sprintf(\"Pod does not listen to or declare any %s reserved port in %v\", portsOrigin, portsToTest), true))", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + 
"}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testOCPReservedPortsUsage", + "kind": "function", + "source": [ + "func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// List of all ports reserved by OpenShift", + "\tOCPReservedPorts := map[int32]bool{", + "\t\t22623: true,", + "\t\t22624: true}", + "\tcompliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, OCPReservedPorts, \"OCP\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testPartnerSpecificTCPPorts", + "kind": "function", + "source": [ + "func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// List of all of the ports reserved by partner", + "\tReservedPorts := map[int32]bool{", + "\t\t15443: true,", + "\t\t15090: true,", + "\t\t15021: true,", + "\t\t15020: true,", + "\t\t15014: true,", + "\t\t15008: true,", + "\t\t15006: true,", + "\t\t15001: true,", + "\t\t15000: true,", + "\t}", + "\tcompliantObjects, nonCompliantObjects := netcommons.TestReservedPortsUsage(env, ReservedPorts, \"Partner\", check.GetLogger())", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRoguePodsListeningToPorts(env.Pods, reservedPorts, portsOrigin, logger)", + "\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + 
"\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "findRogueContainersDeclaringPorts", + "qualifiedName": "findRogueContainersDeclaringPorts", + "exported": false, + "signature": "func([]*provider.Container, map[int32]bool, string, *log.Logger)([]*testhelper.ReportObject)", + "doc": "findRogueContainersDeclaringPorts identifies containers that declare prohibited ports\n\nThe function scans a list of containers, checking each declared port against\na set of reserved ports. For every match it records a non‑compliant report;\notherwise it logs compliance and creates a compliant report object. It\nreturns slices of these report objects for further processing.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:209", + "calls": [ + { + "name": "Info", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "Info", + "kind": 
"function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "findRoguePodsListeningToPorts", + "kind": "function", + "source": [ + "func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRogueContainersDeclaringPorts(put.Containers, portsToTest, portsOrigin, logger)", + "\t\tnonCompliantPortFound := len(nonCompliantObjectsEntries) \u003e 0", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "\t\tcut := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + 
"\t\t\tlogger.Error(\"Failed to get the listening ports on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Failed to get the listening ports on pod, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor port := range listeningPorts {", + "\t\t\tif ok := portsToTest[port.PortNumber]; ok {", + "\t\t\t\t// If pod contains an \"istio-proxy\" container, we need to make sure that the ports returned", + "\t\t\t\t// overlap with the known istio ports", + "\t\t\t\tif put.ContainsIstioProxy() \u0026\u0026 ReservedIstioPorts[port.PortNumber] {", + "\t\t\t\t\tlogger.Info(\"%q was found to be listening to port %d due to istio-proxy being present. Ignoring.\", put, port.PortNumber)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlogger.Error(\"%q has one container (%q) listening on port %d (%s) that has been reserved\", put, cut.Name, port.PortNumber, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t\tnonCompliantPortFound = true", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q listens in %s unreserved port %d (%s)\", put, portsOrigin, port.PortNumber, port.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to port not in %s reserved port %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + 
"\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantPortFound {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Pod listens to or its containers declares some %s reserved port in %v\", portsOrigin, portsToTest), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcompliantObjects = append(compliantObjects,", + "\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\tfmt.Sprintf(\"Pod does not listen to or declare any %s reserved port in %v\", portsOrigin, portsToTest), true))", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tfor _, port := range cut.Ports {", + "\t\t\tif portsToTest[port.ContainerPort] {", + "\t\t\t\tlogger.Error(\"%q declares %s reserved port %d (%s)\", cut, portsOrigin, port.ContainerPort, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Container declares %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.DeclaredPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.ContainerPort))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, string(port.Protocol)))", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q does not declare any %s reserved port\", cut, portsOrigin)", 
+ "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Container does not declare %s reserved port in %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.DeclaredPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.ContainerPort))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, string(port.Protocol)))", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "findRoguePodsListeningToPorts", + "qualifiedName": "findRoguePodsListeningToPorts", + "exported": false, + "signature": "func([]*provider.Pod, map[int32]bool, string, *log.Logger)([]*testhelper.ReportObject)", + "doc": "findRoguePodsListeningToPorts Detects pods that are listening on or declaring reserved ports\n\nThe function iterates over each pod, checking its containers for declared\nports and actual listening sockets against a set of prohibited port numbers.\nIt logs detailed information and generates report objects indicating\ncompliance status for both container declarations and pod-level listening\nbehavior. 
Non‑compliant pods are reported with the specific port and\nprotocol that violates the reservation rules.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:257", + "calls": [ + { + "name": "Info", + "kind": "function" + }, + { + "name": "findRogueContainersDeclaringPorts", + "kind": "function", + "source": [ + "func findRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, cut := range containers {", + "\t\tlogger.Info(\"Testing Container %q\", cut)", + "\t\tfor _, port := range cut.Ports {", + "\t\t\tif portsToTest[port.ContainerPort] {", + "\t\t\t\tlogger.Error(\"%q declares %s reserved port %d (%s)\", cut, portsOrigin, port.ContainerPort, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Container declares %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.DeclaredPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.ContainerPort))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, string(port.Protocol)))", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q does not declare any %s reserved port\", cut, portsOrigin)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Container does not declare %s reserved port in %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.DeclaredPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.ContainerPort))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, string(port.Protocol)))", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn 
compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "GetListeningPorts", + "kind": "function", + "source": [ + "func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) {", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s on %s, err: %v\", getListeningPortsCmd, cut, err)", + "\t}", + "", + "\treturn parseListeningPorts(outStr)", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "ContainsIstioProxy", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + 
"\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "Info", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "SetType", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + 
"source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "TestReservedPortsUsage", + "kind": "function", + "source": [ + "func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRoguePodsListeningToPorts(env.Pods, reservedPorts, portsOrigin, logger)", + "\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRogueContainersDeclaringPorts(put.Containers, portsToTest, portsOrigin, logger)", + "\t\tnonCompliantPortFound := len(nonCompliantObjectsEntries) \u003e 0", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "\t\tcut := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + 
"\t\t\tlogger.Error(\"Failed to get the listening ports on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Failed to get the listening ports on pod, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor port := range listeningPorts {", + "\t\t\tif ok := portsToTest[port.PortNumber]; ok {", + "\t\t\t\t// If pod contains an \"istio-proxy\" container, we need to make sure that the ports returned", + "\t\t\t\t// overlap with the known istio ports", + "\t\t\t\tif put.ContainsIstioProxy() \u0026\u0026 ReservedIstioPorts[port.PortNumber] {", + "\t\t\t\t\tlogger.Info(\"%q was found to be listening to port %d due to istio-proxy being present. Ignoring.\", put, port.PortNumber)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlogger.Error(\"%q has one container (%q) listening on port %d (%s) that has been reserved\", put, cut.Name, port.PortNumber, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t\tnonCompliantPortFound = true", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q listens in %s unreserved port %d (%s)\", put, portsOrigin, port.PortNumber, port.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to port not in %s reserved port %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + 
"\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantPortFound {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Pod listens to or its containers declares some %s reserved port in %v\", portsOrigin, portsToTest), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcompliantObjects = append(compliantObjects,", + "\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\tfmt.Sprintf(\"Pod does not listen to or declare any %s reserved port in %v\", portsOrigin, portsToTest), true))", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + } + ], + "globals": [ + { + "name": "ReservedIstioPorts", + "exported": true, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:235" + } + ], + "consts": [ + { + "name": "DEFAULT", + "exported": true, + "type": "IFType", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:48" + }, + { + "name": "IPv4", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:37" + }, + { + "name": "IPv4String", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:43" + }, + { + "name": "IPv4v6", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:39" + }, + { + "name": "IPv4v6String", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:45" + }, + { + "name": "IPv6", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:38" + }, + { + "name": "IPv6String", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:44" + }, + { + "name": "MULTUS", + "exported": true, + "type": "IFType", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:47" + }, + { + "name": "Undefined", + "exported": true, + "type": "IPVersion", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:36" + }, + { + "name": "UndefinedString", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netcommons/netcommons.go:46" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "netutil", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "strconv", + "strings" + ], + "structs": [ + { + "name": "PortInfo", + "exported": true, + "doc": "PortInfo Describes a network port with number and protocol\n\nThis structure holds the numeric value of a listening port and the transport\nprotocol used, such as TCP or UDP. It is used to identify unique ports in\nmappings returned by functions that parse command output for listening\nsockets.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:42", + "fields": { + "PortNumber": "int32", + "Protocol": "string" + }, + "methodNames": null, + "source": [ + "type PortInfo struct {", + "\tPortNumber int32", + "\tProtocol string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "GetListeningPorts", + "qualifiedName": "GetListeningPorts", + "exported": true, + "signature": "func(*provider.Container)(map[PortInfo]bool, error)", + "doc": "GetListeningPorts Retrieves the set of ports currently listening inside a container\n\nThe function runs an nsenter command inside the target container to list open\nsockets, then parses the output into a map keyed by port information. 
It\nreturns this map along with any error that occurs during execution or\nparsing.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:92", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "ExecCommandContainerNSEnter", + "kind": "function", + "source": [ + "func ExecCommandContainerNSEnter(command string,", + "\taContainer *provider.Container) (outStr, errStr string, err error) {", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(aContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", aContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\t// Get the container PID to build the nsenter command", + "\tcontainerPid, err := GetPidFromContainer(aContainer, ctx)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot get PID from: %s, err: %v\", aContainer, err)", + "\t}", + "", + "\t// Add the container PID and the specific command to run with nsenter", + "\tnsenterCommand := \"nsenter -t \" + strconv.Itoa(containerPid) + \" -n \" + command", + "", + "\t// Run the nsenter command on the probe pod with retry logic", + "\tfor attempt := 1; attempt \u003c= RetryAttempts; attempt++ {", + "\t\toutStr, errStr, err = ch.ExecCommandContainer(ctx, nsenterCommand)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\tif attempt \u003c RetryAttempts {", + "\t\t\ttime.Sleep(RetrySleepSeconds * time.Second)", + "\t\t}", + "\t}", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", command, aContainer, err)", + "\t}", + "", + "\treturn outStr, errStr, err", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "parseListeningPorts", + "kind": "function", + "source": [ + "func 
parseListeningPorts(cmdOut string) (map[PortInfo]bool, error) {", + "\tportSet := make(map[PortInfo]bool)", + "", + "\tcmdOut = strings.TrimSuffix(cmdOut, \"\\n\")", + "\tlines := strings.Split(cmdOut, \"\\n\")", + "\tfor _, line := range lines {", + "\t\tfields := strings.Fields(line)", + "\t\tif len(fields) \u003c indexPort+1 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tif fields[indexState] != portStateListen {", + "\t\t\tcontinue", + "\t\t}", + "\t\ts := strings.Split(fields[indexPort], \":\")", + "\t\tif len(s) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tport, err := strconv.ParseInt(s[len(s)-1], 10, 32)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"string to int conversion error, err: %v\", err)", + "\t\t}", + "\t\tprotocol := strings.ToUpper(fields[indexProtocol])", + "\t\tportInfo := PortInfo{int32(port), protocol}", + "", + "\t\tportSet[portInfo] = true", + "\t}", + "", + "\treturn portSet, nil", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNoSSHDaemonsAllowed", + "kind": "function", + "source": [ + "func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcut := put.Containers[0]", + "", + "\t\t// 1. 
Find SSH port", + "\t\tport, err := netutil.GetSSHDaemonPort(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get ssh daemon port on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the ssh port for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif port == \"\" {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsshServicePortNumber, err := strconv.ParseInt(port, 10, 32)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not convert port %q from string to integer on Container %q\", port, cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// 2. 
Check if SSH port is listening", + "\t\tsshPortInfo := netutil.PortInfo{PortNumber: int32(sshServicePortNumber), Protocol: sshServicePortProtocol}", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get the listening ports for Pod %q, err: %v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif _, ok := listeningPorts[sshPortInfo]; ok {", + "\t\t\tcheck.LogError(\"Pod %q is running an SSH daemon\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running an SSH daemon\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testUndeclaredContainerPortsUsage", + "kind": "function", + "source": [ + "func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tvar portInfo netutil.PortInfo", + "\tfor _, put := range env.Pods {", + "\t\t// First get the ports declared in the Pod's containers spec", + "\t\tdeclaredPorts := make(map[netutil.PortInfo]bool)", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tfor _, port := range cut.Ports {", + "\t\t\t\tportInfo.PortNumber = port.ContainerPort", + "\t\t\t\tportInfo.Protocol = 
string(port.Protocol)", + "\t\t\t\tdeclaredPorts[portInfo] = true", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Then check the actual ports that the containers are listening on", + "\t\tfirstPodContainer := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(firstPodContainer)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get container %q listening ports, err: %v\", firstPodContainer, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, fmt.Sprintf(\"Failed to get the container's listening ports, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tif len(listeningPorts) == 0 {", + "\t\t\tcheck.LogInfo(\"None of the containers of %q have any listening port.\", put)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"None of the containers have any listening ports\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Verify that all the listening ports have been declared in the container spec", + "\t\tfailedPod := false", + "\t\tfor listeningPort := range listeningPorts {", + "\t\t\tif put.ContainsIstioProxy() \u0026\u0026 netcommons.ReservedIstioPorts[listeningPort.PortNumber] {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on port %d protocol %q, but the pod also contains istio-proxy. 
Ignoring.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tif ok := declaredPorts[listeningPort]; !ok {", + "\t\t\t\tcheck.LogError(\"%q is listening on port %d protocol %q, but that port was not declared in any container spec.\",", + "\t\t\t\t\tput, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tfailedPod = true", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in no container spec\", false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"%q is listening on declared port %d protocol %q\", put, listeningPort.PortNumber, listeningPort.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\"Listening port was declared in container spec\", true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(listeningPort.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, listeningPort.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif failedPod {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"At least one port was listening but not declared in any container specs\", false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(put.Namespace, put.Name, \"All listening were declared in containers specs\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "findRoguePodsListeningToPorts", + "kind": "function", + "source": [ + "func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tfor _, put := range pods {", + "\t\tlogger.Info(\"Testing Pod %q\", put)", + "\t\tcompliantObjectsEntries, nonCompliantObjectsEntries := findRogueContainersDeclaringPorts(put.Containers, portsToTest, portsOrigin, logger)", + "\t\tnonCompliantPortFound := len(nonCompliantObjectsEntries) \u003e 0", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsEntries...)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantObjectsEntries...)", + "\t\tcut := put.Containers[0]", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Failed to get the listening ports on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Failed to get the listening ports on pod, err: %v\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tfor port := range listeningPorts {", + "\t\t\tif ok := portsToTest[port.PortNumber]; ok {", + "\t\t\t\t// If pod contains an \"istio-proxy\" container, we need to make sure that the ports returned", + "\t\t\t\t// overlap with the known istio ports", + "\t\t\t\tif put.ContainsIstioProxy() \u0026\u0026 ReservedIstioPorts[port.PortNumber] {", + "\t\t\t\t\tlogger.Info(\"%q was found to be listening to port %d due to istio-proxy being present. 
Ignoring.\", put, port.PortNumber)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlogger.Error(\"%q has one container (%q) listening on port %d (%s) that has been reserved\", put, cut.Name, port.PortNumber, port.Protocol)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to %s reserved port in %v\", portsOrigin, portsToTest), false).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t\tnonCompliantPortFound = true", + "\t\t\t} else {", + "\t\t\t\tlogger.Info(\"%q listens in %s unreserved port %d (%s)\", put, portsOrigin, port.PortNumber, port.Protocol)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\t\tfmt.Sprintf(\"Pod Listens to port not in %s reserved port %v\", portsOrigin, portsToTest), true).", + "\t\t\t\t\t\tSetType(testhelper.ListeningPortType).", + "\t\t\t\t\t\tAddField(testhelper.PortNumber, strconv.Itoa(int(port.PortNumber))).", + "\t\t\t\t\t\tAddField(testhelper.PortProtocol, port.Protocol))", + "\t\t\t}", + "\t\t}", + "\t\tif nonCompliantPortFound {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\t\tfmt.Sprintf(\"Pod listens to or its containers declares some %s reserved port in %v\", portsOrigin, portsToTest), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcompliantObjects = append(compliantObjects,", + "\t\t\ttesthelper.NewPodReportObject(cut.Namespace, put.Name,", + "\t\t\t\tfmt.Sprintf(\"Pod does not listen to or declare any %s reserved port in %v\", portsOrigin, portsToTest), true))", + "\t}", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + } + ], 
+ "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) {", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s on %s, err: %v\", getListeningPortsCmd, cut, err)", + "\t}", + "", + "\treturn parseListeningPorts(outStr)", + "}" + ] + }, + { + "name": "GetSSHDaemonPort", + "qualifiedName": "GetSSHDaemonPort", + "exported": true, + "signature": "func(*provider.Container)(string, error)", + "doc": "GetSSHDaemonPort Retrieves the SSH daemon listening port within a container\n\nThis function runs a shell command inside the specified container to locate\nthe sshd process and extract its bound TCP port. It executes the command via\nnsenter, handles any execution errors or non‑empty stderr output, and\nreturns the trimmed port number as a string. If the command fails or returns\nno output, an error is returned.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:108", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "ExecCommandContainerNSEnter", + "kind": "function", + "source": [ + "func ExecCommandContainerNSEnter(command string,", + "\taContainer *provider.Container) (outStr, errStr string, err error) {", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(aContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", aContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\t// Get the container PID to build the nsenter command", + "\tcontainerPid, err := GetPidFromContainer(aContainer, ctx)", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot get PID from: %s, err: %v\", aContainer, 
err)", + "\t}", + "", + "\t// Add the container PID and the specific command to run with nsenter", + "\tnsenterCommand := \"nsenter -t \" + strconv.Itoa(containerPid) + \" -n \" + command", + "", + "\t// Run the nsenter command on the probe pod with retry logic", + "\tfor attempt := 1; attempt \u003c= RetryAttempts; attempt++ {", + "\t\toutStr, errStr, err = ch.ExecCommandContainer(ctx, nsenterCommand)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\tif attempt \u003c RetryAttempts {", + "\t\t\ttime.Sleep(RetrySleepSeconds * time.Second)", + "\t\t}", + "\t}", + "\tif err != nil {", + "\t\treturn \"\", \"\", fmt.Errorf(\"cannot execute command: \\\" %s \\\" on %s err:%s\", command, aContainer, err)", + "\t}", + "", + "\treturn outStr, errStr, err", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNoSSHDaemonsAllowed", + "kind": "function", + "source": [ + "func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tcut := put.Containers[0]", + "", + "\t\t// 1. 
Find SSH port", + "\t\tport, err := netutil.GetSSHDaemonPort(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get ssh daemon port on %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the ssh port for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif port == \"\" {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsshServicePortNumber, err := strconv.ParseInt(port, 10, 32)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not convert port %q from string to integer on Container %q\", port, cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// 2. 
Check if SSH port is listening", + "\t\tsshPortInfo := netutil.PortInfo{PortNumber: int32(sshServicePortNumber), Protocol: sshServicePortProtocol}", + "\t\tlisteningPorts, err := netutil.GetListeningPorts(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get the listening ports for Pod %q, err: %v\", put, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Failed to get the listening ports for pod\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif _, ok := listeningPorts[sshPortInfo]; ok {", + "\t\t\tcheck.LogError(\"Pod %q is running an SSH daemon\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is running an SSH daemon\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q is not running an SSH daemon\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod is not running an SSH daemon\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetSSHDaemonPort(cut *provider.Container) (string, error) {", + "\tconst findSSHDaemonPort = \"ss -tpln | grep sshd | head -1 | awk '{ print $4 }' | awk -F : '{ print $2 }'\"", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(findSSHDaemonPort, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"failed to execute command %s on %s, err: %v\", findSSHDaemonPort, cut, err)", + "\t}", + "", + "\treturn strings.TrimSpace(outStr), nil", + "}" + ] + }, + { + "name": "parseListeningPorts", + "qualifiedName": "parseListeningPorts", + "exported": false, + "signature": "func(string)(map[PortInfo]bool, error)", + "doc": "parseListeningPorts parses command output into a map of listening ports\n\nThe function takes the 
raw string from a network command and splits it line\nby line, extracting protocol and port number when the state indicates LISTEN.\nIt converts the numeric part to an integer, normalizes the protocol name to\nupper case, and stores each unique pair in a map keyed by PortInfo with a\nboolean value of true. Errors during conversion cause an immediate return\nwith an error message.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:55", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSuffix", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "ParseInt", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToUpper", + "kind": "function" + }, + { + "name": "int32", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil", + "name": "GetListeningPorts", + "kind": "function", + "source": [ + "func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) {", + "\toutStr, errStr, err := crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s on %s, err: %v\", getListeningPortsCmd, cut, err)", + "\t}", + "", + "\treturn parseListeningPorts(outStr)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func parseListeningPorts(cmdOut string) (map[PortInfo]bool, error) {", + "\tportSet := 
make(map[PortInfo]bool)", + "", + "\tcmdOut = strings.TrimSuffix(cmdOut, \"\\n\")", + "\tlines := strings.Split(cmdOut, \"\\n\")", + "\tfor _, line := range lines {", + "\t\tfields := strings.Fields(line)", + "\t\tif len(fields) \u003c indexPort+1 {", + "\t\t\tcontinue", + "\t\t}", + "\t\tif fields[indexState] != portStateListen {", + "\t\t\tcontinue", + "\t\t}", + "\t\ts := strings.Split(fields[indexPort], \":\")", + "\t\tif len(s) == 0 {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tport, err := strconv.ParseInt(s[len(s)-1], 10, 32)", + "\t\tif err != nil {", + "\t\t\treturn nil, fmt.Errorf(\"string to int conversion error, err: %v\", err)", + "\t\t}", + "\t\tprotocol := strings.ToUpper(fields[indexProtocol])", + "\t\tportInfo := PortInfo{int32(port), protocol}", + "", + "\t\tportSet[portInfo] = true", + "\t}", + "", + "\treturn portSet, nil", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "getListeningPortsCmd", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:29" + }, + { + "name": "indexPort", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:33" + }, + { + "name": "indexProtocol", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:31" + }, + { + "name": "indexState", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:32" + }, + { + "name": "portStateListen", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/networking/netutil/netutil.go:30" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/policies", + "name": "policies", + "files": 1, + "imports": [ + "k8s.io/api/networking/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "IsNetworkPolicyCompliant", + "qualifiedName": "IsNetworkPolicyCompliant", + 
"exported": true, + "signature": "func(*networkingv1.NetworkPolicy, networkingv1.PolicyType)(bool, string)", + "doc": "IsNetworkPolicyCompliant Checks if a network policy enforces an empty rule for the specified direction\n\nThe function receives a network policy and a policy . It verifies that the\npolicy includes the given type, that its rules are nil or empty, and that the\noverall PolicyTypes slice is not empty. If these conditions hold it returns\ntrue; otherwise false with an explanatory reason.\n\nnolint:gocritic // unnamed results", + "position": "/Users/deliedit/dev/certsuite/tests/networking/policies/policies.go:32", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testNetworkPolicyDenyAll", + "kind": "function", + "source": [ + "func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the pods, looking for corresponding entries within a deny-all network policy (both ingress and egress).", + "\t// This ensures that each pod is accounted for that we are tasked with testing and excludes any pods that are not marked", + "\t// for testing (via the labels).", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tdenyAllEgressFound := false", + "\t\tdenyAllIngressFound := false", + "", + "\t\t// Look through all of the network policies for a matching namespace.", + "\t\tfor index := range env.NetworkPolicies {", + "\t\t\tnetworkPolicy := env.NetworkPolicies[index]", + "\t\t\tcheck.LogInfo(\"Testing Network policy %q against pod %q\", networkPolicy.Name, put)", + "", + "\t\t\t// Skip any network policies that don't match the namespace 
of the pod we are testing.", + "\t\t\tif networkPolicy.Namespace != put.Namespace {", + "\t\t\t\tcheck.LogInfo(\"Skipping Network policy %q (namespace %q does not match Pod namespace %q)\", networkPolicy.Name, networkPolicy.Namespace, put.Namespace)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Match the pod namespace with the network policy namespace.", + "\t\t\tif policies.LabelsMatch(networkPolicy.Spec.PodSelector, put.Labels) {", + "\t\t\t\tvar reason string", + "\t\t\t\tif !denyAllEgressFound {", + "\t\t\t\t\tdenyAllEgressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeEgress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tif !denyAllIngressFound {", + "\t\t\t\t\tdenyAllIngressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeIngress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Network policy has not been found that contains a deny-all rule for both ingress and egress.", + "\t\tpodIsCompliant := true", + "\t\tif !denyAllIngressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default ingress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default ingress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif !denyAllEgressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default egress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was 
found to not have a default egress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has a default ingress/egress deny-all network policy\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a default ingress/egress deny-all network policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) (bool, string) {", + "\t// As long as we have decided above that there is no pod selector,", + "\t// we just have to make sure that the policy type is either Ingress or Egress (or both) we can return true.", + "\t// For more information about deny-all policies, there are some good examples on:", + "\t// https://kubernetes.io/docs/concepts/services-networking/network-policies/", + "", + "\tif len(np.Spec.PolicyTypes) == 0 {", + "\t\treturn false, \"empty policy types\"", + "\t}", + "", + "\t// Ingress and Egress rules should be \"empty\" if it is a default rule.", + "\tif policyType == networkingv1.PolicyTypeEgress {", + "\t\tif np.Spec.Egress != nil || len(np.Spec.Egress) \u003e 0 {", + "\t\t\treturn false, \"egress spec not empty for default egress rule\"", + "\t\t}", + "\t}", + "", + "\tif policyType == networkingv1.PolicyTypeIngress {", + "\t\tif np.Spec.Ingress != nil || len(np.Spec.Ingress) \u003e 0 {", + "\t\t\treturn false, \"ingress spec not empty for default ingress rule\"", + "\t\t}", + "\t}", + "", + "\tpolicyTypeFound := false", + "\t// Look through the returned policies to see if they match the desired policyType", + "\tfor _, p := range np.Spec.PolicyTypes {", + "\t\tif p == policyType {", + "\t\t\tpolicyTypeFound = true", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + 
"\treturn policyTypeFound, \"\"", + "}" + ] + }, + { + "name": "LabelsMatch", + "qualifiedName": "LabelsMatch", + "exported": true, + "signature": "func(v1.LabelSelector, map[string]string)(bool)", + "doc": "LabelsMatch Determines whether a pod satisfies the selector's label constraints\n\nThe function examines the labels specified in a pod selector against the\nactual labels of a pod. If the selector has no labels, it immediately returns\ntrue, meaning all pods match. Otherwise, it checks for at least one matching\nkey/value pair and returns true only if such a pair is found.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/policies/policies.go:73", + "calls": [ + { + "name": "Size", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testNetworkPolicyDenyAll", + "kind": "function", + "source": [ + "func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the pods, looking for corresponding entries within a deny-all network policy (both ingress and egress).", + "\t// This ensures that each pod is accounted for that we are tasked with testing and excludes any pods that are not marked", + "\t// for testing (via the labels).", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tdenyAllEgressFound := false", + "\t\tdenyAllIngressFound := false", + "", + "\t\t// Look through all of the network policies for a matching namespace.", + "\t\tfor index := range env.NetworkPolicies {", + "\t\t\tnetworkPolicy := env.NetworkPolicies[index]", + "\t\t\tcheck.LogInfo(\"Testing Network policy %q against pod %q\", networkPolicy.Name, put)", + "", + "\t\t\t// Skip any network policies that don't match the namespace of the pod we are testing.", + "\t\t\tif 
networkPolicy.Namespace != put.Namespace {", + "\t\t\t\tcheck.LogInfo(\"Skipping Network policy %q (namespace %q does not match Pod namespace %q)\", networkPolicy.Name, networkPolicy.Namespace, put.Namespace)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Match the pod namespace with the network policy namespace.", + "\t\t\tif policies.LabelsMatch(networkPolicy.Spec.PodSelector, put.Labels) {", + "\t\t\t\tvar reason string", + "\t\t\t\tif !denyAllEgressFound {", + "\t\t\t\t\tdenyAllEgressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeEgress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tif !denyAllIngressFound {", + "\t\t\t\t\tdenyAllIngressFound, reason = policies.IsNetworkPolicyCompliant(\u0026networkPolicy, networkingv1.PolicyTypeIngress)", + "\t\t\t\t\tif reason != \"\" {", + "\t\t\t\t\t\tcheck.LogError(\"Network policy %q is not compliant, reason=%q\", networkPolicy.Name, reason)", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Network policy has not been found that contains a deny-all rule for both ingress and egress.", + "\t\tpodIsCompliant := true", + "\t\tif !denyAllIngressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default ingress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default ingress deny-all network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif !denyAllEgressFound {", + "\t\t\tcheck.LogError(\"Pod %q was found to not have a default egress deny-all network policy.\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod was found to not have a default egress deny-all 
network policy\", false))", + "\t\t\tpodIsCompliant = false", + "\t\t}", + "", + "\t\tif podIsCompliant {", + "\t\t\tcheck.LogInfo(\"Pod %q has a default ingress/egress deny-all network policy\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has a default ingress/egress deny-all network policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LabelsMatch(podSelectorLabels v1.LabelSelector, podLabels map[string]string) bool {", + "\tlabelMatch := false", + "", + "\t// When the pod selector label is empty, it will always match the pod", + "\tif podSelectorLabels.Size() == 0 {", + "\t\treturn true", + "\t}", + "", + "\tfor psLabelKey, psLabelValue := range podSelectorLabels.MatchLabels {", + "\t\tfor podLabelKey, podLabelValue := range podLabels {", + "\t\t\tif psLabelKey == podLabelKey \u0026\u0026 psLabelValue == podLabelValue {", + "\t\t\t\tlabelMatch = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif labelMatch {", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn labelMatch", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "services", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "k8s.io/api/core/v1" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GetServiceIPVersion", + "qualifiedName": "GetServiceIPVersion", + "exported": true, + "signature": "func(*corev1.Service)(netcommons.IPVersion, error)", + "doc": "GetServiceIPVersion Determines the IP stack type of a Kubernetes Service\n\nThe function examines a service's ClusterIP, IPFamilyPolicy, and 
any\nadditional ClusterIPs to decide whether it is single‑stack IPv4,\nsingle‑stack IPv6, or dual‑stack. It returns an IPVersion value along\nwith an error if the configuration cannot be resolved or violates\nexpectations. Logging statements provide debug context for each decision\npath.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/services/services.go:35", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "GetIPVersion", + "kind": "function", + "source": [ + "func GetIPVersion(aIP string) (IPVersion, error) {", + "\tip := net.ParseIP(aIP)", + "\tif ip == nil {", + "\t\treturn Undefined, fmt.Errorf(\"%s is Not an IPv4 or an IPv6\", aIP)", + "\t}", + "\tif ip.To4() != nil {", + "\t\treturn IPv4, nil", + "\t}", + "\treturn IPv6, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func 
ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "name": "isClusterIPsDualStack", + "kind": "function", + "source": [ + "func isClusterIPsDualStack(ips []string) (result bool, err error) {", + "\tvar hasIPv4, hasIPv6 bool", + "\tfor _, ip := range ips {", + "\t\tipver, err := netcommons.GetIPVersion(ip)", + "\t\tif err != nil {", + "\t\t\treturn result, fmt.Errorf(\"cannot get aService ClusterIPs (%s) version - err: %v\", ip, err)", + "\t\t}", + "\t\tswitch ipver {", + "\t\tcase netcommons.IPv4:", + "\t\t\thasIPv4 = true", + "\t\tcase netcommons.IPv6:", + "\t\t\thasIPv6 = true", + "\t\tcase netcommons.IPv4v6, netcommons.Undefined:", + "\t\t}", + "\t}", + "\tif hasIPv4 \u0026\u0026 hasIPv6 {", + "\t\treturn true, 
nil", + "\t}", + "\treturn false, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function", + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", + "\t\taService.Spec.ClusterIPs)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking", + "name": "testDualStackServices", + "kind": "function", + "source": [ + "func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing Service %q\", s.Name)", + "\t\tserviceIPVersion, err := 
services.GetServiceIPVersion(s)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get IP version from Service %q, err=%v\", s.Name, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Could not get IP Version from service\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name))", + "\t\t}", + "\t\tif serviceIPVersion == netcommons.Undefined || serviceIPVersion == netcommons.IPv4 {", + "\t\t\tcheck.LogError(\"Service %q (ns: %q) only supports IPv4\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service supports only IPv4\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Service %q (ns: %q) supports IPv6 or is dual stack\", s.Name, s.Namespace)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service supports IPv6 or is dual stack\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceIPVersion, serviceIPVersion.String()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) {", + "\tipver, err := netcommons.GetIPVersion(aService.Spec.ClusterIP)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s cannot get aService clusterIP version\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif aService.Spec.IPFamilyPolicy == nil {", + "\t\terr = fmt.Errorf(\"%s 
does not have a IPFamilyPolicy configured\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv6 {", + "\t\tlog.Debug(\"%s is single stack ipv6\", ToString(aService))", + "\t\treturn netcommons.IPv6, nil", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv4 {", + "\t\tlog.Debug(\"%s is single stack ipv4\", ToString(aService))", + "\t\treturn netcommons.IPv4, nil", + "\t}", + "\tif (*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyPreferDualStack ||", + "\t\t*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyRequireDualStack) \u0026\u0026", + "\t\tlen(aService.Spec.ClusterIPs) \u003c 2 {", + "\t\terr = fmt.Errorf(\"%s is dual stack but has only zero or one ClusterIPs\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "", + "\tres, err := isClusterIPsDualStack(aService.Spec.ClusterIPs)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s, err:%s\", ToString(aService), err)", + "\t\treturn result, err", + "\t}", + "\tif res {", + "\t\tlog.Debug(\"%s is dual-stack\", ToString(aService))", + "\t\treturn netcommons.IPv4v6, nil", + "\t}", + "", + "\terr = fmt.Errorf(\"%s is not compliant, it is not single stack ipv6 or dual stack\", ToString(aService))", + "\treturn result, err", + "}" + ] + }, + { + "name": "ToString", + "qualifiedName": "ToString", + "exported": true, + "signature": "func(*corev1.Service)(string)", + "doc": "ToString Formats a service's namespace, name, cluster IPs, and IP family into a readable string\n\nThis function takes a pointer to a Kubernetes Service object and constructs a\nsingle-line description that includes the service's namespace, name, primary\nClusterIP, and all associated ClusterIPs. It uses string formatting to\nconcatenate these fields in a human‑readable format, which is useful for\nlogging and error messages. 
The result is returned as a plain string.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/services/services.go:83", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol", + "name": "testNodePort", + "kind": "function", + "source": [ + "func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, s := range env.Services {", + "\t\tcheck.LogInfo(\"Testing %q\", services.ToString(s))", + "", + "\t\tif s.Spec.Type == nodePort {", + "\t\t\tcheck.LogError(\"Service %q (ns %q) type is nodePort\", s.Name, s.Namespace)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Service is type NodePort\", testhelper.ServiceType, false).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Service %q (ns %q) type is not nodePort (type=%q)\", s.Name, s.Namespace, s.Spec.Type)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Service is not type NodePort\", testhelper.ServiceType, true).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\tAddField(testhelper.ServiceName, s.Name).", + "\t\t\t\tAddField(testhelper.ServiceMode, string(s.Spec.Type)))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "GetServiceIPVersion", + "kind": "function", + "source": [ + "func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) {", + "\tipver, err := 
netcommons.GetIPVersion(aService.Spec.ClusterIP)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s cannot get aService clusterIP version\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif aService.Spec.IPFamilyPolicy == nil {", + "\t\terr = fmt.Errorf(\"%s does not have a IPFamilyPolicy configured\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv6 {", + "\t\tlog.Debug(\"%s is single stack ipv6\", ToString(aService))", + "\t\treturn netcommons.IPv6, nil", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv4 {", + "\t\tlog.Debug(\"%s is single stack ipv4\", ToString(aService))", + "\t\treturn netcommons.IPv4, nil", + "\t}", + "\tif (*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyPreferDualStack ||", + "\t\t*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyRequireDualStack) \u0026\u0026", + "\t\tlen(aService.Spec.ClusterIPs) \u003c 2 {", + "\t\terr = fmt.Errorf(\"%s is dual stack but has only zero or one ClusterIPs\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "", + "\tres, err := isClusterIPsDualStack(aService.Spec.ClusterIPs)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s, err:%s\", ToString(aService), err)", + "\t\treturn result, err", + "\t}", + "\tif res {", + "\t\tlog.Debug(\"%s is dual-stack\", ToString(aService))", + "\t\treturn netcommons.IPv4v6, nil", + "\t}", + "", + "\terr = fmt.Errorf(\"%s is not compliant, it is not single stack ipv6 or dual stack\", ToString(aService))", + "\treturn result, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ToString(aService *corev1.Service) (out string) {", + "\treturn fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\", aService.Namespace,", + "\t\taService.Name,", + "\t\taService.Spec.ClusterIP,", 
+ "\t\taService.Spec.ClusterIPs)", + "}" + ] + }, + { + "name": "ToStringSlice", + "qualifiedName": "ToStringSlice", + "exported": true, + "signature": "func([]*corev1.Service)(string)", + "doc": "ToStringSlice Lists services with namespace, name, ClusterIP and IP addresses\n\nThe function iterates over a slice of service objects, appending formatted\ninformation for each one to a single string. For every service it records the\nnamespace, name, primary ClusterIP, and any additional cluster IPs. The\nresulting multi-line string is returned.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/services/services.go:96", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ToStringSlice(manyServices []*corev1.Service) (out string) {", + "\tfor _, aService := range manyServices {", + "\t\tout += fmt.Sprintf(\"Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\\n\", aService.Namespace,", + "\t\t\taService.Name,", + "\t\t\taService.Spec.ClusterIP,", + "\t\t\taService.Spec.ClusterIPs)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "isClusterIPsDualStack", + "qualifiedName": "isClusterIPsDualStack", + "exported": false, + "signature": "func([]string)(bool, error)", + "doc": "isClusterIPsDualStack verifies that a service's ClusterIPs include both IPv4 and IPv6 addresses\n\nThe function iterates over each IP string, determines its version using an\nexternal helper, and records whether any IPv4 or IPv6 address appears. If\nboth types are present it returns true; otherwise false. 
Errors from the\nhelper cause an early return with a descriptive message.", + "position": "/Users/deliedit/dev/certsuite/tests/networking/services/services.go:112", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons", + "name": "GetIPVersion", + "kind": "function", + "source": [ + "func GetIPVersion(aIP string) (IPVersion, error) {", + "\tip := net.ParseIP(aIP)", + "\tif ip == nil {", + "\t\treturn Undefined, fmt.Errorf(\"%s is Not an IPv4 or an IPv6\", aIP)", + "\t}", + "\tif ip.To4() != nil {", + "\t\treturn IPv4, nil", + "\t}", + "\treturn IPv6, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services", + "name": "GetServiceIPVersion", + "kind": "function", + "source": [ + "func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) {", + "\tipver, err := netcommons.GetIPVersion(aService.Spec.ClusterIP)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s cannot get aService clusterIP version\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif aService.Spec.IPFamilyPolicy == nil {", + "\t\terr = fmt.Errorf(\"%s does not have a IPFamilyPolicy configured\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv6 {", + "\t\tlog.Debug(\"%s is single stack ipv6\", ToString(aService))", + "\t\treturn netcommons.IPv6, nil", + "\t}", + "\tif *aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicySingleStack \u0026\u0026", + "\t\tipver == netcommons.IPv4 {", + "\t\tlog.Debug(\"%s is single stack ipv4\", ToString(aService))", + "\t\treturn netcommons.IPv4, nil", + "\t}", + "\tif (*aService.Spec.IPFamilyPolicy == corev1.IPFamilyPolicyPreferDualStack ||", + "\t\t*aService.Spec.IPFamilyPolicy == 
corev1.IPFamilyPolicyRequireDualStack) \u0026\u0026", + "\t\tlen(aService.Spec.ClusterIPs) \u003c 2 {", + "\t\terr = fmt.Errorf(\"%s is dual stack but has only zero or one ClusterIPs\", ToString(aService))", + "\t\treturn result, err", + "\t}", + "", + "\tres, err := isClusterIPsDualStack(aService.Spec.ClusterIPs)", + "\tif err != nil {", + "\t\terr = fmt.Errorf(\"%s, err:%s\", ToString(aService), err)", + "\t\treturn result, err", + "\t}", + "\tif res {", + "\t\tlog.Debug(\"%s is dual-stack\", ToString(aService))", + "\t\treturn netcommons.IPv4v6, nil", + "\t}", + "", + "\terr = fmt.Errorf(\"%s is not compliant, it is not single stack ipv6 or dual stack\", ToString(aService))", + "\treturn result, err", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isClusterIPsDualStack(ips []string) (result bool, err error) {", + "\tvar hasIPv4, hasIPv6 bool", + "\tfor _, ip := range ips {", + "\t\tipver, err := netcommons.GetIPVersion(ip)", + "\t\tif err != nil {", + "\t\t\treturn result, fmt.Errorf(\"cannot get aService ClusterIPs (%s) version - err: %v\", ip, err)", + "\t\t}", + "\t\tswitch ipver {", + "\t\tcase netcommons.IPv4:", + "\t\t\thasIPv4 = true", + "\t\tcase netcommons.IPv6:", + "\t\t\thasIPv6 = true", + "\t\tcase netcommons.IPv4v6, netcommons.Undefined:", + "\t\t}", + "\t}", + "\tif hasIPv4 \u0026\u0026 hasIPv6 {", + "\t\treturn true, nil", + "\t}", + "\treturn false, nil", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "observability", + "files": 2, + "imports": [ + "bytes", + "context", + "fmt", + "github.com/Masterminds/semver/v3", + "github.com/openshift/api/apiserver/v1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability/pdb", + "io", + "k8s.io/api/core/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/labels", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Initializes the observability test suite\n\nThe function creates a new checks group for observability and registers\nseveral checks related to logging, CRD status subresources, termination\nmessage policy, pod disruption budgets, and API compatibility with future\nOpenShift releases. Each check is configured with optional skip functions\nthat determine whether the environment contains relevant objects before\nexecution. 
Debug output records the loading of this suite.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:59", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func 
GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersLogging", + "kind": "function", + "source": [ + "func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Iterate through all the CUTs to get their log output. The TC checks that at least", + "\t// one log line is found.", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\thasLoggingOutput, err := containerHasLoggingOutput(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get %q log output, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not get log output\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif !hasLoggingOutput {", + "\t\t\tcheck.LogError(\"Container %q does not have any line of log to stderr/stdout\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No log line to stderr/stdout found\", false))", + "\t\t} else {", + 
"\t\t\tcheck.LogInfo(\"Container %q has some logging output\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Found log line to stderr/stdout\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoCrdsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Crds) == 0 {", + "\t\t\treturn true, \"no roles to check\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testCrds", + "kind": "function", + "source": [ + 
"func testCrds(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, crd := range env.Crds {", + "\t\tcheck.LogInfo(\"Testing CRD: %s\", crd.Name)", + "\t\tfor _, ver := range crd.Spec.Versions {", + "\t\t\tif _, ok := ver.Schema.OpenAPIV3Schema.Properties[\"status\"]; !ok {", + "\t\t\t\tcheck.LogError(\"CRD: %s, version: %s does not have a status subresource\", crd.Name, ver.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewReportObject(\"Crd does not have a status sub resource set\", testhelper.CustomResourceDefinitionType, false).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionName, crd.Name).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionVersion, ver.Name))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"CRD: %s, version: %s has a status subresource\", crd.Name, ver.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewReportObject(\"Crd has a status sub resource set\", testhelper.CustomResourceDefinitionType, true).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionName, crd.Name).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionVersion, ver.Name))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + 
"\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testTerminationMessagePolicy", + "kind": "function", + "source": [ + "func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.TerminationMessagePolicy != corev1.TerminationMessageFallbackToLogsOnError {", + "\t\t\tcheck.LogError(\"Container %q does not have a TerminationMessagePolicy: FallbackToLogsOnError (has %s)\", cut, cut.TerminationMessagePolicy)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"TerminationMessagePolicy is not FallbackToLogsOnError\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has a 
TerminationMessagePolicy: FallbackToLogsOnError\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"TerminationMessagePolicy is FallbackToLogsOnError\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipModeAll", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoDeploymentsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoDeploymentsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Deployments) == 0 {", + "\t\t\treturn true, \"no deployments to check found\"", + "\t\t}", + "", + "\t\treturn false, 
\"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoStatefulSetsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoStatefulSetsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.StatefulSets) == 0 {", + "\t\t\treturn true, \"no statefulSets to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodDisruptionBudgets", + "kind": "function", + "source": [ + "func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the of Deployments and StatefulSets and check if the PDBs are valid", + "\tfor _, d := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", d.ToString())", + "\t\tdeploymentSelector := labels.Set(d.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != d.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(deploymentSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, d.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for Deployment %q, err: %v\", pdb.Name, d.Name, err)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.DeploymentType, 
false).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for Deployment: %q\", pdb.Name, d.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Deployment: references PodDisruptionBudget\", testhelper.DeploymentType, true).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"Deployment %q is missing a corresponding PodDisruptionBudget\", d.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Deployment is missing a corresponding PodDisruptionBudget\", testhelper.DeploymentType, false).", + "\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\tAddField(testhelper.Namespace, d.Namespace))", + "\t\t}", + "\t}", + "", + "\tfor _, s := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", s.ToString())", + "\t\tstatefulSetSelector := labels.Set(s.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != s.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(statefulSetSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, 
s.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for StatefulSet %q, err: %v\", pdb.Name, s.Name, err)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.StatefulSetType, false).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for StatefulSet: %q\", pdb.Name, s.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"StatefulSet: references PodDisruptionBudget\", testhelper.StatefulSetType, true).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"StatefulSet %q is missing a corresponding PodDisruptionBudget\", s.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"StatefulSet is missing a corresponding PodDisruptionBudget\", testhelper.StatefulSetType, false).", + "\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + 
"\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testAPICompatibilityWithNextOCPRelease", + "kind": "function", + "source": [ + "func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tisOCP := provider.IsOCPCluster()", + "\tcheck.LogInfo(\"Is OCP: %v\", isOCP)", + "", + "\tif !isOCP {", + "\t\tcheck.LogInfo(\"The Kubernetes distribution is not OpenShift. 
Skipping API compatibility test.\")", + "\t\treturn", + "\t}", + "", + "\t// Retrieve APIRequestCount using clientsholder", + "\toc := clientsholder.GetClientsHolder()", + "\tapiRequestCounts, err := oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error retrieving APIRequestCount objects: %s\", err)", + "\t\treturn", + "\t}", + "", + "\t// Extract unique service account names from env.ServiceAccounts", + "\tworkloadServiceAccountNames := extractUniqueServiceAccountNames(env)", + "\tcheck.LogInfo(\"Detected %d unique service account names for the workload: %v\", len(workloadServiceAccountNames), workloadServiceAccountNames)", + "", + "\t// Build a map from service accounts to deprecated APIs", + "\tserviceAccountToDeprecatedAPIs := buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)", + "", + "\t// Evaluate API compliance with the next Kubernetes version", + "\tcompliantObjects, nonCompliantObjects := evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)", + "", + "\t// Add test results", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := 
checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "buildServiceAccountToDeprecatedAPIMap", + "qualifiedName": "buildServiceAccountToDeprecatedAPIMap", + "exported": false, + "signature": 
"func([]apiserv1.APIRequestCount, map[string]struct{})(map[string]map[string]string)", + "doc": "buildServiceAccountToDeprecatedAPIMap Creates a mapping of service accounts to APIs slated for removal\n\nThe function receives a slice of API request count objects and a set of\nworkload service account names. It iterates through the usage data,\nextracting each service account that appears in the workload list and\nrecording any API whose removal release is specified. The result is a nested\nmap where each key is a service account name and its value maps deprecated\nAPIs to their corresponding Kubernetes release version.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:333", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testAPICompatibilityWithNextOCPRelease", + "kind": "function", + "source": [ + "func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tisOCP := provider.IsOCPCluster()", + "\tcheck.LogInfo(\"Is OCP: %v\", isOCP)", + "", + "\tif !isOCP {", + "\t\tcheck.LogInfo(\"The Kubernetes distribution is not OpenShift. 
Skipping API compatibility test.\")", + "\t\treturn", + "\t}", + "", + "\t// Retrieve APIRequestCount using clientsholder", + "\toc := clientsholder.GetClientsHolder()", + "\tapiRequestCounts, err := oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error retrieving APIRequestCount objects: %s\", err)", + "\t\treturn", + "\t}", + "", + "\t// Extract unique service account names from env.ServiceAccounts", + "\tworkloadServiceAccountNames := extractUniqueServiceAccountNames(env)", + "\tcheck.LogInfo(\"Detected %d unique service account names for the workload: %v\", len(workloadServiceAccountNames), workloadServiceAccountNames)", + "", + "\t// Build a map from service accounts to deprecated APIs", + "\tserviceAccountToDeprecatedAPIs := buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)", + "", + "\t// Evaluate API compliance with the next Kubernetes version", + "\tcompliantObjects, nonCompliantObjects := evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)", + "", + "\t// Add test results", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func buildServiceAccountToDeprecatedAPIMap(apiRequestCounts []apiserv1.APIRequestCount, workloadServiceAccountNames map[string]struct{}) map[string]map[string]string {", + "\t// Define a map where the key is the service account name and the value is another map", + "\t// The inner map key is the API name and the value is the release version in which it will be removed", + "\tserviceAccountToDeprecatedAPIs := make(map[string]map[string]string)", + "", + "\tfor i := range apiRequestCounts {", + "\t\tobj := \u0026apiRequestCounts[i]", + "\t\t// Filter by non-empty removedInRelease", + "\t\tif obj.Status.RemovedInRelease != \"\" {", + "\t\t\t// Iterate over the last 24h 
usage data", + "\t\t\tfor _, last24h := range obj.Status.Last24h {", + "\t\t\t\tfor _, byNode := range last24h.ByNode {", + "\t\t\t\t\tfor _, byUser := range byNode.ByUser {", + "\t\t\t\t\t\t// Split the username by \":\" and take the last chunk to extract ServiceAccount", + "\t\t\t\t\t\t// from composed structures like system:serviceaccount:default:eventtest-operator-service-account", + "\t\t\t\t\t\tserviceAccountParts := strings.Split(byUser.UserName, \":\")", + "\t\t\t\t\t\tstrippedServiceAccount := serviceAccountParts[len(serviceAccountParts)-1]", + "", + "\t\t\t\t\t\t// Check if the service account is in the workload SA list", + "\t\t\t\t\t\tif _, exists := workloadServiceAccountNames[strippedServiceAccount]; exists {", + "\t\t\t\t\t\t\t// Initialize the inner map if it does not exist", + "\t\t\t\t\t\t\tif serviceAccountToDeprecatedAPIs[strippedServiceAccount] == nil {", + "\t\t\t\t\t\t\t\tserviceAccountToDeprecatedAPIs[strippedServiceAccount] = make(map[string]string)", + "\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t// Add the API and its RemovedInRelease K8s version to the map", + "\t\t\t\t\t\t\tserviceAccountToDeprecatedAPIs[strippedServiceAccount][obj.Name] = obj.Status.RemovedInRelease", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn serviceAccountToDeprecatedAPIs", + "}" + ] + }, + { + "name": "containerHasLoggingOutput", + "qualifiedName": "containerHasLoggingOutput", + "exported": false, + "signature": "func(*provider.Container)(bool, error)", + "doc": "containerHasLoggingOutput Checks whether a container has produced any log output\n\nThe function retrieves the last two lines of a pod’s logs via the\nKubernetes API, reads them into memory, and returns true if any content was\nfound. It handles errors from establishing the stream or copying data,\nreturning false with an error in those cases. 
The result indicates whether\nthe container produced at least one line to stdout or stderr.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:108", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "int64", + "kind": "function" + }, + { + "name": "GetLogs", + "kind": "function" + }, + { + "name": "Pods", + "kind": "function" + }, + { + "name": "CoreV1", + "kind": "function" + }, + { + "name": "Stream", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "name": "new", + "kind": "function" + }, + { + "pkgPath": "io", + "name": "Copy", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testContainersLogging", + "kind": "function", + "source": [ + "func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Iterate through all the CUTs to get their log output. 
The TC checks that at least", + "\t// one log line is found.", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\thasLoggingOutput, err := containerHasLoggingOutput(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get %q log output, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not get log output\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif !hasLoggingOutput {", + "\t\t\tcheck.LogError(\"Container %q does not have any line of log to stderr/stdout\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No log line to stderr/stdout found\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has some logging output\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Found log line to stderr/stdout\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func containerHasLoggingOutput(cut *provider.Container) (bool, error) {", + "\tocpClient := clientsholder.GetClientsHolder()", + "", + "\t// K8s' API will not return lines that do not have the newline termination char, so", + "\t// We need to ask for the last two lines.", + "\tconst tailLogLines = 2", + "\tnumLogLines := int64(tailLogLines)", + "\tpodLogOptions := corev1.PodLogOptions{TailLines: \u0026numLogLines, Container: cut.Name}", + "\treq := ocpClient.K8sClient.CoreV1().Pods(cut.Namespace).GetLogs(cut.Podname, \u0026podLogOptions)", + "", + 
"\tpodLogsReaderCloser, err := req.Stream(context.TODO())", + "\tif err != nil {", + "\t\treturn false, fmt.Errorf(\"unable to get log streamer, err: %v\", err)", + "\t}", + "", + "\tdefer podLogsReaderCloser.Close()", + "", + "\tbuf := new(bytes.Buffer)", + "\t_, err = io.Copy(buf, podLogsReaderCloser)", + "\tif err != nil {", + "\t\treturn false, fmt.Errorf(\"unable to get log data, err: %v\", err)", + "\t}", + "", + "\treturn buf.String() != \"\", nil", + "}" + ] + }, + { + "name": "evaluateAPICompliance", + "qualifiedName": "evaluateAPICompliance", + "exported": false, + "signature": "func(map[string]map[string]string, string, map[string]struct{})([]*testhelper.ReportObject)", + "doc": "evaluateAPICompliance Assesses whether service accounts use APIs that will be removed in the next Kubernetes release\n\nThe function parses the current Kubernetes version, increments it to\ndetermine the upcoming release, and then checks each deprecated API used by a\nservice account against the removal schedule. 
It creates report objects\nindicating compliance or non‑compliance for each API, adding relevant\nfields such as the API name, service account, and removal or active release.\nIf no APIs are detected, it generates pass reports for all workload service\naccounts.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:378", + "calls": [ + { + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "name": "IncMinor", + "kind": "function" + }, + { + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Printf", + "kind": "function" + }, + { + "name": "Minor", + "kind": "function" + }, + { + "name": "Minor", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + 
"\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testAPICompatibilityWithNextOCPRelease", + "kind": "function", + "source": [ + "func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tisOCP := provider.IsOCPCluster()", + "\tcheck.LogInfo(\"Is OCP: %v\", isOCP)", + "", + "\tif !isOCP {", + "\t\tcheck.LogInfo(\"The Kubernetes distribution is not OpenShift. 
Skipping API compatibility test.\")", + "\t\treturn", + "\t}", + "", + "\t// Retrieve APIRequestCount using clientsholder", + "\toc := clientsholder.GetClientsHolder()", + "\tapiRequestCounts, err := oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error retrieving APIRequestCount objects: %s\", err)", + "\t\treturn", + "\t}", + "", + "\t// Extract unique service account names from env.ServiceAccounts", + "\tworkloadServiceAccountNames := extractUniqueServiceAccountNames(env)", + "\tcheck.LogInfo(\"Detected %d unique service account names for the workload: %v\", len(workloadServiceAccountNames), workloadServiceAccountNames)", + "", + "\t// Build a map from service accounts to deprecated APIs", + "\tserviceAccountToDeprecatedAPIs := buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)", + "", + "\t// Evaluate API compliance with the next Kubernetes version", + "\tcompliantObjects, nonCompliantObjects := evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)", + "", + "\t// Add test results", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func evaluateAPICompliance(", + "\tserviceAccountToDeprecatedAPIs map[string]map[string]string,", + "\tkubernetesVersion string,", + "\tworkloadServiceAccountNames map[string]struct{}) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tversion, err := semver.NewVersion(kubernetesVersion)", + "\tif err != nil {", + "\t\tfmt.Printf(\"Failed to parse Kubernetes version %q: %v\", kubernetesVersion, err)", + "\t\treturn nil, nil", + "\t}", + "", + "\t// Increment the version to represent the next release for comparison", + "\tnextK8sVersion := version.IncMinor()", + "", + "\t// Iterate over each service account and its deprecated APIs", + "\tfor 
saName, deprecatedAPIs := range serviceAccountToDeprecatedAPIs {", + "\t\tfor apiName, removedInRelease := range deprecatedAPIs {", + "\t\t\tremovedVersion, err := semver.NewVersion(removedInRelease)", + "\t\t\tif err != nil {", + "\t\t\t\tfmt.Printf(\"Failed to parse Kubernetes version from APIRequestCount.status.removedInRelease: %s\\n\", err)", + "\t\t\t\t// Skip this API if the version parsing fails", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tisCompliantWithNextK8sVersion := removedVersion.Minor() \u003e nextK8sVersion.Minor()", + "", + "\t\t\t// Define reasons with version information", + "\t\t\tnonCompliantReason := fmt.Sprintf(\"API %s used by service account %s is NOT compliant with Kubernetes version %s, it will be removed in release %s\", apiName, saName, nextK8sVersion.String(), removedInRelease)", + "\t\t\tcompliantReason := fmt.Sprintf(\"API %s used by service account %s is compliant with Kubernetes version %s, it will be removed in release %s\", apiName, saName, nextK8sVersion.String(), removedInRelease)", + "", + "\t\t\tvar reportObject *testhelper.ReportObject", + "\t\t\tif isCompliantWithNextK8sVersion {", + "\t\t\t\treportObject = testhelper.NewReportObject(compliantReason, \"API\", true)", + "\t\t\t\treportObject.AddField(\"ActiveInRelease\", nextK8sVersion.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, reportObject)", + "\t\t\t} else {", + "\t\t\t\treportObject = testhelper.NewReportObject(nonCompliantReason, \"API\", false)", + "\t\t\t\treportObject.AddField(\"RemovedInRelease\", removedInRelease)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, reportObject)", + "\t\t\t}", + "", + "\t\t\treportObject.AddField(\"APIName\", apiName)", + "\t\t\treportObject.AddField(\"ServiceAccount\", saName)", + "\t\t}", + "\t}", + "", + "\t// Force the test to pass if both lists are empty", + "\tif len(compliantObjects) == 0 \u0026\u0026 len(nonCompliantObjects) == 0 {", + "\t\tfor saName := range 
workloadServiceAccountNames {", + "\t\t\treportObject := testhelper.NewReportObject(\"SA does not use any removed API\", \"ServiceAccount\", true).", + "\t\t\t\tAddField(\"Name\", saName)", + "\t\t\tcompliantObjects = append(compliantObjects, reportObject)", + "\t\t}", + "\t}", + "", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "extractUniqueServiceAccountNames", + "qualifiedName": "extractUniqueServiceAccountNames", + "exported": false, + "signature": "func(*provider.TestEnvironment)(map[string]struct{})", + "doc": "extractUniqueServiceAccountNames collects distinct service account names from the test environment\n\nIt receives a test environment, iterates over its ServiceAccounts slice, and\ninserts each name into a map to ensure uniqueness. The resulting map has keys\nof type string and empty struct values, providing an efficient set\nrepresentation for later use in compatibility checks.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:441", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testAPICompatibilityWithNextOCPRelease", + "kind": "function", + "source": [ + "func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tisOCP := provider.IsOCPCluster()", + "\tcheck.LogInfo(\"Is OCP: %v\", isOCP)", + "", + "\tif !isOCP {", + "\t\tcheck.LogInfo(\"The Kubernetes distribution is not OpenShift. 
Skipping API compatibility test.\")", + "\t\treturn", + "\t}", + "", + "\t// Retrieve APIRequestCount using clientsholder", + "\toc := clientsholder.GetClientsHolder()", + "\tapiRequestCounts, err := oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error retrieving APIRequestCount objects: %s\", err)", + "\t\treturn", + "\t}", + "", + "\t// Extract unique service account names from env.ServiceAccounts", + "\tworkloadServiceAccountNames := extractUniqueServiceAccountNames(env)", + "\tcheck.LogInfo(\"Detected %d unique service account names for the workload: %v\", len(workloadServiceAccountNames), workloadServiceAccountNames)", + "", + "\t// Build a map from service accounts to deprecated APIs", + "\tserviceAccountToDeprecatedAPIs := buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)", + "", + "\t// Evaluate API compliance with the next Kubernetes version", + "\tcompliantObjects, nonCompliantObjects := evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)", + "", + "\t// Add test results", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func extractUniqueServiceAccountNames(env *provider.TestEnvironment) map[string]struct{} {", + "\tuniqueServiceAccountNames := make(map[string]struct{})", + "", + "\t// Iterate over the service accounts to extract names", + "\tfor _, sa := range env.ServiceAccounts {", + "\t\tuniqueServiceAccountNames[sa.Name] = struct{}{}", + "\t}", + "", + "\treturn uniqueServiceAccountNames", + "}" + ] + }, + { + "name": "testAPICompatibilityWithNextOCPRelease", + "qualifiedName": "testAPICompatibilityWithNextOCPRelease", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testAPICompatibilityWithNextOCPRelease Checks whether 
workload APIs remain available in the upcoming OpenShift release\n\nThe function first verifies that the cluster is an OpenShift distribution,\nthen gathers API request usage data via the ApiserverV1 client. It maps each\nservice account to any deprecated APIs it has used and compares these\ndeprecation releases against the next minor Kubernetes version. Results are\nrecorded as compliant or non‑compliant objects for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:459", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "IsOCPCluster", + "kind": "function", + "source": [ + "func IsOCPCluster() bool {", + "\treturn env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "List", + "kind": "function" + }, + { + "name": "APIRequestCounts", + "kind": "function" + }, + { + "name": "ApiserverV1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "extractUniqueServiceAccountNames", + "kind": "function", + "source": [ + "func extractUniqueServiceAccountNames(env *provider.TestEnvironment) map[string]struct{} {", + "\tuniqueServiceAccountNames := make(map[string]struct{})", + "", + "\t// Iterate over the service accounts to 
extract names", + "\tfor _, sa := range env.ServiceAccounts {", + "\t\tuniqueServiceAccountNames[sa.Name] = struct{}{}", + "\t}", + "", + "\treturn uniqueServiceAccountNames", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "buildServiceAccountToDeprecatedAPIMap", + "kind": "function", + "source": [ + "func buildServiceAccountToDeprecatedAPIMap(apiRequestCounts []apiserv1.APIRequestCount, workloadServiceAccountNames map[string]struct{}) map[string]map[string]string {", + "\t// Define a map where the key is the service account name and the value is another map", + "\t// The inner map key is the API name and the value is the release version in which it will be removed", + "\tserviceAccountToDeprecatedAPIs := make(map[string]map[string]string)", + "", + "\tfor i := range apiRequestCounts {", + "\t\tobj := \u0026apiRequestCounts[i]", + "\t\t// Filter by non-empty removedInRelease", + "\t\tif obj.Status.RemovedInRelease != \"\" {", + "\t\t\t// Iterate over the last 24h usage data", + "\t\t\tfor _, last24h := range obj.Status.Last24h {", + "\t\t\t\tfor _, byNode := range last24h.ByNode {", + "\t\t\t\t\tfor _, byUser := range byNode.ByUser {", + "\t\t\t\t\t\t// Split the username by \":\" and take the last chunk to extract ServiceAccount", + "\t\t\t\t\t\t// from composed structures like system:serviceaccount:default:eventtest-operator-service-account", + "\t\t\t\t\t\tserviceAccountParts := strings.Split(byUser.UserName, \":\")", + "\t\t\t\t\t\tstrippedServiceAccount := serviceAccountParts[len(serviceAccountParts)-1]", + "", + "\t\t\t\t\t\t// Check if the service account is in the workload SA list", + "\t\t\t\t\t\tif _, exists := workloadServiceAccountNames[strippedServiceAccount]; exists {", + "\t\t\t\t\t\t\t// Initialize the inner map if it does not exist", + "\t\t\t\t\t\t\tif serviceAccountToDeprecatedAPIs[strippedServiceAccount] == nil {", + 
"\t\t\t\t\t\t\t\tserviceAccountToDeprecatedAPIs[strippedServiceAccount] = make(map[string]string)", + "\t\t\t\t\t\t\t}", + "\t\t\t\t\t\t\t// Add the API and its RemovedInRelease K8s version to the map", + "\t\t\t\t\t\t\tserviceAccountToDeprecatedAPIs[strippedServiceAccount][obj.Name] = obj.Status.RemovedInRelease", + "\t\t\t\t\t\t}", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn serviceAccountToDeprecatedAPIs", + "}" + ] + }, + { + "name": "evaluateAPICompliance", + "kind": "function", + "source": [ + "func evaluateAPICompliance(", + "\tserviceAccountToDeprecatedAPIs map[string]map[string]string,", + "\tkubernetesVersion string,", + "\tworkloadServiceAccountNames map[string]struct{}) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) {", + "\tversion, err := semver.NewVersion(kubernetesVersion)", + "\tif err != nil {", + "\t\tfmt.Printf(\"Failed to parse Kubernetes version %q: %v\", kubernetesVersion, err)", + "\t\treturn nil, nil", + "\t}", + "", + "\t// Increment the version to represent the next release for comparison", + "\tnextK8sVersion := version.IncMinor()", + "", + "\t// Iterate over each service account and its deprecated APIs", + "\tfor saName, deprecatedAPIs := range serviceAccountToDeprecatedAPIs {", + "\t\tfor apiName, removedInRelease := range deprecatedAPIs {", + "\t\t\tremovedVersion, err := semver.NewVersion(removedInRelease)", + "\t\t\tif err != nil {", + "\t\t\t\tfmt.Printf(\"Failed to parse Kubernetes version from APIRequestCount.status.removedInRelease: %s\\n\", err)", + "\t\t\t\t// Skip this API if the version parsing fails", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\tisCompliantWithNextK8sVersion := removedVersion.Minor() \u003e nextK8sVersion.Minor()", + "", + "\t\t\t// Define reasons with version information", + "\t\t\tnonCompliantReason := fmt.Sprintf(\"API %s used by service account %s is NOT compliant with Kubernetes version %s, it will be removed in release %s\", apiName, 
saName, nextK8sVersion.String(), removedInRelease)", + "\t\t\tcompliantReason := fmt.Sprintf(\"API %s used by service account %s is compliant with Kubernetes version %s, it will be removed in release %s\", apiName, saName, nextK8sVersion.String(), removedInRelease)", + "", + "\t\t\tvar reportObject *testhelper.ReportObject", + "\t\t\tif isCompliantWithNextK8sVersion {", + "\t\t\t\treportObject = testhelper.NewReportObject(compliantReason, \"API\", true)", + "\t\t\t\treportObject.AddField(\"ActiveInRelease\", nextK8sVersion.String())", + "\t\t\t\tcompliantObjects = append(compliantObjects, reportObject)", + "\t\t\t} else {", + "\t\t\t\treportObject = testhelper.NewReportObject(nonCompliantReason, \"API\", false)", + "\t\t\t\treportObject.AddField(\"RemovedInRelease\", removedInRelease)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, reportObject)", + "\t\t\t}", + "", + "\t\t\treportObject.AddField(\"APIName\", apiName)", + "\t\t\treportObject.AddField(\"ServiceAccount\", saName)", + "\t\t}", + "\t}", + "", + "\t// Force the test to pass if both lists are empty", + "\tif len(compliantObjects) == 0 \u0026\u0026 len(nonCompliantObjects) == 0 {", + "\t\tfor saName := range workloadServiceAccountNames {", + "\t\t\treportObject := testhelper.NewReportObject(\"SA does not use any removed API\", \"ServiceAccount\", true).", + "\t\t\t\tAddField(\"Name\", saName)", + "\t\t\tcompliantObjects = append(compliantObjects, reportObject)", + "\t\t}", + "\t}", + "", + "\treturn compliantObjects, nonCompliantObjects", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", 
+ "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tisOCP := provider.IsOCPCluster()", + "\tcheck.LogInfo(\"Is OCP: %v\", isOCP)", 
+ "", + "\tif !isOCP {", + "\t\tcheck.LogInfo(\"The Kubernetes distribution is not OpenShift. Skipping API compatibility test.\")", + "\t\treturn", + "\t}", + "", + "\t// Retrieve APIRequestCount using clientsholder", + "\toc := clientsholder.GetClientsHolder()", + "\tapiRequestCounts, err := oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(context.TODO(), metav1.ListOptions{})", + "\tif err != nil {", + "\t\tcheck.LogError(\"Error retrieving APIRequestCount objects: %s\", err)", + "\t\treturn", + "\t}", + "", + "\t// Extract unique service account names from env.ServiceAccounts", + "\tworkloadServiceAccountNames := extractUniqueServiceAccountNames(env)", + "\tcheck.LogInfo(\"Detected %d unique service account names for the workload: %v\", len(workloadServiceAccountNames), workloadServiceAccountNames)", + "", + "\t// Build a map from service accounts to deprecated APIs", + "\tserviceAccountToDeprecatedAPIs := buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)", + "", + "\t// Evaluate API compliance with the next Kubernetes version", + "\tcompliantObjects, nonCompliantObjects := evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)", + "", + "\t// Add test results", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testContainersLogging", + "qualifiedName": "testContainersLogging", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersLogging Verifies that containers emit log output to stdout or stderr\n\nThe function iterates over all containers under test, attempts to fetch their\nmost recent log lines, and records whether any logs were present. Containers\nlacking logs or encountering errors are marked non‑compliant, while those\nproducing at least one line are marked compliant. 
The results are aggregated\ninto report objects for later analysis.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:141", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "containerHasLoggingOutput", + "kind": "function", + "source": [ + "func containerHasLoggingOutput(cut *provider.Container) (bool, error) {", + "\tocpClient := clientsholder.GetClientsHolder()", + "", + "\t// K8s' API will not return lines that do not have the newline termination char, so", + "\t// We need to ask for the last two lines.", + "\tconst tailLogLines = 2", + "\tnumLogLines := int64(tailLogLines)", + "\tpodLogOptions := corev1.PodLogOptions{TailLines: \u0026numLogLines, Container: cut.Name}", + "\treq := ocpClient.K8sClient.CoreV1().Pods(cut.Namespace).GetLogs(cut.Podname, \u0026podLogOptions)", + "", + "\tpodLogsReaderCloser, err := req.Stream(context.TODO())", + "\tif err != nil {", + "\t\treturn false, fmt.Errorf(\"unable to get log streamer, err: %v\", err)", + "\t}", + "", + "\tdefer podLogsReaderCloser.Close()", + "", + "\tbuf := new(bytes.Buffer)", + "\t_, err = io.Copy(buf, podLogsReaderCloser)", + "\tif err != nil {", + "\t\treturn false, fmt.Errorf(\"unable to get log data, err: %v\", err)", + "\t}", + "", + "\treturn buf.String() != \"\", nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + 
}, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Iterate through all the CUTs to get their log output. 
The TC checks that at least", + "\t// one log line is found.", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\thasLoggingOutput, err := containerHasLoggingOutput(cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to get %q log output, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not get log output\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif !hasLoggingOutput {", + "\t\t\tcheck.LogError(\"Container %q does not have any line of log to stderr/stdout\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"No log line to stderr/stdout found\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has some logging output\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Found log line to stderr/stdout\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testCrds", + "qualifiedName": "testCrds", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testCrds Verifies CRD status subresource presence\n\nThe function iterates over all custom resource definitions in the test\nenvironment, checking each version for a \"status\" property in its schema. 
For\nevery missing status field it logs an error and records a non‑compliant\nreport object; otherwise it logs success and records a compliant report.\nFinally, it sets the check result with lists of compliant and non‑compliant\nobjects.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:178", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + 
"source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + 
"usesTypes": null, + "usesGlobals": null, + "source": [ + "func testCrds(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, crd := range env.Crds {", + "\t\tcheck.LogInfo(\"Testing CRD: %s\", crd.Name)", + "\t\tfor _, ver := range crd.Spec.Versions {", + "\t\t\tif _, ok := ver.Schema.OpenAPIV3Schema.Properties[\"status\"]; !ok {", + "\t\t\t\tcheck.LogError(\"CRD: %s, version: %s does not have a status subresource\", crd.Name, ver.Name)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\ttesthelper.NewReportObject(\"Crd does not have a status sub resource set\", testhelper.CustomResourceDefinitionType, false).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionName, crd.Name).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionVersion, ver.Name))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"CRD: %s, version: %s has a status subresource\", crd.Name, ver.Name)", + "\t\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\t\ttesthelper.NewReportObject(\"Crd has a status sub resource set\", testhelper.CustomResourceDefinitionType, true).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionName, crd.Name).", + "\t\t\t\t\t\tAddField(testhelper.CustomResourceDefinitionVersion, ver.Name))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodDisruptionBudgets", + "qualifiedName": "testPodDisruptionBudgets", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testPodDisruptionBudgets Verifies that deployments and stateful sets have valid pod disruption budgets\n\nThe function iterates through all deployments and stateful sets in the test\nenvironment, checking for a matching PodDisruptionBudget by label selector.\nIt validates each found PDB against 
the replica count of its controller using\nan external checker. Results are recorded as compliant or non‑compliant\nreport objects, which are then set on the check result.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:239", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/labels", + "name": "Set", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/apis/meta/v1", + "name": "LabelSelectorAsSelector", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "Matches", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability/pdb", + "name": "CheckPDBIsValid", + "kind": "function", + "source": [ + "func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, error) {", + "\tvar replicaCount int32", + "\tif replicas != nil {", + "\t\treplicaCount = *replicas", + "\t} else {", + "\t\treplicaCount = 1 // default value", + "\t}", + "", + "\tvar minAvailableValue int", + "\tvar maxUnavailableValue int", + "", + "\tif pdb.Spec.MinAvailable != nil {", + "\t\tvar err error", + "\t\tminAvailableValue, err = intOrStringToValue(pdb.Spec.MinAvailable, replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the minAvailable spec", + "\t\tif minAvailableValue == 0 {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.minAvailable cannot be zero. Currently set to: %d. Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "", + "\t\tif minAvailableValue \u003e int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"minAvailable cannot be greater than replicas. Currently set to: %d. 
Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\tif pdb.Spec.MaxUnavailable != nil {", + "\t\tvar err error", + "\t\tmaxUnavailableValue, err = intOrStringToValue(pdb.Spec.MaxUnavailable, replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the maxUnavailable spec", + "\t\tif maxUnavailableValue \u003e= int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.maxUnavailable cannot be greater than or equal to the number of pods in the replica. Currently set to: %d. Replicas set to: %d\", maxUnavailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) 
(out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/labels", + "name": "Set", + "kind": "function" + }, + { + "pkgPath": "k8s.io/apimachinery/pkg/apis/meta/v1", + "name": "LabelSelectorAsSelector", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "Matches", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability/pdb", + "name": "CheckPDBIsValid", + "kind": "function", + "source": [ + "func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, error) {", + "\tvar replicaCount int32", + "\tif replicas != nil {", + "\t\treplicaCount = *replicas", + "\t} else {", + "\t\treplicaCount = 1 // default value", + "\t}", + "", + "\tvar minAvailableValue int", + "\tvar maxUnavailableValue int", + "", + "\tif pdb.Spec.MinAvailable != nil {", + "\t\tvar err 
error", + "\t\tminAvailableValue, err = intOrStringToValue(pdb.Spec.MinAvailable, replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the minAvailable spec", + "\t\tif minAvailableValue == 0 {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.minAvailable cannot be zero. Currently set to: %d. Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "", + "\t\tif minAvailableValue \u003e int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"minAvailable cannot be greater than replicas. Currently set to: %d. Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\tif pdb.Spec.MaxUnavailable != nil {", + "\t\tvar err error", + "\t\tmaxUnavailableValue, err = intOrStringToValue(pdb.Spec.MaxUnavailable, replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the maxUnavailable spec", + "\t\tif maxUnavailableValue \u003e= int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.maxUnavailable cannot be greater than or equal to the number of pods in the replica. Currently set to: %d. 
Replicas set to: %d\", maxUnavailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "ToString", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the of Deployments and StatefulSets and check if the PDBs are valid", + "\tfor _, d := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", d.ToString())", + "\t\tdeploymentSelector := labels.Set(d.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != d.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(deploymentSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, d.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for Deployment %q, err: %v\", 
pdb.Name, d.Name, err)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.DeploymentType, false).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for Deployment: %q\", pdb.Name, d.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Deployment: references PodDisruptionBudget\", testhelper.DeploymentType, true).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"Deployment %q is missing a corresponding PodDisruptionBudget\", d.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Deployment is missing a corresponding PodDisruptionBudget\", testhelper.DeploymentType, false).", + "\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\tAddField(testhelper.Namespace, d.Namespace))", + "\t\t}", + "\t}", + "", + "\tfor _, s := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", s.ToString())", + "\t\tstatefulSetSelector := labels.Set(s.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != s.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", 
pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(statefulSetSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, s.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for StatefulSet %q, err: %v\", pdb.Name, s.Name, err)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.StatefulSetType, false).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for StatefulSet: %q\", pdb.Name, s.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"StatefulSet: references PodDisruptionBudget\", testhelper.StatefulSetType, true).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"StatefulSet %q is missing a corresponding PodDisruptionBudget\", s.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"StatefulSet is missing a corresponding PodDisruptionBudget\", testhelper.StatefulSetType, false).", + "\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testTerminationMessagePolicy", + "qualifiedName": "testTerminationMessagePolicy", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": 
"testTerminationMessagePolicy Verifies container termination message policies\n\nThe function iterates over each container in the test environment, checking\nwhether its TerminationMessagePolicy is set to FallbackToLogsOnError.\nContainers that meet this requirement are recorded as compliant; others are\nmarked non-compliant with an explanatory report object. After processing all\ncontainers, the check results are stored for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:210", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + 
"name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.ObservabilityTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.ObservabilityTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLoggingIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersLogging(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCrdsStatusSubresourceIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCrdsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestCrds(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestTerminationMessagePolicyIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTerminationMessagePolicy(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodDisruptionBudgetIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoDeploymentsUnderTestSkipFn(\u0026env), testhelper.GetNoStatefulSetsUnderTestSkipFn(\u0026env)).", + "\t\tWithSkipModeAll().", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodDisruptionBudgets(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestAPICompatibilityWithNextOCPReleaseIdentifier)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestAPICompatibilityWithNextOCPRelease(c, \u0026env)", + "\t\t\treturn nil", 
+ "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif cut.TerminationMessagePolicy != corev1.TerminationMessageFallbackToLogsOnError {", + "\t\t\tcheck.LogError(\"Container %q does not have a TerminationMessagePolicy: FallbackToLogsOnError (has %s)\", cut, cut.TerminationMessagePolicy)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"TerminationMessagePolicy is not FallbackToLogsOnError\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has a TerminationMessagePolicy: FallbackToLogsOnError\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"TerminationMessagePolicy is FallbackToLogsOnError\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:45" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/observability/suite.go:43" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability/pdb", + "name": "pdb", + "files": 1, + "imports": [ + "fmt", + "k8s.io/api/policy/v1", + "k8s.io/apimachinery/pkg/util/intstr", + "math" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "CheckPDBIsValid", + "qualifiedName": 
"CheckPDBIsValid", + "exported": true, + "signature": "func(*policyv1.PodDisruptionBudget, *int32)(bool, error)", + "doc": "CheckPDBIsValid Validates a PodDisruptionBudget against replica count\n\nThe function checks the .spec.minAvailable and .spec.maxUnavailable fields of\na PodDisruptionBudget, converting them to integer values based on the\nprovided replica count or a default of one. It ensures minAvailable is\nnon‑zero and does not exceed replicas, and that maxUnavailable is less than\nthe number of pods. If any rule fails, it returns false with an explanatory\nerror; otherwise it returns true.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/pdb/pdb.go:39", + "calls": [ + { + "name": "intOrStringToValue", + "kind": "function", + "source": [ + "func intOrStringToValue(intOrStr *intstr.IntOrString, replicas int32) (int, error) {", + "\tswitch intOrStr.Type {", + "\tcase intstr.Int:", + "\t\treturn intOrStr.IntValue(), nil", + "\tcase intstr.String:", + "\t\tv, err := percentageToFloat(intOrStr.StrVal)", + "\t\tif err != nil {", + "\t\t\treturn 0, fmt.Errorf(\"invalid value %q: %v\", intOrStr.StrVal, err)", + "\t\t}", + "\t\treturn int(math.RoundToEven(v * float64(replicas))), nil", + "\t}", + "\treturn 0, fmt.Errorf(\"invalid type: neither int nor percentage\")", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "intOrStringToValue", + "kind": "function", + "source": [ + "func intOrStringToValue(intOrStr *intstr.IntOrString, replicas int32) (int, error) {", + "\tswitch intOrStr.Type {", + "\tcase intstr.Int:", + "\t\treturn intOrStr.IntValue(), nil", + "\tcase intstr.String:", + "\t\tv, err := percentageToFloat(intOrStr.StrVal)", + "\t\tif err != nil {", + "\t\t\treturn 0, fmt.Errorf(\"invalid value %q: %v\", intOrStr.StrVal, err)", + "\t\t}", + "\t\treturn int(math.RoundToEven(v 
* float64(replicas))), nil", + "\t}", + "\treturn 0, fmt.Errorf(\"invalid type: neither int nor percentage\")", + "}" + ] + }, + { + "name": "int", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability", + "name": "testPodDisruptionBudgets", + "kind": "function", + "source": [ + "func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through all of the of Deployments and StatefulSets and check if the PDBs are valid", + "\tfor _, d := range env.Deployments {", + "\t\tcheck.LogInfo(\"Testing Deployment %q\", d.ToString())", + "\t\tdeploymentSelector := labels.Set(d.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != d.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(deploymentSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, d.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for Deployment %q, err: %v\", pdb.Name, d.Name, err)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.DeploymentType, false).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + 
"\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for Deployment: %q\", pdb.Name, d.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"Deployment: references PodDisruptionBudget\", testhelper.DeploymentType, true).", + "\t\t\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, d.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"Deployment %q is missing a corresponding PodDisruptionBudget\", d.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"Deployment is missing a corresponding PodDisruptionBudget\", testhelper.DeploymentType, false).", + "\t\t\t\tAddField(testhelper.DeploymentName, d.Name).", + "\t\t\t\tAddField(testhelper.Namespace, d.Namespace))", + "\t\t}", + "\t}", + "", + "\tfor _, s := range env.StatefulSets {", + "\t\tcheck.LogInfo(\"Testing StatefulSet %q\", s.ToString())", + "\t\tstatefulSetSelector := labels.Set(s.Spec.Template.Labels)", + "\t\tpdbFound := false", + "\t\tfor pdbIndex := range env.PodDisruptionBudgets {", + "\t\t\tpdb := \u0026env.PodDisruptionBudgets[pdbIndex]", + "\t\t\tif pdb.Namespace != s.Namespace {", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tpdbSelector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)", + "\t\t\tif err != nil {", + "\t\t\t\tcheck.LogError(\"Could not convert the PDB %q label selector to selector, err: %v\", pdbSelector, err)", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif pdbSelector.Matches(statefulSetSelector) {", + "\t\t\t\tpdbFound = true", + "\t\t\t\tif ok, err := pdbv1.CheckPDBIsValid(pdb, s.Spec.Replicas); !ok {", + "\t\t\t\t\tcheck.LogError(\"PDB %q is not valid for StatefulSet %q, err: %v\", pdb.Name, s.Name, err)", + 
"\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"Invalid PodDisruptionBudget config: %v\", err), testhelper.StatefulSetType, false).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"PDB %q is valid for StatefulSet: %q\", pdb.Name, s.Name)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(\"StatefulSet: references PodDisruptionBudget\", testhelper.StatefulSetType, true).", + "\t\t\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\t\t\tAddField(testhelper.Namespace, s.Namespace).", + "\t\t\t\t\t\tAddField(testhelper.PodDisruptionBudgetReference, pdb.Name))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif !pdbFound {", + "\t\t\tcheck.LogError(\"StatefulSet %q is missing a corresponding PodDisruptionBudget\", s.ToString())", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(\"StatefulSet is missing a corresponding PodDisruptionBudget\", testhelper.StatefulSetType, false).", + "\t\t\t\tAddField(testhelper.StatefulSetName, s.Name).", + "\t\t\t\tAddField(testhelper.Namespace, s.Namespace))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, error) {", + "\tvar replicaCount int32", + "\tif replicas != nil {", + "\t\treplicaCount = *replicas", + "\t} else {", + "\t\treplicaCount = 1 // default value", + "\t}", + "", + "\tvar minAvailableValue int", + "\tvar maxUnavailableValue int", + "", + "\tif pdb.Spec.MinAvailable != nil {", + "\t\tvar err error", + "\t\tminAvailableValue, err = intOrStringToValue(pdb.Spec.MinAvailable, 
replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the minAvailable spec", + "\t\tif minAvailableValue == 0 {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.minAvailable cannot be zero. Currently set to: %d. Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "", + "\t\tif minAvailableValue \u003e int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"minAvailable cannot be greater than replicas. Currently set to: %d. Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\tif pdb.Spec.MaxUnavailable != nil {", + "\t\tvar err error", + "\t\tmaxUnavailableValue, err = intOrStringToValue(pdb.Spec.MaxUnavailable, replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the maxUnavailable spec", + "\t\tif maxUnavailableValue \u003e= int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.maxUnavailable cannot be greater than or equal to the number of pods in the replica. Currently set to: %d. Replicas set to: %d\", maxUnavailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "name": "intOrStringToValue", + "qualifiedName": "intOrStringToValue", + "exported": false, + "signature": "func(*intstr.IntOrString, int32)(int, error)", + "doc": "intOrStringToValue Converts an IntOrString to a concrete integer based on replica count\n\nThe function examines the type of the input value; if it is an integer, that\nvalue is returned directly. If it is a string representing a percentage, the\npercentage is parsed and multiplied by the number of replicas, rounding to\nthe nearest integer with ties going to even. 
Errors are produced for unsupported types or\ninvalid percentage strings.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/pdb/pdb.go:90", + "calls": [ + { + "name": "IntValue", + "kind": "function" + }, + { + "name": "percentageToFloat", + "kind": "function", + "source": [ + "func percentageToFloat(percentage string) (float64, error) {", + "\tvar percentageFloat float64", + "\t_, err := fmt.Sscanf(percentage, \"%f%%\", \u0026percentageFloat)", + "\tif err != nil {", + "\t\treturn 0, err", + "\t}", + "\treturn percentageFloat / percentageDivisor, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "int", + "kind": "function" + }, + { + "pkgPath": "math", + "name": "RoundToEven", + "kind": "function" + }, + { + "name": "float64", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability/pdb", + "name": "CheckPDBIsValid", + "kind": "function", + "source": [ + "func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, error) {", + "\tvar replicaCount int32", + "\tif replicas != nil {", + "\t\treplicaCount = *replicas", + "\t} else {", + "\t\treplicaCount = 1 // default value", + "\t}", + "", + "\tvar minAvailableValue int", + "\tvar maxUnavailableValue int", + "", + "\tif pdb.Spec.MinAvailable != nil {", + "\t\tvar err error", + "\t\tminAvailableValue, err = intOrStringToValue(pdb.Spec.MinAvailable, replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the minAvailable spec", + "\t\tif minAvailableValue == 0 {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.minAvailable cannot be zero. Currently set to: %d. 
Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "", + "\t\tif minAvailableValue \u003e int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"minAvailable cannot be greater than replicas. Currently set to: %d. Replicas set to: %d\", minAvailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\tif pdb.Spec.MaxUnavailable != nil {", + "\t\tvar err error", + "\t\tmaxUnavailableValue, err = intOrStringToValue(pdb.Spec.MaxUnavailable, replicaCount)", + "\t\tif err != nil {", + "\t\t\treturn false, err", + "\t\t}", + "", + "\t\t// Tests for the maxUnavailable spec", + "\t\tif maxUnavailableValue \u003e= int(replicaCount) {", + "\t\t\treturn false, fmt.Errorf(\"field .spec.maxUnavailable cannot be greater than or equal to the number of pods in the replica. Currently set to: %d. Replicas set to: %d\", maxUnavailableValue, replicaCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func intOrStringToValue(intOrStr *intstr.IntOrString, replicas int32) (int, error) {", + "\tswitch intOrStr.Type {", + "\tcase intstr.Int:", + "\t\treturn intOrStr.IntValue(), nil", + "\tcase intstr.String:", + "\t\tv, err := percentageToFloat(intOrStr.StrVal)", + "\t\tif err != nil {", + "\t\t\treturn 0, fmt.Errorf(\"invalid value %q: %v\", intOrStr.StrVal, err)", + "\t\t}", + "\t\treturn int(math.RoundToEven(v * float64(replicas))), nil", + "\t}", + "\treturn 0, fmt.Errorf(\"invalid type: neither int nor percentage\")", + "}" + ] + }, + { + "name": "percentageToFloat", + "qualifiedName": "percentageToFloat", + "exported": false, + "signature": "func(string)(float64, error)", + "doc": "percentageToFloat Parses a percentage string into a decimal value\n\nThe function reads a string that represents a percentage and extracts the numeric part\nusing formatted scanning. 
It then converts this number to a float64 and\ndivides by a divisor to express it as a proportion, such as 0.25 for\ntwenty‑five percent. If the input is not in the expected format, an error\nis returned.", + "position": "/Users/deliedit/dev/certsuite/tests/observability/pdb/pdb.go:22", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sscanf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability/pdb", + "name": "intOrStringToValue", + "kind": "function", + "source": [ + "func intOrStringToValue(intOrStr *intstr.IntOrString, replicas int32) (int, error) {", + "\tswitch intOrStr.Type {", + "\tcase intstr.Int:", + "\t\treturn intOrStr.IntValue(), nil", + "\tcase intstr.String:", + "\t\tv, err := percentageToFloat(intOrStr.StrVal)", + "\t\tif err != nil {", + "\t\t\treturn 0, fmt.Errorf(\"invalid value %q: %v\", intOrStr.StrVal, err)", + "\t\t}", + "\t\treturn int(math.RoundToEven(v * float64(replicas))), nil", + "\t}", + "\treturn 0, fmt.Errorf(\"invalid type: neither int nor percentage\")", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func percentageToFloat(percentage string) (float64, error) {", + "\tvar percentageFloat float64", + "\t_, err := fmt.Sscanf(percentage, \"%f%%\", \u0026percentageFloat)", + "\tif err != nil {", + "\t\treturn 0, err", + "\t}", + "\treturn percentageFloat / percentageDivisor, nil", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "percentageDivisor", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/observability/pdb/pdb.go:12" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "operator", + "files": 3, + "imports": [ + "fmt", + "github.com/Masterminds/semver/v3", + "github.com/operator-framework/api/pkg/operators/v1alpha1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/access", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/catalogsource", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/openapi", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "strconv", + "strings" + ], + "structs": [ + { + "name": "CsvResult", + "exported": true, + "doc": "CsvResult contains the parsed CSV components\n\nThis structure holds the two parts produced by splitting a comma-separated\nstring: one part is stored as NameCsv and the other, if prefixed with \"ns=\",\nis stored as Namespace. It is used to return values from the SplitCsv\nfunction.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:40", + "fields": { + "NameCsv": "string", + "Namespace": "string" + }, + "methodNames": null, + "source": [ + "type CsvResult struct {", + "\tNameCsv string", + "\tNamespace string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Registers operator test checks with the internal database\n\nThis routine creates a new check group for operator tests, then adds a series\nof predefined checks to that group. 
Each check is configured with optional\nskip logic and a function that performs the actual validation against the\ncurrent test environment. The group is logged when created and all added\nchecks are registered in the shared checks database so they can be executed\nduring a certification run.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:59", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, 
cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorInstallationPhaseSucceeded", + "kind": "function", + "source": [ + "func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, op := range env.Operators {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", op)", + "\t\tif phasecheck.WaitOperatorReady(op.Csv) {", + "\t\t\tcheck.LogInfo(\"Operator %q is in Succeeded phase\", op)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator on Succeeded state \", true).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator %q is not in Succeeded phase (phase=%q)\", op, op.Csv.Status.Phase)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator 
not in Succeeded state \", false).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorInstallationAccessToSCC", + "kind": "function", + "source": [ + "func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects 
[]*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcsv := operator.Csv", + "\t\tcheck.LogDebug(\"Checking operator %s\", operator)", + "\t\tclusterPermissions := csv.Spec.InstallStrategy.StrategySpec.ClusterPermissions", + "\t\tif len(clusterPermissions) == 0 {", + "\t\t\tcheck.LogInfo(\"No clusterPermissions found in %s's CSV\", operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"No RBAC rules for Security Context Constraints found in CSV (no clusterPermissions found)\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Fails in case any cluster permission has a rule that refers to securitycontextconstraints.", + "\t\tif access.PermissionsHaveBadRule(clusterPermissions) {", + "\t\t\tcheck.LogInfo(\"Operator %s has a rule for a service account to access cluster SCCs\",", + "\t\t\t\toperator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"One or more RBAC rules for Security Context Constraints found in CSV\", false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"No RBAC rules for Security Context Constraints found in CSV\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + 
"\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorOlmSubscription", + "kind": "function", + "source": [ + "func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "\t\tif operator.SubscriptionName == \"\" {", + "\t\t\tcheck.LogError(\"OLM subscription not found for Operator %q\", operator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM subscription not found for operator, so it is not installed via OLM\", false).", + "\t\t\t\tAddField(testhelper.SubscriptionName, 
operator.SubscriptionName))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"OLM subscription %q found for Operator %q\", operator.SubscriptionName, operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"install-status-no-privilege (subscription found)\", true).", + "\t\t\t\tAddField(testhelper.SubscriptionName, operator.SubscriptionName))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + 
"\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorSemanticVersioning", + "kind": "function", + "source": [ + "func testOperatorSemanticVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorSemanticVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, operator := range env.Operators {", + "\t\toperatorVersion := operator.Version", + "\t\tcheck.LogInfo(\"Testing Operator %q for version %s\", operator, operatorVersion)", + "", + "\t\tif versions.IsValidSemanticVersion(operatorVersion) {", + "\t\t\tcheck.LogInfo(\"Operator %q has a valid semantic version %s\", operator, operatorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has a valid semantic version \", true).AddField(testhelper.Version, operatorVersion))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator %q has an invalid semantic version %s\", operator, operatorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has an invalid semantic version \", false).AddField(testhelper.Version, operatorVersion))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", 
+ "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorCrdsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorCrdsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Crds) == 0 {", + "\t\t\treturn true, \"no operator crds found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorCrdVersioning", + "kind": "function", + "source": [ + "func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tdoesUseK8sVersioning := true", + "\t\tnonCompliantVersion := \"\"", + "", + "\t\tfor _, crdVersion := range crd.Spec.Versions {", + "\t\t\tversionName := crdVersion.Name", + "\t\t\tcheck.LogDebug(\"Checking for Operator CRD %s with version %s\", crd.Name, versionName)", + "", + "\t\t\tif !versions.IsValidK8sVersion(versionName) {", + "\t\t\t\tdoesUseK8sVersioning = false", + "\t\t\t\tnonCompliantVersion = versionName", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif 
doesUseK8sVersioning {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s has valid K8s versioning \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has valid K8s versioning \", true).AddField(testhelper.CrdVersion, crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator CRD %s has invalid K8s versioning %s \", crd.Name, nonCompliantVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has invalid K8s versioning \", false).AddField(testhelper.CrdVersion, crd.Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorCrdsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorCrdsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Crds) == 0 {", + "\t\t\treturn true, \"no operator crds found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorCrdOpenAPISpec", + "kind": "function", + "source": [ + "func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdOpenAPISpec\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tif openapi.IsCRDDefinedWithOpenAPI3Schema(crd) {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is defined with OpenAPIV3 schema \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is defined with OpenAPIV3 schema \", true).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is not defined with OpenAPIV3 schema \", crd.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is not defined with OpenAPIV3 schema \", false).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) 
*Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorSingleCrdOwner", + "kind": "function", + "source": [ + "func testOperatorSingleCrdOwner(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Map each CRD to a list of operators that own it", + "\tcrdOwners := map[string][]string{}", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\townedCrds := operator.Csv.Spec.CustomResourceDefinitions.Owned", + "", + "\t\t// Helper map to filter out different versions of the same CRD name.", + "\t\tuniqueOwnedCrds := map[string]struct{}{}", + "\t\tfor j := range ownedCrds {", + "\t\t\tuniqueOwnedCrds[ownedCrds[j].Name] = struct{}{}", + "\t\t}", + 
"", + "\t\t// Now we can append the operator as CRD owner", + "\t\tfor crdName := range uniqueOwnedCrds {", + "\t\t\tcrdOwners[crdName] = append(crdOwners[crdName], operator.Name)", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"CRDs owned by operator %s: %+v\", operator.Name, uniqueOwnedCrds)", + "\t}", + "", + "\t// Flag those that are owned by more than one operator", + "\tfor crd, opList := range crdOwners {", + "\t\tif len(opList) \u003e 1 {", + "\t\t\tcheck.LogError(\"CRD %q is owned by more than one operator (owners: %v)\", crd, opList)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewCrdReportObject(crd, \"\", \"CRD is owned by more than one operator\", false).", + "\t\t\t\t\tAddField(testhelper.OperatorList, strings.Join(opList, \", \")))", + "\t\t} else {", + "\t\t\tcheck.LogDebug(\"CRD %q is owned by a single operator (%v)\", crd, opList[0])", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewCrdReportObject(crd, \"\", \"CRD is owned by a single operator\", true).", + "\t\t\t\t\tAddField(testhelper.OperatorName, opList[0]))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": 
"GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorPodsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.CSVToPodListMap) == 0 {", + "\t\t\treturn true, \"no operator pods found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorPodsNoHugepages", + "kind": "function", + "source": [ + "func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor csv, pods := range env.CSVToPodListMap {", + "\t\tCsvResult := SplitCsv(csv)", + "\t\tcheck.LogInfo(\"Name of csv: %q in namespaces: %q\", CsvResult.NameCsv, CsvResult.Namespace)", + "\t\tfor _, pod := range pods {", + "\t\t\tcheck.LogInfo(\"Testing Pod %q in namespace %q\", pod.Name, pod.Namespace)", + "\t\t\tif pod.HasHugepages() {", + "\t\t\t\tcheck.LogError(\"Pod %q in namespace %q has hugepages\", pod.Name, pod.Namespace)", + 
"\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has hugepages\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Pod %q in namespace %q has no hugepages\", pod.Name, pod.Namespace)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has no hugepages\", true))", + "\t\t\t}", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, 
\"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOperatorOlmSkipRange", + "kind": "function", + "source": [ + "func testOperatorOlmSkipRange(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "", + "\t\tif operator.Csv.Annotations[\"olm.skipRange\"] == \"\" {", + "\t\t\tcheck.LogError(\"OLM skipRange not found for Operator %q\", operator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM skipRange not found for operator\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"OLM skipRange %q found for Operator %q\", operator.Csv.Annotations[\"olm.skipRange\"], operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM skipRange found for operator\", true).", + "\t\t\t\tAddField(\"olm.SkipRange\", operator.Csv.Annotations[\"olm.skipRange\"]))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", 
check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testMultipleSameOperators", + "kind": "function", + "source": [ + "func testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Ensure the CSV name is unique and not installed more than once.", + "\t// CSV Names are unique and OLM installs them with name.version format.", + "\t// So, we can check if the CSV name is installed more than once.", + "", + "\tcheck.LogInfo(\"Checking if the operator is installed more than once\")", + "", + "\tfor _, op := range env.AllOperators {", + "\t\tcheck.LogDebug(\"Checking operator %q\", op.Name)", + "\t\tcheck.LogDebug(\"Number of operators to check %s against: %d\", op.Name, len(env.AllOperators))", + "\t\tfor _, op2 := range env.AllOperators {", + "\t\t\t// Check if the operator is installed more than once.", + "\t\t\tif OperatorInstalledMoreThanOnce(op, op2) {", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewOperatorReportObject(", + "\t\t\t\t\top.Namespace, op.Name, \"Operator is installed more than once\", false))", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(", + "\t\t\top.Namespace, op.Name, \"Operator is installed only once\", true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoCatalogSourcesSkipFn", + "kind": "function", + "source": [ + "func GetNoCatalogSourcesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.AllCatalogSources) == 0 {", + "\t\t\treturn true, \"no catalog sources found\"", + "\t\t}", + "\t\treturn false, \"\"", 
+ "\t}", + "}" + ] + }, + { + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// Search through packagemanifests to match 
the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. 
Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", 
+ "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces", + "kind": "function", + "source": [ + "func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOnlySingleNamespacedOperatorsAllowedInTenantNamespaces\")", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "", + "\toperatorNamespaces := make(map[string]bool)", + "\tfor _, operator := range env.Operators {", + "\t\toperatorNamespace := operator.Csv.Annotations[\"olm.operatorNamespace\"]", + "\t\tfor _, namespace := range env.Namespaces {", + "\t\t\tif namespace == operatorNamespace {", + "\t\t\t\toperatorNamespaces[operatorNamespace] = true", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tfor operatorNamespace := range operatorNamespaces { // operator installed namespace", + "\t\tcheck.LogInfo(\"Checking if namespace %s contains only valid single/ multi namespaced operators\", operatorNamespace)", + "", + "\t\tisDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators,", + "\t\t\tcsvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err := checkValidOperatorInstallation(operatorNamespace)", + "", + "\t\tcheck.LogInfo(\"isDedicatedOperatorNamespace=%t, singleOrMultiNamespaceOperators=%s, nonSingleOrMultiNamespaceOperators=%s, csvsTargetingNamespace=%s, operatorsFoundButNotUnderTest=%s, podsNotBelongingToOperators=%s\", isDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators) //nolint:lll", + "", + "\t\tif err != nil {", + "\t\t\tmsg := fmt.Sprintf(\"Operator namespace %s check got error %v\", operatorNamespace, err)", + "\t\t\tcheck.LogError(\"%s\", msg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isDedicatedOperatorNamespace {", + "\t\t\tvar msg string", + "\t\t\tif len(singleOrMultiNamespaceOperators) == 0 {", + "\t\t\t\tmsg = \"Namespace contains no installed single/multi namespace operators\"", + "\t\t\t} else {", + "\t\t\t\tmsg = fmt.Sprintf(\"Namespace is dedicated to 
single/multi namespace operators (%s) \", strings.Join(singleOrMultiNamespaceOperators, \", \"))", + "\t\t\t}", + "", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, true, operatorNamespace))", + "\t\t} else {", + "\t\t\tmsg := \"Operator namespace is not dedicated to single/multi operators because \"", + "", + "\t\t\tif len(nonSingleOrMultiNamespaceOperators) != 0 {", + "\t\t\t\tmsg += \"- operators are installed with an install mode different from single/multi (\" + strings.Join(nonSingleOrMultiNamespaceOperators, \", \") + \")\\n\"", + "\t\t\t}", + "", + "\t\t\tif len(csvsTargetingNamespace) != 0 {", + "\t\t\t\tmsg += \"- this namespace is the target namespace of other operators (\" + strings.Join(csvsTargetingNamespace, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(operatorsFoundButNotUnderTest) != 0 {", + "\t\t\t\tmsg += \"- operators not under test found (\" + strings.Join(operatorsFoundButNotUnderTest, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(podsNotBelongingToOperators) != 0 {", + "\t\t\t\tmsg += \"- invalid non operator pods found (\" + strings.Join(podsNotBelongingToOperators, \", \") + \")\"", + "\t\t\t}", + "", + "\t\t\tnonCompliantNs := testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace)", + "", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantNs)", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + 
"\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, 
\u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "OperatorInstalledMoreThanOnce", + "qualifiedName": "OperatorInstalledMoreThanOnce", + "exported": true, + "signature": "func(*provider.Operator, *provider.Operator)(bool)", + "doc": "OperatorInstalledMoreThanOnce Detects if the same operator appears more than once\n\nThe function compares two operator instances by examining their CSV names and\nversions. It first removes the version suffix from each CSV name, then checks\nthat the base names match while the versions differ. 
If both conditions hold,\nit reports that the operator is installed multiple times; otherwise it\nreturns false.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:76", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "String", + "kind": "function" + }, + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "TrimSuffix", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSuffix", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testMultipleSameOperators", + "kind": "function", + "source": [ + "func 
testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Ensure the CSV name is unique and not installed more than once.", + "\t// CSV Names are unique and OLM installs them with name.version format.", + "\t// So, we can check if the CSV name is installed more than once.", + "", + "\tcheck.LogInfo(\"Checking if the operator is installed more than once\")", + "", + "\tfor _, op := range env.AllOperators {", + "\t\tcheck.LogDebug(\"Checking operator %q\", op.Name)", + "\t\tcheck.LogDebug(\"Number of operators to check %s against: %d\", op.Name, len(env.AllOperators))", + "\t\tfor _, op2 := range env.AllOperators {", + "\t\t\t// Check if the operator is installed more than once.", + "\t\t\tif OperatorInstalledMoreThanOnce(op, op2) {", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(", + "\t\t\t\t\top.Namespace, op.Name, \"Operator is installed more than once\", false))", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(", + "\t\t\top.Namespace, op.Name, \"Operator is installed only once\", true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func OperatorInstalledMoreThanOnce(operator1, operator2 *provider.Operator) bool {", + "\t// Safeguard against nil operators (should not happen)", + "\tif operator1 == nil || operator2 == nil {", + "\t\treturn false", + "\t}", + "", + "\tlog.Debug(\"Comparing operator %q with operator %q\", operator1.Name, operator2.Name)", + "", + "\t// Retrieve the version from each CSV", + "\tcsv1Version := operator1.Csv.Spec.Version.String()", + "\tcsv2Version := operator2.Csv.Spec.Version.String()", + "", + "\tlog.Debug(\"CSV1 Version: %s\", 
csv1Version)", + "\tlog.Debug(\"CSV2 Version: %s\", csv2Version)", + "", + "\t// Strip the version from the CSV name by removing the suffix (which should be the version)", + "\tcsv1Name := strings.TrimSuffix(operator1.Csv.Name, \".v\"+csv1Version)", + "\tcsv2Name := strings.TrimSuffix(operator2.Csv.Name, \".v\"+csv2Version)", + "", + "\tlog.Debug(\"Comparing CSV names %q and %q\", csv1Name, csv2Name)", + "", + "\t// The CSV name should be the same, but the version should be different", + "\t// if the operator is installed more than once.", + "\tif operator1.Csv != nil \u0026\u0026 operator2.Csv != nil \u0026\u0026", + "\t\tcsv1Name == csv2Name \u0026\u0026", + "\t\tcsv1Version != csv2Version {", + "\t\tlog.Error(\"Operator %q is installed more than once\", operator1.Name)", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "SplitCsv", + "qualifiedName": "SplitCsv", + "exported": true, + "signature": "func(string)(CsvResult)", + "doc": "SplitCsv Separates a CSV string into its name and namespace components\n\nThis function takes a comma‑delimited string, splits it into parts, trims\nwhitespace, and assigns the portion prefixed with \"ns=\" to the Namespace\nfield while the remaining part becomes NameCsv. It returns a CsvResult struct\ncontaining these two fields. 
If no namespace prefix is present, Namespace\nremains empty.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:52", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "HasPrefix", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimPrefix", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorPodsNoHugepages", + "kind": "function", + "source": [ + "func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor csv, pods := range env.CSVToPodListMap {", + "\t\tCsvResult := SplitCsv(csv)", + "\t\tcheck.LogInfo(\"Name of csv: %q in namespaces: %q\", CsvResult.NameCsv, CsvResult.Namespace)", + "\t\tfor _, pod := range pods {", + "\t\t\tcheck.LogInfo(\"Testing Pod %q in namespace %q\", pod.Name, pod.Namespace)", + "\t\t\tif pod.HasHugepages() {", + "\t\t\t\tcheck.LogError(\"Pod %q in namespace %q has hugepages\", pod.Name, pod.Namespace)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has hugepages\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Pod %q in namespace %q has no hugepages\", pod.Name, pod.Namespace)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has no hugepages\", true))", + "\t\t\t}", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SplitCsv(csv string) CsvResult {", + "\t// Split by comma to separate components", + "\tparts := 
strings.Split(csv, \",\")", + "\tvar result CsvResult", + "", + "\tfor _, part := range parts {", + "\t\tpart = strings.TrimSpace(part)", + "", + "\t\tif strings.HasPrefix(part, \"ns=\") {", + "\t\t\tresult.Namespace = strings.TrimPrefix(part, \"ns=\")", + "\t\t} else {", + "\t\t\tresult.NameCsv = part", + "\t\t}", + "\t}", + "\treturn result", + "}" + ] + }, + { + "name": "checkIfCsvUnderTest", + "qualifiedName": "checkIfCsvUnderTest", + "exported": false, + "signature": "func(*v1alpha1.ClusterServiceVersion)(bool)", + "doc": "checkIfCsvUnderTest determines if a CSV is part of the test set\n\nThe function iterates through the global list of operators defined for\ntesting, checking whether any entry’s CSV name matches that of the supplied\nobject. If a match is found it returns true; otherwise false. This boolean\nindicates whether the given CSV should be considered under test in subsequent\nvalidation logic.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:169", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "checkValidOperatorInstallation", + "kind": "function", + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", + "\t// 1. 
operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. 
non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func checkIfCsvUnderTest(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tfor _, testOperator := range env.Operators {", + "\t\tif testOperator.Csv.Name == csv.Name {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "checkValidOperatorInstallation", + "qualifiedName": "checkValidOperatorInstallation", + "exported": false, + "signature": "func(string)(bool, []string, error)", + "doc": "checkValidOperatorInstallation Determines if a namespace hosts only valid single or multi‑namespace operators\n\nThe function inspects all ClusterServiceVersions in the specified namespace,\ncategorising them as installed under test, not under test, or targeting other\nnamespaces. It also checks for non‑operator pods that do not belong to any\noperator. 
The return values indicate whether the namespace is dedicated to\nvalid operators and provide lists of any problematic objects.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:206", + "calls": [ + { + "name": "getCsvsBy", + "kind": "function", + "source": [ + "func getCsvsBy(namespace string, allCsvs []*v1alpha1.ClusterServiceVersion) (csvsInNamespace []*v1alpha1.ClusterServiceVersion) {", + "\tfor _, csv := range allCsvs {", + "\t\tif csv.Namespace == namespace {", + "\t\t\tcsvsInNamespace = append(csvsInNamespace, csv)", + "\t\t}", + "\t}", + "\treturn csvsInNamespace", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "checkIfCsvUnderTest", + "kind": "function", + "source": [ + "func checkIfCsvUnderTest(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tfor _, testOperator := range env.Operators {", + "\t\tif testOperator.Csv.Name == csv.Name {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "isSingleNamespacedOperator", + "kind": "function", + "source": [ + "func isSingleNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool {", + "\treturn len(targetNamespaces) == 1 \u0026\u0026 operatorNamespace != targetNamespaces[0]", + "}" + ] + }, + { + "name": "isMultiNamespacedOperator", + "kind": "function", + "source": [ + "func isMultiNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool {", + "\treturn len(targetNamespaces) \u003e 1 \u0026\u0026 !stringhelper.StringInSlice(targetNamespaces, operatorNamespace, false)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "isCsvInNamespaceClusterWide", + "kind": "function", + "source": [ + "func isCsvInNamespaceClusterWide(csvName string, allCsvs []*v1alpha1.ClusterServiceVersion) bool {", + "\tisClusterWide := true", + "\tfor _, 
eachCsv := range allCsvs {", + "\t\tif eachCsv.Name == csvName {", + "\t\t\ttargetNamespaces, exists := eachCsv.Annotations[\"olm.targetNamespaces\"]", + "\t\t\tif exists \u0026\u0026 targetNamespaces != \"\" {", + "\t\t\t\tisClusterWide = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn isClusterWide", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "findPodsNotBelongingToOperators", + "kind": "function", + "source": [ + "func findPodsNotBelongingToOperators(namespace string) (podsBelongingToNoOperators []string, err error) {", + "\tallPods := getAllPodsBy(namespace, env.AllPods)", + "\tfor index := range allPods {", + "\t\tpod := allPods[index]", + "\t\ttopOwners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn podsBelongingToNoOperators, err", + "\t\t}", + "", + "\t\tvalidOwnerFound := false", + "\t\tfor _, owner := range topOwners {", + "\t\t\tif owner.Kind == v1alpha1.ClusterServiceVersionKind \u0026\u0026 owner.Namespace == namespace {", + "\t\t\t\tvalidOwnerFound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !validOwnerFound {", + "\t\t\tpodsBelongingToNoOperators = append(podsBelongingToNoOperators, pod.Name)", + "\t\t}", + "\t}", + "", + "\treturn podsBelongingToNoOperators, nil", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces", + "kind": "function", + "source": [ + "func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting 
testOnlySingleNamespacedOperatorsAllowedInTenantNamespaces\")", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\toperatorNamespaces := make(map[string]bool)", + "\tfor _, operator := range env.Operators {", + "\t\toperatorNamespace := operator.Csv.Annotations[\"olm.operatorNamespace\"]", + "\t\tfor _, namespace := range env.Namespaces {", + "\t\t\tif namespace == operatorNamespace {", + "\t\t\t\toperatorNamespaces[operatorNamespace] = true", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tfor operatorNamespace := range operatorNamespaces { // operator installed namespace", + "\t\tcheck.LogInfo(\"Checking if namespace %s contains only valid single/ multi namespaced operators\", operatorNamespace)", + "", + "\t\tisDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators,", + "\t\t\tcsvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err := checkValidOperatorInstallation(operatorNamespace)", + "", + "\t\tcheck.LogInfo(\"isDedicatedOperatorNamespace=%t, singleOrMultiNamespaceOperators=%s, nonSingleOrMultiNamespaceOperators=%s, csvsTargetingNamespace=%s, operatorsFoundButNotUnderTest=%s, podsNotBelongingToOperators=%s\", isDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators) //nolint:lll", + "", + "\t\tif err != nil {", + "\t\t\tmsg := fmt.Sprintf(\"Operator namespace %s check got error %v\", operatorNamespace, err)", + "\t\t\tcheck.LogError(\"%s\", msg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isDedicatedOperatorNamespace {", + "\t\t\tvar msg string", + "\t\t\tif len(singleOrMultiNamespaceOperators) == 0 {", + "\t\t\t\tmsg = \"Namespace 
contains no installed single/multi namespace operators\"", + "\t\t\t} else {", + "\t\t\t\tmsg = fmt.Sprintf(\"Namespace is dedicated to single/multi namespace operators (%s) \", strings.Join(singleOrMultiNamespaceOperators, \", \"))", + "\t\t\t}", + "", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, true, operatorNamespace))", + "\t\t} else {", + "\t\t\tmsg := \"Operator namespace is not dedicated to single/multi operators because \"", + "", + "\t\t\tif len(nonSingleOrMultiNamespaceOperators) != 0 {", + "\t\t\t\tmsg += \"- operators are installed with an install mode different from single/multi (\" + strings.Join(nonSingleOrMultiNamespaceOperators, \", \") + \")\\n\"", + "\t\t\t}", + "", + "\t\t\tif len(csvsTargetingNamespace) != 0 {", + "\t\t\t\tmsg += \"- this namespace is the target namespace of other operators (\" + strings.Join(csvsTargetingNamespace, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(operatorsFoundButNotUnderTest) != 0 {", + "\t\t\t\tmsg += \"- operators not under test found (\" + strings.Join(operatorsFoundButNotUnderTest, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(podsNotBelongingToOperators) != 0 {", + "\t\t\t\tmsg += \"- invalid non operator pods found (\" + strings.Join(podsNotBelongingToOperators, \", \") + \")\"", + "\t\t\t}", + "", + "\t\t\tnonCompliantNs := testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace)", + "", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantNs)", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", 
+ "\t// 1. operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. 
non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + }, + { + "name": "findPodsNotBelongingToOperators", + "qualifiedName": "findPodsNotBelongingToOperators", + "exported": false, + "signature": "func(string)([]string, error)", + "doc": "findPodsNotBelongingToOperators identifies pods that are not managed by any operator in the given namespace\n\nThe function retrieves all pods within a namespace, then for each pod\ndetermines its top-level owners using helper logic. It checks whether any\nowner is a ClusterServiceVersion belonging to the same namespace; if none\nexist, the pod name is added to the result list. 
The returned slice contains\nnames of pods that are not controlled by an operator, along with an error if\nownership resolution fails.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:260", + "calls": [ + { + "name": "getAllPodsBy", + "kind": "function", + "source": [ + "func getAllPodsBy(namespace string, allPods []*provider.Pod) (podsInNamespace []*provider.Pod) {", + "\tfor i := range allPods {", + "\t\tpod := allPods[i]", + "\t\tif pod.Namespace == namespace {", + "\t\t\tpodsInNamespace = append(podsInNamespace, pod)", + "\t\t}", + "\t}", + "\treturn podsInNamespace", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper", + "name": "GetPodTopOwner", + "kind": "function", + "source": [ + "func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) {", + "\ttopOwners = make(map[string]TopOwner)", + "\terr = followOwnerReferences(", + "\t\tclientsholder.GetClientsHolder().GroupResources,", + "\t\tclientsholder.GetClientsHolder().DynamicClient,", + "\t\ttopOwners,", + "\t\tpodNamespace,", + "\t\tpodOwnerReferences)", + "\tif err != nil {", + "\t\treturn topOwners, fmt.Errorf(\"could not get top owners, err: %v\", err)", + "\t}", + "\treturn topOwners, nil", + "}" + ] + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "checkValidOperatorInstallation", + "kind": "function", + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", + "\t// 1. 
operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. 
non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func findPodsNotBelongingToOperators(namespace string) (podsBelongingToNoOperators []string, err error) {", + "\tallPods := getAllPodsBy(namespace, env.AllPods)", + "\tfor index := range allPods {", + "\t\tpod := allPods[index]", + "\t\ttopOwners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn podsBelongingToNoOperators, err", + "\t\t}", + "", + "\t\tvalidOwnerFound := false", + "\t\tfor _, owner := range topOwners {", + "\t\t\tif owner.Kind == v1alpha1.ClusterServiceVersionKind \u0026\u0026 owner.Namespace == namespace {", + "\t\t\t\tvalidOwnerFound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !validOwnerFound {", + "\t\t\tpodsBelongingToNoOperators = append(podsBelongingToNoOperators, pod.Name)", + "\t\t}", + "\t}", + "", + "\treturn podsBelongingToNoOperators, nil", + "}" + ] + }, + { + "name": "getAllPodsBy", + "qualifiedName": "getAllPodsBy", + "exported": false, + "signature": "func(string, []*provider.Pod)([]*provider.Pod)", + "doc": "getAllPodsBy Filters pods by namespace\n\nThe function 
iterates over a slice of pod objects, selecting only those whose\nNamespace field matches the provided namespace string. Matching pods are\nappended to a new slice that is returned to the caller. This helper\nsimplifies gathering all pods within a specific namespace for further\nprocessing.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:116", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "findPodsNotBelongingToOperators", + "kind": "function", + "source": [ + "func findPodsNotBelongingToOperators(namespace string) (podsBelongingToNoOperators []string, err error) {", + "\tallPods := getAllPodsBy(namespace, env.AllPods)", + "\tfor index := range allPods {", + "\t\tpod := allPods[index]", + "\t\ttopOwners, err := podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)", + "\t\tif err != nil {", + "\t\t\treturn podsBelongingToNoOperators, err", + "\t\t}", + "", + "\t\tvalidOwnerFound := false", + "\t\tfor _, owner := range topOwners {", + "\t\t\tif owner.Kind == v1alpha1.ClusterServiceVersionKind \u0026\u0026 owner.Namespace == namespace {", + "\t\t\t\tvalidOwnerFound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !validOwnerFound {", + "\t\t\tpodsBelongingToNoOperators = append(podsBelongingToNoOperators, pod.Name)", + "\t\t}", + "\t}", + "", + "\treturn podsBelongingToNoOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getAllPodsBy(namespace string, allPods []*provider.Pod) (podsInNamespace []*provider.Pod) {", + "\tfor i := range allPods {", + "\t\tpod := allPods[i]", + "\t\tif pod.Namespace == namespace {", + "\t\t\tpodsInNamespace = append(podsInNamespace, pod)", + "\t\t}", + "\t}", + "\treturn podsInNamespace", + "}" + ] + }, + { + "name": "getCsvsBy", + "qualifiedName": "getCsvsBy", + "exported": false, + "signature": "func(string, 
[]*v1alpha1.ClusterServiceVersion)([]*v1alpha1.ClusterServiceVersion)", + "doc": "getCsvsBy Filters CSVs to a specific namespace\n\nThis function iterates over all provided ClusterServiceVersion objects,\nselecting only those whose Namespace field matches the supplied string. The\nmatching CSVs are collected into a slice that is returned to the caller. If\nno CSVs match, an empty slice is returned.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:132", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "checkValidOperatorInstallation", + "kind": "function", + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", + "\t// 1. 
operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. 
non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getCsvsBy(namespace string, allCsvs []*v1alpha1.ClusterServiceVersion) (csvsInNamespace []*v1alpha1.ClusterServiceVersion) {", + "\tfor _, csv := range allCsvs {", + "\t\tif csv.Namespace == namespace {", + "\t\t\tcsvsInNamespace = append(csvsInNamespace, csv)", + "\t\t}", + "\t}", + "\treturn csvsInNamespace", + "}" + ] + }, + { + "name": "isCsvInNamespaceClusterWide", + "qualifiedName": "isCsvInNamespaceClusterWide", + "exported": false, + "signature": "func(string, []*v1alpha1.ClusterServiceVersion)(bool)", + "doc": "isCsvInNamespaceClusterWide determines if a CSV is cluster‑wide based on its annotations\n\nThe function scans all provided ClusterServiceVersions for the one matching\nthe given name. It checks whether that CSV has a nonempty\n\"olm.targetNamespaces\" annotation; if so, it marks the CSV as not\ncluster‑wide. 
The result is returned as a boolean indicating whether the\noperator applies across the entire cluster.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:185", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "checkValidOperatorInstallation", + "kind": "function", + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", + "\t// 1. operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, 
csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isCsvInNamespaceClusterWide(csvName string, allCsvs []*v1alpha1.ClusterServiceVersion) bool {", + "\tisClusterWide := true", + "\tfor _, eachCsv := range allCsvs {", + "\t\tif eachCsv.Name == csvName {", + "\t\t\ttargetNamespaces, exists := eachCsv.Annotations[\"olm.targetNamespaces\"]", + "\t\t\tif exists \u0026\u0026 targetNamespaces != \"\" {", + "\t\t\t\tisClusterWide = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn isClusterWide", + "}" + ] + }, + { + "name": "isMultiNamespacedOperator", + "qualifiedName": "isMultiNamespacedOperator", + "exported": false, + "signature": "func(string, []string)(bool)", + "doc": "isMultiNamespacedOperator determines if an operator targets multiple namespaces excluding its own\n\nThis function checks whether the list of target namespaces for an operator\ncontains more than one entry and that the operator’s own namespace is not\namong them. 
It returns true only when the operator is intended to operate\nacross several distinct namespaces, indicating a multi‑namespaced\ndeployment scenario.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:158", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "checkValidOperatorInstallation", + "kind": "function", + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", + "\t// 1. 
operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. 
non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isMultiNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool {", + "\treturn len(targetNamespaces) \u003e 1 \u0026\u0026 !stringhelper.StringInSlice(targetNamespaces, operatorNamespace, false)", + "}" + ] + }, + { + "name": "isSingleNamespacedOperator", + "qualifiedName": "isSingleNamespacedOperator", + "exported": false, + "signature": "func(string, []string)(bool)", + "doc": "isSingleNamespacedOperator Determines if an operator is single‑namespace scoped but targets a different namespace\n\nThe function checks that the targetNamespaces slice contains exactly one\nentry and that this entry differs from the operatorNamespace. 
If both\nconditions hold, it returns true indicating the operator runs in its own\nnamespace yet serves another namespace; otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/helper.go:147", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "checkValidOperatorInstallation", + "kind": "function", + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", + "\t// 1. operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for 
non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isSingleNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool {", + "\treturn len(targetNamespaces) == 1 \u0026\u0026 operatorNamespace != targetNamespaces[0]", + "}" + ] + }, + { + "name": "testMultipleSameOperators", + "qualifiedName": "testMultipleSameOperators", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testMultipleSameOperators Verifies operators are not duplicated across installations\n\nThe function iterates over all installed operators in the test environment,\nchecking each pair for duplicate CSV names with different versions. It\nrecords non‑compliant operators when a duplicate is found and marks others\nas compliant. 
The results are then set on the provided check object.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:519", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "OperatorInstalledMoreThanOnce", + "kind": "function", + "source": [ + "func OperatorInstalledMoreThanOnce(operator1, operator2 *provider.Operator) bool {", + "\t// Safeguard against nil operators (should not happen)", + "\tif operator1 == nil || operator2 == nil {", + "\t\treturn false", + "\t}", + "", + "\tlog.Debug(\"Comparing operator %q with operator %q\", operator1.Name, operator2.Name)", + "", + "\t// Retrieve the version from each CSV", + "\tcsv1Version := operator1.Csv.Spec.Version.String()", + "\tcsv2Version := operator2.Csv.Spec.Version.String()", + "", + "\tlog.Debug(\"CSV1 Version: %s\", csv1Version)", + "\tlog.Debug(\"CSV2 Version: %s\", csv2Version)", + "", + "\t// Strip the version from the CSV name by removing the suffix (which should be the version)", + "\tcsv1Name := strings.TrimSuffix(operator1.Csv.Name, \".v\"+csv1Version)", + "\tcsv2Name := strings.TrimSuffix(operator2.Csv.Name, \".v\"+csv2Version)", + "", + "\tlog.Debug(\"Comparing CSV names %q and %q\", csv1Name, csv2Name)", + "", + "\t// The CSV name should be the same, but the version should be different", + "\t// if the operator is installed more than once.", + "\tif operator1.Csv != nil \u0026\u0026 operator2.Csv != nil \u0026\u0026", + "\t\tcsv1Name == csv2Name \u0026\u0026", + "\t\tcsv1Version != csv2Version {", + "\t\tlog.Error(\"Operator %q is installed more than once\", operator1.Name)", + "\t\treturn true", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + 
"kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Ensure the CSV name is unique and not installed more than once.", + "\t// CSV Names are unique and OLM installs them with name.version format.", + "\t// So, we can check if the CSV 
name is installed more than once.", + "", + "\tcheck.LogInfo(\"Checking if the operator is installed more than once\")", + "", + "\tfor _, op := range env.AllOperators {", + "\t\tcheck.LogDebug(\"Checking operator %q\", op.Name)", + "\t\tcheck.LogDebug(\"Number of operators to check %s against: %d\", op.Name, len(env.AllOperators))", + "\t\tfor _, op2 := range env.AllOperators {", + "\t\t\t// Check if the operator is installed more than once.", + "\t\t\tif OperatorInstalledMoreThanOnce(op, op2) {", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(", + "\t\t\t\t\top.Namespace, op.Name, \"Operator is installed more than once\", false))", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(", + "\t\t\top.Namespace, op.Name, \"Operator is installed only once\", true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces", + "qualifiedName": "testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces Validates that only single or multi‑namespaced operators reside in tenant namespaces\n\nThe routine iterates over all operator namespaces found in the test\nenvironment, checks each for dedicated status, and gathers any operators or\npods that violate the single/multi‑namespace rule. It builds compliance\nreports per namespace, marking those that contain only valid operators as\ncompliant and reporting detailed reasons for non‑compliance otherwise. 
The\nresults are set on the provided check object.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:158", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "make", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "checkValidOperatorInstallation", + "kind": "function", + "source": [ + "func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators,", + "\tnonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) {", + "\t// 1. operator installation checks", + "\tcsvsInNamespace := getCsvsBy(namespace, env.AllCsvs)", + "", + "\tfor _, csv := range csvsInNamespace {", + "\t\toperatorNamespace := csv.Annotations[\"olm.operatorNamespace\"]", + "\t\ttargetNamespacesStr := csv.Annotations[\"olm.targetNamespaces\"]", + "", + "\t\tvar targetNameSpaces []string", + "\t\tif targetNamespacesStr != \"\" {", + "\t\t\ttargetNameSpaces = strings.Split(targetNamespacesStr, \",\")", + "\t\t}", + "", + "\t\tif namespace == operatorNamespace {", + "\t\t\tif checkIfCsvUnderTest(csv) {", + "\t\t\t\tisSingleOrMultiInstallation := isSingleNamespacedOperator(operatorNamespace, targetNameSpaces) || isMultiNamespacedOperator(operatorNamespace, targetNameSpaces)", + "\t\t\t\tif isSingleOrMultiInstallation {", + "\t\t\t\t\tsingleOrMultiNamespaceOperators = append(singleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t} else {", + "\t\t\t\t\tnonSingleOrMultiNamespaceOperators = append(nonSingleOrMultiNamespaceOperators, csv.Name)", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\toperatorsFoundButNotUnderTest = append(operatorsFoundButNotUnderTest, csv.Name)", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif !isCsvInNamespaceClusterWide(csv.Name, env.AllCsvs) { // check for non-cluster wide operators", + "\t\t\t\tcsvsTargetingNamespace = append(csvsTargetingNamespace, 
csv.Name)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// 2. non-operator pods check", + "\tpodsNotBelongingToOperators, err = findPodsNotBelongingToOperators(namespace)", + "\tif err != nil {", + "\t\treturn false, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err", + "\t}", + "", + "\tvar isValid bool", + "\tif len(singleOrMultiNamespaceOperators) \u003e 0 {", + "\t\tisValid = len(nonSingleOrMultiNamespaceOperators) == 0 \u0026\u0026 len(csvsTargetingNamespace) == 0 \u0026\u0026 len(podsNotBelongingToOperators) == 0 \u0026\u0026 len(operatorsFoundButNotUnderTest) == 0", + "\t}", + "", + "\treturn isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace 
string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNamespacedReportObject", + "kind": "function", + "source": [ + "func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) {", + "\treturn NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *checksdb.Check, env 
*provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOnlySingleNamespacedOperatorsAllowedInTenantNamespaces\")", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\toperatorNamespaces := make(map[string]bool)", + "\tfor _, operator := range env.Operators {", + "\t\toperatorNamespace := operator.Csv.Annotations[\"olm.operatorNamespace\"]", + "\t\tfor _, namespace := range env.Namespaces {", + "\t\t\tif namespace == operatorNamespace {", + "\t\t\t\toperatorNamespaces[operatorNamespace] = true", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tfor operatorNamespace := range operatorNamespaces { // operator installed namespace", + "\t\tcheck.LogInfo(\"Checking if namespace %s contains only valid single/ multi namespaced operators\", operatorNamespace)", + "", + "\t\tisDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators,", + "\t\t\tcsvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, err := checkValidOperatorInstallation(operatorNamespace)", + "", + "\t\tcheck.LogInfo(\"isDedicatedOperatorNamespace=%t, singleOrMultiNamespaceOperators=%s, nonSingleOrMultiNamespaceOperators=%s, csvsTargetingNamespace=%s, operatorsFoundButNotUnderTest=%s, podsNotBelongingToOperators=%s\", isDedicatedOperatorNamespace, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators) //nolint:lll", + "", + "\t\tif err != nil {", + "\t\t\tmsg := fmt.Sprintf(\"Operator namespace %s check got error %v\", operatorNamespace, err)", + "\t\t\tcheck.LogError(\"%s\", msg)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif isDedicatedOperatorNamespace {", + "\t\t\tvar msg string", + "\t\t\tif 
len(singleOrMultiNamespaceOperators) == 0 {", + "\t\t\t\tmsg = \"Namespace contains no installed single/multi namespace operators\"", + "\t\t\t} else {", + "\t\t\t\tmsg = fmt.Sprintf(\"Namespace is dedicated to single/multi namespace operators (%s) \", strings.Join(singleOrMultiNamespaceOperators, \", \"))", + "\t\t\t}", + "", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, true, operatorNamespace))", + "\t\t} else {", + "\t\t\tmsg := \"Operator namespace is not dedicated to single/multi operators because \"", + "", + "\t\t\tif len(nonSingleOrMultiNamespaceOperators) != 0 {", + "\t\t\t\tmsg += \"- operators are installed with an install mode different from single/multi (\" + strings.Join(nonSingleOrMultiNamespaceOperators, \", \") + \")\\n\"", + "\t\t\t}", + "", + "\t\t\tif len(csvsTargetingNamespace) != 0 {", + "\t\t\t\tmsg += \"- this namespace is the target namespace of other operators (\" + strings.Join(csvsTargetingNamespace, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(operatorsFoundButNotUnderTest) != 0 {", + "\t\t\t\tmsg += \"- operators not under test found (\" + strings.Join(operatorsFoundButNotUnderTest, \", \") + \")\\n\"", + "\t\t\t}", + "\t\t\tif len(podsNotBelongingToOperators) != 0 {", + "\t\t\t\tmsg += \"- invalid non operator pods found (\" + strings.Join(podsNotBelongingToOperators, \", \") + \")\"", + "\t\t\t}", + "", + "\t\t\tnonCompliantNs := testhelper.NewNamespacedReportObject(msg, testhelper.Namespace, false, operatorNamespace)", + "", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, nonCompliantNs)", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + }, + { + "name": "testOperatorCatalogSourceBundleCount", + "qualifiedName": "testOperatorCatalogSourceBundleCount", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorCatalogSourceBundleCount Verifies 
catalog sources contain fewer than a thousand bundle images\n\nThe function iterates over operators in the test environment, matching each\nto its catalog source via package manifests. It then counts referenced\nbundles using probe containers for older OpenShift versions or package\nmanifests otherwise. If a catalog source exceeds 1000 bundles it logs an\nerror and records non‑compliance; otherwise it records compliance.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:557", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "NewVersion", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "Major", + "kind": "function" + }, + { + "name": "Major", + "kind": "function" + }, + { + "name": "Minor", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/catalogsource", + "name": "SkipPMBasedOnChannel", + "kind": "function", + "source": [ + "func SkipPMBasedOnChannel(channels []olmpkgv1.PackageChannel, csvName string) bool {", + "\t// This logic is in place because it is possible for an operator to pull from a multiple package manifests.", + "\tskipPMBasedOnChannel := true", + "\tfor c := range channels {", + "\t\tlog.Debug(\"Comparing channel currentCSV %q with current CSV %q\", channels[c].CurrentCSV, csvName)", + "\t\tlog.Debug(\"Number of channel entries %d\", len(channels[c].Entries))", + "\t\tfor _, entry := range channels[c].Entries {", + "\t\t\tlog.Debug(\"Comparing entry name %q with current CSV %q\", entry.Name, csvName)", + "", + "\t\t\tif entry.Name == csvName {", + "\t\t\t\tlog.Debug(\"Skipping package manifest based on channel entry %q\", entry.Name)", + "\t\t\t\tskipPMBasedOnChannel = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !skipPMBasedOnChannel {", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn skipPMBasedOnChannel", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + 
"\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int {", + "\t// Now that we know the catalog source, we are going to count up all of the relatedImages", + "\t// that are associated with the catalog source. 
This will give us the number of bundles that", + "\t// are available in the catalog source.", + "", + "\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count", + "\tconst (", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn 0", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\treturn getCatalogSourceBundleCountFromProbeContainer(env, cs)", + "\t\t}", + "", + "\t\t// If we didn't find the bundle count via the probe container, we can attempt to use the package manifests", + "\t}", + "", + "\t// If we didn't find the bundle count via the probe container, we can use the package manifests", + "\t// to get the bundle count", + "\treturn getCatalogSourceBundleCountFromPackageManifests(env, cs)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCatalogSourceReportObject", + "kind": "function", + "source": [ + "func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CatalogSourceType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aCatalogSourceName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + 
"kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCatalogSourceReportObject", + "kind": "function", + "source": [ + "func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CatalogSourceType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aCatalogSourceName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCatalogSourceReportObject", + "kind": "function", + "source": [ + "func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CatalogSourceType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aCatalogSourceName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, 
op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + "", + "\t\t// Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. 
Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", 
+ "}" + ] + }, + { + "name": "testOperatorCrdOpenAPISpec", + "qualifiedName": "testOperatorCrdOpenAPISpec", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorCrdOpenAPISpec Verifies that operator CRDs use OpenAPI v3 schemas\n\nThe function iterates over all CRDs in the test environment, checks whether\neach has an OpenAPI v3 schema defined, and records compliance status. It logs\nthe result for each CRD and collects compliant and non‑compliant objects\ninto separate slices. Finally it reports these results via the check’s\nSetResult method.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:271", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/openapi", + "name": "IsCRDDefinedWithOpenAPI3Schema", + "kind": "function", + "source": [ + "func IsCRDDefinedWithOpenAPI3Schema(crd *apiextv1.CustomResourceDefinition) bool {", + "\tfor _, version := range crd.Spec.Versions {", + "\t\tcrdSchema := version.Schema.String()", + "", + "\t\tcontainsOpenAPIV3SchemaSubstr := strings.Contains(strings.ToLower(crdSchema),", + "\t\t\tstrings.ToLower(testhelper.OpenAPIV3Schema))", + "", + "\t\tif containsOpenAPIV3SchemaSubstr {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + 
}, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdOpenAPISpec\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tif openapi.IsCRDDefinedWithOpenAPI3Schema(crd) {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is defined with OpenAPIV3 schema \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is 
defined with OpenAPIV3 schema \", true).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is not defined with OpenAPIV3 schema \", crd.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is not defined with OpenAPIV3 schema \", false).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOperatorCrdVersioning", + "qualifiedName": "testOperatorCrdVersioning", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorCrdVersioning Verifies that all operator CRDs use Kubernetes-compatible versioning\n\nThe routine iterates over each CRD in the test environment, checking every\ndeclared version against a Kubernetes semantic‑version pattern. If any\nnon‑conforming version is found, it logs an error and records the CRD as\nnon‑compliant; otherwise it logs success and marks it compliant. 
Finally,\nit reports the lists of compliant and non‑compliant objects for the test\nresult.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:231", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "name": "IsValidK8sVersion", + "kind": "function", + "source": [ + "func IsValidK8sVersion(version string) bool {", + "\tr := regexp.MustCompile(`^(v)([1-9]\\d*)+((alpha|beta)([1-9]\\d*)+){0,2}$`)", + "\treturn r.MatchString(version)", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tdoesUseK8sVersioning := true", + "\t\tnonCompliantVersion := \"\"", + "", + "\t\tfor _, crdVersion := range crd.Spec.Versions {", + "\t\t\tversionName := crdVersion.Name", + "\t\t\tcheck.LogDebug(\"Checking for Operator CRD %s with version %s\", crd.Name, versionName)", + "", + "\t\t\tif !versions.IsValidK8sVersion(versionName) {", + "\t\t\t\tdoesUseK8sVersioning = false", + "\t\t\t\tnonCompliantVersion = versionName", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif doesUseK8sVersioning {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s has valid K8s versioning \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has valid K8s versioning \", true).AddField(testhelper.CrdVersion, crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator CRD %s has invalid K8s versioning %s \", crd.Name, nonCompliantVersion)", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD has invalid K8s versioning \", false).AddField(testhelper.CrdVersion, crd.Name))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOperatorInstallationAccessToSCC", + "qualifiedName": "testOperatorInstallationAccessToSCC", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorInstallationAccessToSCC Checks operators’ CSV cluster permissions for disallowed SCC access\n\nThe function iterates over all operators in the test environment, examining\neach operator’s ClusterServiceVersion for clusterPermissions. If no\npermissions are defined it records compliance; otherwise it calls a helper to\ndetect any rule granting access to securitycontextconstraints and logs\nnon‑compliance. Results are collected into report objects and set on the\ncheck.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:353", + "calls": [ + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/access", + "name": "PermissionsHaveBadRule", + "kind": "function", + "source": [ + "func PermissionsHaveBadRule(clusterPermissions []v1alpha1.StrategyDeploymentPermissions) 
bool {", + "\tbadRuleFound := false", + "\tfor permissionIndex := range clusterPermissions {", + "\t\tpermission := \u0026clusterPermissions[permissionIndex]", + "\t\tfor ruleIndex := range permission.Rules {", + "\t\t\trule := \u0026permission.Rules[ruleIndex]", + "", + "\t\t\t// Check whether the rule is for the security api group.", + "\t\t\tsecurityGroupFound := false", + "\t\t\tfor _, group := range rule.APIGroups {", + "\t\t\t\tif group == \"*\" || group == \"security.openshift.io\" {", + "\t\t\t\t\tsecurityGroupFound = true", + "\t\t\t\t\tbreak", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tif !securityGroupFound {", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Now check whether it grants some access to securitycontextconstraint resources.", + "\t\t\tfor _, resource := range rule.Resources {", + "\t\t\t\tif resource == \"*\" || resource == \"securitycontextconstraints\" {", + "\t\t\t\t\t// Keep reviewing other permissions' rules so we can log all the failing ones in the claim file.", + "\t\t\t\t\tbadRuleFound = true", + "\t\t\t\t\tbreak", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn badRuleFound", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason 
string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcsv := operator.Csv", + "\t\tcheck.LogDebug(\"Checking operator %s\", operator)", + "\t\tclusterPermissions := csv.Spec.InstallStrategy.StrategySpec.ClusterPermissions", + "\t\tif len(clusterPermissions) == 0 {", + "\t\t\tcheck.LogInfo(\"No clusterPermissions found in %s's CSV\", operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"No RBAC rules for Security Context Constraints found in CSV (no clusterPermissions found)\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Fails in case any cluster permission has a rule that refers to 
securitycontextconstraints.", + "\t\tif access.PermissionsHaveBadRule(clusterPermissions) {", + "\t\t\tcheck.LogInfo(\"Operator %s has a rule for a service account to access cluster SCCs\",", + "\t\t\t\toperator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"One or more RBAC rules for Security Context Constraints found in CSV\", false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"No RBAC rules for Security Context Constraints found in CSV\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOperatorInstallationPhaseSucceeded", + "qualifiedName": "testOperatorInstallationPhaseSucceeded", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorInstallationPhaseSucceeded Verifies that each operator reaches the Succeeded phase\n\nThe function iterates over all operators in the test environment, waiting for\neach ClusterServiceVersion to report a Succeeded status. 
It logs success or\nfailure, collects compliant and non‑compliant objects into report entries,\nand finally records the results on the check object.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:326", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "WaitOperatorReady", + "kind": "function", + "source": [ + "func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to take into account that its pods", + "\t\t// could have been deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. 
Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + 
"\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, op := range env.Operators {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", op)", + "\t\tif phasecheck.WaitOperatorReady(op.Csv) {", + "\t\t\tcheck.LogInfo(\"Operator %q is in Succeeded phase\", op)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator on Succeeded state \", true).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator %q is not in Succeeded phase (phase=%q)\", op, op.Csv.Status.Phase)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator not in Succeeded state \", false).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t}", + "\t}", + "", + 
"\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOperatorOlmSkipRange", + "qualifiedName": "testOperatorOlmSkipRange", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorOlmSkipRange Verifies the presence of an OLM skipRange annotation on each operator\n\nThe function iterates over all operators in the test environment, checking\nwhether each has a non-empty \"olm.skipRange\" annotation. It logs information\nabout the check and records compliant or non-compliant operators accordingly.\nFinally, it sets the result of the check with lists of compliant and\nnon‑compliant report objects.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:493", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" 
+ ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorOlmSkipRange(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "", + "\t\tif operator.Csv.Annotations[\"olm.skipRange\"] == \"\" {", + "\t\t\tcheck.LogError(\"OLM skipRange not found for Operator %q\", operator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM skipRange not found for operator\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"OLM skipRange %q found for Operator %q\", operator.Csv.Annotations[\"olm.skipRange\"], operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM skipRange found for operator\", true).", + "\t\t\t\tAddField(\"olm.SkipRange\", operator.Csv.Annotations[\"olm.skipRange\"]))", + "\t\t}", + "\t}", + 
"\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOperatorOlmSubscription", + "qualifiedName": "testOperatorOlmSubscription", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorOlmSubscription Verifies that each operator has an OLM subscription\n\nThe function iterates over all operators in the test environment, logging\nstatus for each one. It checks whether a SubscriptionName exists; if missing,\nit records a non‑compliant report object and logs an error. If present, it\ncreates a compliant report object noting the subscription was found. Finally,\nit sets the check result with the collected objects.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:388", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, 
aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcheck.LogInfo(\"Testing Operator %q\", operator)", + "\t\tif operator.SubscriptionName == \"\" {", + "\t\t\tcheck.LogError(\"OLM subscription not found for Operator %q\", operator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, \"OLM subscription not found for operator, so it is not installed via OLM\", false).", + "\t\t\t\tAddField(testhelper.SubscriptionName, operator.SubscriptionName))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"OLM subscription %q found for Operator %q\", operator.SubscriptionName, operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(env.Operators[i].Namespace, env.Operators[i].Name, 
\"install-status-no-privilege (subscription found)\", true).", + "\t\t\t\tAddField(testhelper.SubscriptionName, operator.SubscriptionName))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOperatorPodsNoHugepages", + "qualifiedName": "testOperatorPodsNoHugepages", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorPodsNoHugepages Checks that operator pods do not use hugepages\n\nThe function iterates over all CSV-to-pod mappings in the test environment,\nexamining each pod to determine whether it requests hugepage memory. Pods\nrequesting hugepages are marked non‑compliant and logged as errors;\notherwise they are considered compliant and logged positively. After\nprocessing all pods, the results are set on the check object for reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:465", + "calls": [ + { + "name": "SplitCsv", + "kind": "function", + "source": [ + "func SplitCsv(csv string) CsvResult {", + "\t// Split by comma to separate components", + "\tparts := strings.Split(csv, \",\")", + "\tvar result CsvResult", + "", + "\tfor _, part := range parts {", + "\t\tpart = strings.TrimSpace(part)", + "", + "\t\tif strings.HasPrefix(part, \"ns=\") {", + "\t\t\tresult.Namespace = strings.TrimPrefix(part, \"ns=\")", + "\t\t} else {", + "\t\t\tresult.NameCsv = part", + "\t\t}", + "\t}", + "\treturn result", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "HasHugepages", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant 
bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor csv, pods := range env.CSVToPodListMap {", + "\t\tCsvResult := SplitCsv(csv)", + "\t\tcheck.LogInfo(\"Name of csv: %q in namespaces: %q\", CsvResult.NameCsv, 
CsvResult.Namespace)", + "\t\tfor _, pod := range pods {", + "\t\t\tcheck.LogInfo(\"Testing Pod %q in namespace %q\", pod.Name, pod.Namespace)", + "\t\t\tif pod.HasHugepages() {", + "\t\t\t\tcheck.LogError(\"Pod %q in namespace %q has hugepages\", pod.Name, pod.Namespace)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has hugepages\", false))", + "\t\t\t} else {", + "\t\t\t\tcheck.LogInfo(\"Pod %q in namespace %q has no hugepages\", pod.Name, pod.Namespace)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(pod.Namespace, pod.Name, \"Pod has no hugepages\", true))", + "\t\t\t}", + "\t\t}", + "\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t}", + "}" + ] + }, + { + "name": "testOperatorSemanticVersioning", + "qualifiedName": "testOperatorSemanticVersioning", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorSemanticVersioning Verifies operators use semantic versioning\n\nThe function iterates through each operator in the test environment, checks\nif its version string conforms to semantic version rules, and logs the\noutcome. 
It collects compliant and non‑compliant operators into separate\nlists of report objects, then sets these as the result for the check.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:297", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions", + "name": "IsValidSemanticVersion", + "kind": "function", + "source": [ + "func IsValidSemanticVersion(version string) bool {", + "\t_, err := semver.NewVersion(version)", + "\treturn err == nil", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorSemanticVersioning(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorSemanticVersioning\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, operator := range env.Operators {", + "\t\toperatorVersion := operator.Version", + "\t\tcheck.LogInfo(\"Testing Operator %q for version %s\", operator, operatorVersion)", + "", + "\t\tif versions.IsValidSemanticVersion(operatorVersion) {", + "\t\t\tcheck.LogInfo(\"Operator %q has a valid semantic version %s\", operator, operatorVersion)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has a valid semantic version \", true).AddField(testhelper.Version, operatorVersion))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator %q has an invalid semantic version %s\", operator, operatorVersion)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"Operator has an invalid semantic version \", false).AddField(testhelper.Version, operatorVersion))", + "\t\t}", + "\t}", + "", + 
"\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOperatorSingleCrdOwner", + "qualifiedName": "testOperatorSingleCrdOwner", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOperatorSingleCrdOwner Verifies that each CustomResourceDefinition is owned by only one operator\n\nThe function builds a mapping of CRD names to the operators that declare\nownership in their CSVs, filtering duplicate versions per operator. It then\niterates through this map, marking any CRD with multiple owners as\nnon‑compliant and generating report objects accordingly. Finally, it\nrecords compliant and non‑compliant results on the check instance.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:416", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCrdReportObject", + "kind": "function", + "source": [ + "func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewCrdReportObject", + "kind": "function", + "source": [ + "func 
NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)", + "\tout.AddField(CustomResourceDefinitionName, aName)", + "\tout.AddField(CustomResourceDefinitionVersion, aVersion)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.OperatorTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.OperatorTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorInstallStatusSucceededIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationPhaseSucceeded(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorNoSCCAccess)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorInstallationAccessToSCC(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorIsInstalledViaOLMIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSubscription(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorHasSemanticVersioningIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSemanticVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdVersioningIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdVersioning(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCrdSchemaIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorCrdsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCrdOpenAPISpec(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorSingleCrdOwnerIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorSingleCrdOwner(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorPodsNoHugepages)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env), testhelper.GetNoOperatorPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorPodsNoHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorOlmSkipRange)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorOlmSkipRange(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestMultipleSameOperatorsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestMultipleSameOperators(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOperatorCatalogSourceBundleCountIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoCatalogSourcesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOperatorCatalogSourceBundleCount(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSingleOrMultiNamespacedOperatorInstallationInTenantNamespace)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOperatorSingleCrdOwner(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Map each CRD to a list of operators that own it", + "\tcrdOwners := map[string][]string{}", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\townedCrds := operator.Csv.Spec.CustomResourceDefinitions.Owned", + "", + "\t\t// Helper map to filter out different versions of the same CRD name.", + "\t\tuniqueOwnedCrds := map[string]struct{}{}", + "\t\tfor j := range ownedCrds {", + "\t\t\tuniqueOwnedCrds[ownedCrds[j].Name] = struct{}{}", + "\t\t}", + "", + "\t\t// Now we can append the operator as CRD owner", + "\t\tfor crdName := range uniqueOwnedCrds {", + 
"\t\t\tcrdOwners[crdName] = append(crdOwners[crdName], operator.Name)", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"CRDs owned by operator %s: %+v\", operator.Name, uniqueOwnedCrds)", + "\t}", + "", + "\t// Flag those that are owned by more than one operator", + "\tfor crd, opList := range crdOwners {", + "\t\tif len(opList) \u003e 1 {", + "\t\t\tcheck.LogError(\"CRD %q is owned by more than one operator (owners: %v)\", crd, opList)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\ttesthelper.NewCrdReportObject(crd, \"\", \"CRD is owned by more than one operator\", false).", + "\t\t\t\t\tAddField(testhelper.OperatorList, strings.Join(opList, \", \")))", + "\t\t} else {", + "\t\t\tcheck.LogDebug(\"CRD %q is owned by a single operator (%v)\", crd, opList[0])", + "\t\t\tcompliantObjects = append(compliantObjects,", + "\t\t\t\ttesthelper.NewCrdReportObject(crd, \"\", \"CRD is owned by a single operator\", true).", + "\t\t\t\t\tAddField(testhelper.OperatorName, opList[0]))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:43" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/operator/suite.go:41" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/access", + "name": "access", + "files": 1, + "imports": [ + "github.com/operator-framework/api/pkg/operators/v1alpha1" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "PermissionsHaveBadRule", + "qualifiedName": "PermissionsHaveBadRule", + "exported": true, + "signature": "func([]v1alpha1.StrategyDeploymentPermissions)(bool)", + "doc": "PermissionsHaveBadRule detects if any RBAC rule grants access to security context 
constraints\n\nThe function iterates over a slice of cluster permissions, examining each\nrule for the presence of the security API group or a wildcard. When such a\ngroup is found, it then checks whether the rule targets the\nsecuritycontextconstraints resource or all resources. If any matching rule\nexists, the function returns true to indicate a problematic configuration;\notherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/access/access.go:15", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorInstallationAccessToSCC", + "kind": "function", + "source": [ + "func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Operators {", + "\t\toperator := env.Operators[i]", + "\t\tcsv := operator.Csv", + "\t\tcheck.LogDebug(\"Checking operator %s\", operator)", + "\t\tclusterPermissions := csv.Spec.InstallStrategy.StrategySpec.ClusterPermissions", + "\t\tif len(clusterPermissions) == 0 {", + "\t\t\tcheck.LogInfo(\"No clusterPermissions found in %s's CSV\", operator)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name,", + "\t\t\t\t\"No RBAC rules for Security Context Constraints found in CSV (no clusterPermissions found)\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Fails in case any cluster permission has a rule that refers to securitycontextconstraints.", + "\t\tif access.PermissionsHaveBadRule(clusterPermissions) {", + "\t\t\tcheck.LogInfo(\"Operator %s has a rule for a service account to access cluster SCCs\",", + "\t\t\t\toperator)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"One or more RBAC 
rules for Security Context Constraints found in CSV\", false))", + "\t\t} else {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(operator.Namespace, operator.Name, \"No RBAC rules for Security Context Constraints found in CSV\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func PermissionsHaveBadRule(clusterPermissions []v1alpha1.StrategyDeploymentPermissions) bool {", + "\tbadRuleFound := false", + "\tfor permissionIndex := range clusterPermissions {", + "\t\tpermission := \u0026clusterPermissions[permissionIndex]", + "\t\tfor ruleIndex := range permission.Rules {", + "\t\t\trule := \u0026permission.Rules[ruleIndex]", + "", + "\t\t\t// Check whether the rule is for the security api group.", + "\t\t\tsecurityGroupFound := false", + "\t\t\tfor _, group := range rule.APIGroups {", + "\t\t\t\tif group == \"*\" || group == \"security.openshift.io\" {", + "\t\t\t\t\tsecurityGroupFound = true", + "\t\t\t\t\tbreak", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tif !securityGroupFound {", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Now check whether it grants some access to securitycontextconstraint resources.", + "\t\t\tfor _, resource := range rule.Resources {", + "\t\t\t\tif resource == \"*\" || resource == \"securitycontextconstraints\" {", + "\t\t\t\t\t// Keep reviewing other permissions' rules so we can log all the failing ones in the claim file.", + "\t\t\t\t\tbadRuleFound = true", + "\t\t\t\t\tbreak", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn badRuleFound", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/catalogsource", + "name": "catalogsource", + "files": 1, + "imports": [ + "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/apis/operators/v1", + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "SkipPMBasedOnChannel", + "qualifiedName": "SkipPMBasedOnChannel", + "exported": true, + "signature": "func([]olmpkgv1.PackageChannel, string)(bool)", + "doc": "SkipPMBasedOnChannel Decides whether a package manifest should be ignored based on channel entries\n\nThe function examines each channel in the provided list, checking if any\nentry name matches the given CSV name. If a match is found, it indicates that\nthe package manifest belongs to the same operator and should not be skipped.\nIt returns true when no matching entry exists, meaning the manifest can be\nignored; otherwise false.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/catalogsource/catalogsource.go:15", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCatalogSourceBundleCount", + "kind": "function", + "source": [ + "func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tconst (", + "\t\tbundleCountLimit = 1000", + "", + "\t\t// If the OCP version is \u003c= 4.12, we need to use the probe container to get the bundle count.", + "\t\t// This means we cannot use the package manifests to skip based on channel.", + "\t\tocpMajorVersion = 4", + "\t\tocpMinorVersion = 12", + "\t)", + "", + "\tocp412Skip := false", + "\t// Check if the cluster is running an OCP version \u003c= 4.12", + "\tif env.OpenshiftVersion != \"\" {", + "\t\tlog.Info(\"Cluster is determined to be running Openshift version %q.\", env.OpenshiftVersion)", + "\t\tversion, err := semver.NewVersion(env.OpenshiftVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\tcheck.LogError(\"Failed to parse Openshift version %q.\", env.OpenshiftVersion)", + "\t\t\treturn", + "\t\t}", + "", + "\t\tif version.Major() \u003c ocpMajorVersion || (version.Major() == ocpMajorVersion \u0026\u0026 version.Minor() \u003c= ocpMinorVersion) {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003c= 4.12.\")", + "\t\t\tocp412Skip = true", + "\t\t} else {", + "\t\t\tlog.Info(\"Cluster is running an OCP version \u003e 4.12.\")", + "\t\t}", + "\t}", + "", + "\tbundleCountLimitStr := strconv.Itoa(bundleCountLimit)", + "", + "\t// Loop through all labeled operators and check if they have more than 1000 referenced images.", + "\tvar catalogsAlreadyReported []string", + "\tfor _, op := range env.Operators {", + "\t\tcatalogSourceCheckComplete := false", + "\t\tcheck.LogInfo(\"Checking bundle count for operator %q\", op.Csv.Name)", + 
"", + "\t\t// Search through packagemanifests to match the name of the CSV.", + "\t\tfor _, pm := range env.AllPackageManifests {", + "\t\t\t// Skip package manifests based on channel entries.", + "\t\t\t// Note: This only works for OCP versions \u003e 4.12 due to channel entries existence.", + "\t\t\tif !ocp412Skip \u0026\u0026 catalogsource.SkipPMBasedOnChannel(pm.Status.Channels, op.Csv.Name) {", + "\t\t\t\tlog.Debug(\"Skipping package manifest %q based on channel\", pm.Name)", + "\t\t\t\tcontinue", + "\t\t\t}", + "", + "\t\t\t// Search through all catalog sources to match the name and namespace of the package manifest.", + "\t\t\tfor _, catalogSource := range env.AllCatalogSources {", + "\t\t\t\tif catalogSource.Name != pm.Status.CatalogSource || catalogSource.Namespace != pm.Status.CatalogSourceNamespace {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on name or namespace\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the catalog source has already been reported, skip it.", + "\t\t\t\tif stringhelper.StringInSlice(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace, false) {", + "\t\t\t\t\tlog.Debug(\"Skipping catalog source %q based on already reported\", catalogSource.Name)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Found matching catalog source %q for operator %q\", catalogSource.Name, op.Csv.Name)", + "", + "\t\t\t\t// The name and namespace match. 
Lookup the bundle count.", + "\t\t\t\tbundleCount := provider.GetCatalogSourceBundleCount(env, catalogSource)", + "", + "\t\t\t\tif bundleCount == -1 {", + "\t\t\t\t\tcheck.LogError(\"Failed to get bundle count for CatalogSource %q\", catalogSource.Name)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"Failed to get bundle count\", false))", + "\t\t\t\t} else {", + "\t\t\t\t\tif bundleCount \u003e bundleCountLimit {", + "\t\t\t\t\t\tcheck.LogError(\"CatalogSource %q has more than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has more than \"+bundleCountLimitStr+\" referenced images\", false))", + "\t\t\t\t\t} else {", + "\t\t\t\t\t\tcheck.LogInfo(\"CatalogSource %q has less than \"+bundleCountLimitStr+\" (\"+strconv.Itoa(bundleCount)+\") referenced images\", catalogSource.Name)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewCatalogSourceReportObject(catalogSource.Namespace, catalogSource.Name, \"CatalogSource has less than \"+bundleCountLimitStr+\" referenced images\", true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "", + "\t\t\t\tlog.Debug(\"Adding catalog source %q to list of already reported\", catalogSource.Name)", + "\t\t\t\tcatalogsAlreadyReported = append(catalogsAlreadyReported, catalogSource.Name+\".\"+catalogSource.Namespace)", + "\t\t\t\t// Signal that the catalog source check is complete.", + "\t\t\t\tcatalogSourceCheckComplete = true", + "\t\t\t\tbreak", + "\t\t\t}", + "", + "\t\t\t// If the catalog source check is complete, break out of the loop.", + "\t\t\tif catalogSourceCheckComplete {", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", 
+ "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func SkipPMBasedOnChannel(channels []olmpkgv1.PackageChannel, csvName string) bool {", + "\t// This logic is in place because it is possible for an operator to pull from a multiple package manifests.", + "\tskipPMBasedOnChannel := true", + "\tfor c := range channels {", + "\t\tlog.Debug(\"Comparing channel currentCSV %q with current CSV %q\", channels[c].CurrentCSV, csvName)", + "\t\tlog.Debug(\"Number of channel entries %d\", len(channels[c].Entries))", + "\t\tfor _, entry := range channels[c].Entries {", + "\t\t\tlog.Debug(\"Comparing entry name %q with current CSV %q\", entry.Name, csvName)", + "", + "\t\t\tif entry.Name == csvName {", + "\t\t\t\tlog.Debug(\"Skipping package manifest based on channel entry %q\", entry.Name)", + "\t\t\t\tskipPMBasedOnChannel = false", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\tif !skipPMBasedOnChannel {", + "\t\t\tbreak", + "\t\t}", + "\t}", + "", + "\treturn skipPMBasedOnChannel", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/openapi", + "name": "openapi", + "files": 1, + "imports": [ + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "IsCRDDefinedWithOpenAPI3Schema", + "qualifiedName": "IsCRDDefinedWithOpenAPI3Schema", + "exported": true, + "signature": "func(*apiextv1.CustomResourceDefinition)(bool)", + "doc": "IsCRDDefinedWithOpenAPI3Schema Checks if a CRD uses an OpenAPI v3 schema\n\nThe function inspects each version of the provided CustomResourceDefinition,\nconverting its schema definition to a string. It searches for the substring\nthat identifies an OpenAPI v3 schema, ignoring case. 
If any version contains\nthis substring, it returns true; otherwise, it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/openapi/openapi.go:16", + "calls": [ + { + "name": "String", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ToLower", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorCrdOpenAPISpec", + "kind": "function", + "source": [ + "func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tcheck.LogInfo(\"Starting testOperatorCrdOpenAPISpec\")", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, crd := range env.Crds {", + "\t\tif openapi.IsCRDDefinedWithOpenAPI3Schema(crd) {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is defined with OpenAPIV3 schema \", crd.Name)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is defined with OpenAPIV3 schema \", true).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Operator CRD %s is not defined with OpenAPIV3 schema \", crd.Name)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(crd.Namespace, crd.Name,", + "\t\t\t\t\"Operator CRD is not defined with OpenAPIV3 schema \", false).AddField(testhelper.OpenAPIV3Schema, crd.Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsCRDDefinedWithOpenAPI3Schema(crd *apiextv1.CustomResourceDefinition) bool {", + "\tfor _, version := range crd.Spec.Versions {", 
+ "\t\tcrdSchema := version.Schema.String()", + "", + "\t\tcontainsOpenAPIV3SchemaSubstr := strings.Contains(strings.ToLower(crdSchema),", + "\t\t\tstrings.ToLower(testhelper.OpenAPIV3Schema))", + "", + "\t\tif containsOpenAPIV3SchemaSubstr {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "phasecheck", + "files": 1, + "imports": [ + "context", + "github.com/operator-framework/api/pkg/operators/v1alpha1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "time" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "WaitOperatorReady", + "qualifiedName": "WaitOperatorReady", + "exported": true, + "signature": "func(*v1alpha1.ClusterServiceVersion)(bool)", + "doc": "WaitOperatorReady Waits until an operator reaches the Succeeded phase\n\nThe function repeatedly polls a ClusterServiceVersion object, returning true\nif it enters the Succeeded phase before a timeout or false if it fails or\ntimes out. 
It also handles transient pod restarts by refreshing the CSV on\neach iteration and logs debugging information throughout.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/phasecheck/phasecheck.go:40", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Now", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Since", + "kind": "function" + }, + { + "name": "isOperatorPhaseSucceeded", + "kind": "function", + "source": [ + "func isOperatorPhaseSucceeded(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tlog.Debug(\"Checking succeeded status phase for csv %s (ns %s). 
Phase: %v\", csv.Name, csv.Namespace, csv.Status.Phase)", + "\treturn csv.Status.Phase == v1alpha1.CSVPhaseSucceeded", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "CsvToString", + "kind": "function", + "source": [ + "func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string {", + "\treturn fmt.Sprintf(\"operator csv: %s ns: %s\",", + "\t\tcsv.Name,", + "\t\tcsv.Namespace,", + "\t)", + "}" + ] + }, + { + "name": "isOperatorPhaseFailedOrUnknown", + "kind": "function", + "source": [ + "func isOperatorPhaseFailedOrUnknown(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tlog.Debug(\"Checking failed status phase for csv %s (ns %s). Phase: %v\", csv.Name, csv.Namespace, csv.Status.Phase)", + "\treturn csv.Status.Phase == v1alpha1.CSVPhaseFailed ||", + "\t\tcsv.Status.Phase == v1alpha1.CSVPhaseUnknown", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "CsvToString", + "kind": "function", + "source": [ + "func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string {", + "\treturn fmt.Sprintf(\"operator csv: %s ns: %s\",", + "\t\tcsv.Name,", + "\t\tcsv.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, 
LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "CsvToString", + "kind": "function", + "source": [ + "func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string {", + "\treturn fmt.Sprintf(\"operator csv: %s ns: %s\",", + "\t\tcsv.Name,", + "\t\tcsv.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Sleep", + "kind": "function" + }, + { + "name": "Get", + "kind": "function" + }, + { + "name": "ClusterServiceVersions", + "kind": "function" + }, + { + "name": "OperatorsV1alpha1", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "CsvToString", + "kind": "function", + "source": [ + "func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string {", + "\treturn fmt.Sprintf(\"operator csv: %s ns: %s\",", + "\t\tcsv.Name,", + "\t\tcsv.Namespace,", + "\t)", + "}" + ] + }, + { + "pkgPath": "time", + "name": "Since", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "CsvToString", + "kind": "function", + "source": [ + "func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string {", + "\treturn fmt.Sprintf(\"operator csv: %s ns: %s\",", + "\t\tcsv.Name,", + "\t\tcsv.Namespace,", + "\t)", + "}" + ] + } + ], + "calledBy": [ + { + 
"pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator", + "name": "testOperatorInstallationPhaseSucceeded", + "kind": "function", + "source": [ + "func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, op := range env.Operators {", + "\t\tcheck.LogInfo(\"Testing Operator %q\", op)", + "\t\tif phasecheck.WaitOperatorReady(op.Csv) {", + "\t\t\tcheck.LogInfo(\"Operator %q is in Succeeded phase\", op)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator on Succeeded state \", true).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Operator %q is not in Succeeded phase (phase=%q)\", op, op.Csv.Status.Phase)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name,", + "\t\t\t\t\"Operator not in Succeeded state \", false).AddField(testhelper.OperatorPhase, string(op.Csv.Status.Phase)))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to take into account that its pods", + "\t\t// could have been 
deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "isOperatorPhaseFailedOrUnknown", + "qualifiedName": "isOperatorPhaseFailedOrUnknown", + "exported": false, + "signature": "func(*v1alpha1.ClusterServiceVersion)(bool)", + "doc": "isOperatorPhaseFailedOrUnknown determines if a CSV has failed or is unknown\n\nThe function examines the status phase of a ClusterServiceVersion object. It\nreturns true when the phase equals Failed or Unknown, indicating that the\noperator cannot reach a successful state. 
Otherwise it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/phasecheck/phasecheck.go:90", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "WaitOperatorReady", + "kind": "function", + "source": [ + "func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to take into account that its pods", + "\t\t// could have been deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. 
Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isOperatorPhaseFailedOrUnknown(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tlog.Debug(\"Checking failed status phase for csv %s (ns %s). Phase: %v\", csv.Name, csv.Namespace, csv.Status.Phase)", + "\treturn csv.Status.Phase == v1alpha1.CSVPhaseFailed ||", + "\t\tcsv.Status.Phase == v1alpha1.CSVPhaseUnknown", + "}" + ] + }, + { + "name": "isOperatorPhaseSucceeded", + "qualifiedName": "isOperatorPhaseSucceeded", + "exported": false, + "signature": "func(*v1alpha1.ClusterServiceVersion)(bool)", + "doc": "isOperatorPhaseSucceeded Determines if an operator is in the succeeded phase\n\nThe function inspects the status of a ClusterServiceVersion object and\nreturns true when its phase equals the succeeded constant. It logs the\ncurrent phase for debugging purposes before performing the comparison. 
The\nreturn value indicates whether the operator has completed successfully.", + "position": "/Users/deliedit/dev/certsuite/tests/operator/phasecheck/phasecheck.go:80", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck", + "name": "WaitOperatorReady", + "kind": "function", + "source": [ + "func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool {", + "\toc := clientsholder.GetClientsHolder()", + "\tstart := time.Now()", + "\tfor time.Since(start) \u003c timeout {", + "\t\tif isOperatorPhaseSucceeded(csv) {", + "\t\t\tlog.Debug(\"%s is ready\", provider.CsvToString(csv))", + "\t\t\treturn true", + "\t\t} else if isOperatorPhaseFailedOrUnknown(csv) {", + "\t\t\tlog.Debug(\"%s failed to be ready, status=%s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// Operator is not ready, but we need to take into account that its pods", + "\t\t// could have been deleted by some of the lifecycle test cases, so they", + "\t\t// could be restarting. 
Let's give it some time before declaring it failed.", + "\t\tlog.Debug(\"Waiting for %s to be in Succeeded phase: %s\", provider.CsvToString(csv), csv.Status.Phase)", + "\t\ttime.Sleep(time.Second)", + "", + "\t\tfreshCsv, err := oc.OlmClient.OperatorsV1alpha1().ClusterServiceVersions(csv.Namespace).Get(context.TODO(), csv.Name, metav1.GetOptions{})", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not get csv %s, err: %v\", provider.CsvToString(freshCsv), err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// update old csv and check status again", + "\t\t*csv = *freshCsv", + "\t}", + "\tif time.Since(start) \u003e timeout {", + "\t\tlog.Error(\"timeout waiting for csv %s to be ready\", provider.CsvToString(csv))", + "\t}", + "", + "\treturn false", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func isOperatorPhaseSucceeded(csv *v1alpha1.ClusterServiceVersion) bool {", + "\tlog.Debug(\"Checking succeeded status phase for csv %s (ns %s). Phase: %v\", csv.Name, csv.Namespace, csv.Status.Phase)", + "\treturn csv.Status.Phase == v1alpha1.CSVPhaseSucceeded", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "timeout", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/operator/phasecheck/phasecheck.go:31" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "performance", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/resources", + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "slices", + "strconv", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Loads the performance test suite checks into the registry\n\nThe function logs that it is loading checks for the performance suite,\ncreates or retrieves a checks group identified by the performance key, and\nthen registers several specific checks. Each check is configured with\noptional skip conditions and a callback that runs the actual test logic. The\nsetup prepares the tests to be executed later as part of the overall testing\nframework.", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:90", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": 
"function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testExclusiveCPUPool", + "kind": "function", + "source": [ + "func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tnBExclusiveCPUPoolContainers := 0", + "\t\tnBSharedCPUPoolContainers := 0", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif resources.HasExclusiveCPUsAssigned(cut, 
check.GetLogger()) {", + "\t\t\t\tnBExclusiveCPUPoolContainers++", + "\t\t\t} else {", + "\t\t\t\tnBSharedCPUPoolContainers++", + "\t\t\t}", + "\t\t}", + "", + "\t\tif nBExclusiveCPUPoolContainers \u003e 0 \u0026\u0026 nBSharedCPUPoolContainers \u003e 0 {", + "\t\t\texclusiveStr := strconv.Itoa(nBExclusiveCPUPoolContainers)", + "\t\t\tsharedStr := strconv.Itoa(nBSharedCPUPoolContainers)", + "", + "\t\t\tcheck.LogError(\"Pod %q has containers whose CPUs belong to different pools. Containers in the shared cpu pool: %d \"+", + "\t\t\t\t\"Containers in the exclusive cpu pool: %d\", put, nBSharedCPUPoolContainers, nBExclusiveCPUPoolContainers)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has containers whose CPUs belong to different pools\", false).", + "\t\t\t\tAddField(\"SharedCPUPoolContainers\", sharedStr).", + "\t\t\t\tAddField(\"ExclusiveCPUPoolContainers\", exclusiveStr))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has no containers whose CPUs belong to different pools\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has no containers whose CPUs belong to different pools\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", 
check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testRtAppsNoExecProbes", + "kind": "function", + "source": [ + "func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcuts := env.GetNonGuaranteedPodContainersWithoutHostPID()", + "\tfor _, cut := range cuts {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !cut.HasExecProbes() {", + "\t\t\tcheck.LogInfo(\"Container %q does not define exec probes\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not define exec probes\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprocesses, err := crclient.GetContainerProcesses(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not determine the processes pids for container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the processes pids for container\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\tnotExecProbeProcesses, compliantObjectsProbes := filterProbeProcesses(processes, cut)", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsProbes...)", + "\t\tallProcessesCompliant := true", + "\t\tfor _, p := range notExecProbeProcesses {", + 
"\t\t\tcheck.LogInfo(\"Testing process %q\", p)", + "\t\t\tschedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)", + "\t\t\tif err != nil {", + "\t\t\t\t// If the process does not exist anymore it means that it has finished since the time the process list", + "\t\t\t\t// was retrieved. In this case, just ignore the error and continue processing the rest of the processes.", + "\t\t\t\tif strings.Contains(err.Error(), noProcessFoundErrMsg) {", + "\t\t\t\t\tcheck.LogWarn(\"Container process %q disappeared\", p)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process disappeared\", true).", + "\t\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogError(\"Could not determine the scheduling policy for container %q (pid=%d), err: %v\", cut, p.Pid, err)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the scheduling policy for container\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif scheduling.PolicyIsRT(schedPolicy) {", + "\t\t\t\tcheck.LogError(\"Container %q defines exec probes while having a RT scheduling policy for process %q\", cut, p)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes while having a RT scheduling policy\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif allProcessesCompliant {", + 
"\t\t\tcheck.LogInfo(\"Container %q defines exec probes but does not have a RT scheduling policy\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes but does not have a RT scheduling policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testSchedulingPolicyInCPUPool", + "kind": "function", + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + 
"\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for each processes running in that pid namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + "\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + }, + { + "name": "GetNonGuaranteedPodContainersWithoutHostPID", + "kind": "function" + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", 
+ "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testSchedulingPolicyInCPUPool", + "kind": "function", + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid 
namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for each processes running in that pid namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + "\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + }, + { + "name": "GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "kind": "function" + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags 
[]string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "name": "testSchedulingPolicyInCPUPool", + "kind": "function", + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for each processes running in 
that pid namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + "\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + }, + { + "name": "GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "kind": "function" + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + 
"name": "testLimitedUseOfExecProbes", + "kind": "function", + "source": [ + "func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcounter := 0", + "\tfor _, put := range env.Pods {", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tif cut.LivenessProbe != nil \u0026\u0026 cut.LivenessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.LivenessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a LivenessProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a LivenessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.StartupProbe != nil \u0026\u0026 cut.StartupProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.StartupProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a StartupProbe with PeriodSeconds greater 
than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a StartupProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.ReadinessProbe != nil \u0026\u0026 cut.ReadinessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.ReadinessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a ReadinessProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a ReadinessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + 
"\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// If there \u003e=10 exec probes, mark the entire cluster as a failure", + "\tif counter \u003e= maxNumberOfExecProbes {", + "\t\tcheck.LogError(\"CNF has 10 or more exec probes (nb-exec-probes=%d)\", counter)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has 10 or more exec probes (%d exec probes)\", counter), testhelper.CnfType, false))", + "\t} else {", + "\t\tcheck.LogInfo(\"CNF has less than 10 exec probes (nb-exec-probes=%d)\", counter)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has less than 10 exec probes (%d exec probes)\", counter), testhelper.CnfType, true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "filterProbeProcesses", + "qualifiedName": "filterProbeProcesses", + "exported": false, + "signature": "func([]*crclient.Process, *provider.Container)([]*crclient.Process, []*testhelper.ReportObject)", + "doc": "filterProbeProcesses Separates exec probe processes from other container processes\n\nThe function receives a list of all running processes in a container and the\ncontainer definition. It identifies which processes belong to exec probes by\ncomparing command lines with those defined in liveness, readiness, and\nstartup probes. Processes that are part of an exec probe or their descendants\nare marked as compliant and excluded from further checks, while the remaining\nprocesses are returned for additional verification.", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:420", + "calls": [ + { + "name": "getExecProbesCmds", + "kind": "function", + "source": [ + "func getExecProbesCmds(c *provider.Container) map[string]bool {", + "\tcmds := map[string]bool{}", + "", + "\tif c.LivenessProbe != nil \u0026\u0026 c.LivenessProbe.Exec != nil {", + "\t\tcmd := strings.Join(c.LivenessProbe.Exec.Command, \"\")", + "\t\tcmd = strings.Join(strings.Fields(cmd), \"\")", + "\t\tcmds[cmd] = true", + "\t}", + "", + "\tif c.ReadinessProbe != nil \u0026\u0026 c.ReadinessProbe.Exec != nil {", + "\t\tcmd := strings.Join(c.ReadinessProbe.Exec.Command, \"\")", + "\t\tcmd = strings.Join(strings.Fields(cmd), \"\")", + "\t\tcmds[cmd] = true", + "\t}", + "", + "\tif c.StartupProbe != nil \u0026\u0026 c.StartupProbe.Exec != nil {", + "\t\tcmd := strings.Join(c.StartupProbe.Exec.Command, 
\"\")", + "\t\tcmd = strings.Join(strings.Fields(cmd), \"\")", + "\t\tcmds[cmd] = true", + "\t}", + "", + "\treturn cmds", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "slices", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "slices", + "name": "Contains", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "testRtAppsNoExecProbes", + "kind": "function", + "source": [ + "func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcuts := env.GetNonGuaranteedPodContainersWithoutHostPID()", + "\tfor _, cut := range cuts {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !cut.HasExecProbes() {", + "\t\t\tcheck.LogInfo(\"Container %q does not define exec probes\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, 
testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not define exec probes\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprocesses, err := crclient.GetContainerProcesses(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not determine the processes pids for container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the processes pids for container\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\tnotExecProbeProcesses, compliantObjectsProbes := filterProbeProcesses(processes, cut)", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsProbes...)", + "\t\tallProcessesCompliant := true", + "\t\tfor _, p := range notExecProbeProcesses {", + "\t\t\tcheck.LogInfo(\"Testing process %q\", p)", + "\t\t\tschedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)", + "\t\t\tif err != nil {", + "\t\t\t\t// If the process does not exist anymore it means that it has finished since the time the process list", + "\t\t\t\t// was retrieved. 
In this case, just ignore the error and continue processing the rest of the processes.", + "\t\t\t\tif strings.Contains(err.Error(), noProcessFoundErrMsg) {", + "\t\t\t\t\tcheck.LogWarn(\"Container process %q disappeared\", p)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process disappeared\", true).", + "\t\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogError(\"Could not determine the scheduling policy for container %q (pid=%d), err: %v\", cut, p.Pid, err)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the scheduling policy for container\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif scheduling.PolicyIsRT(schedPolicy) {", + "\t\t\t\tcheck.LogError(\"Container %q defines exec probes while having a RT scheduling policy for process %q\", cut, p)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes while having a RT scheduling policy\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif allProcessesCompliant {", + "\t\t\tcheck.LogInfo(\"Container %q defines exec probes but does not have a RT scheduling policy\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes but does not have a RT scheduling 
policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Container) (notExecProbeProcesses []*crclient.Process, compliantObjects []*testhelper.ReportObject) {", + "\texecProbeProcesses := []int{}", + "\texecProbesCmds := getExecProbesCmds(cut)", + "\t// find all exec probes by matching command line", + "\tfor _, p := range allProcesses {", + "\t\tif execProbesCmds[strings.Join(strings.Fields(p.Args), \"\")] {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process belongs to an exec probe (skipping verification)\", true).", + "\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\texecProbeProcesses = append(execProbeProcesses, p.Pid)", + "\t\t}", + "\t}", + "\t// remove all exec probes and their children from the process list", + "\tfor _, p := range allProcesses {", + "\t\tif slices.Contains(execProbeProcesses, p.Pid) || slices.Contains(execProbeProcesses, p.PPid) {", + "\t\t\t// this process is part of an exec probe (child or parent), continue", + "\t\t\tcontinue", + "\t\t}", + "\t\tnotExecProbeProcesses = append(notExecProbeProcesses, p)", + "\t}", + "\treturn notExecProbeProcesses, compliantObjects", + "}" + ] + }, + { + "name": "getExecProbesCmds", + "qualifiedName": "getExecProbesCmds", + "exported": false, + "signature": "func(*provider.Container)(map[string]bool)", + "doc": "getExecProbesCmds Collects normalized exec probe command strings\n\nThe function examines a container's liveness, readiness, and startup probes\nfor an Exec configuration. 
For each present probe it joins the command array\ninto a single string, removes any extra whitespace, and stores that cleaned\ncommand as a key in a map with a true value. The resulting map is used to\nquickly determine whether a running process matches one of the probe\ncommands.", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:319", + "calls": [ + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Fields", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "filterProbeProcesses", + "kind": "function", + "source": [ + "func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Container) (notExecProbeProcesses []*crclient.Process, compliantObjects []*testhelper.ReportObject) {", + "\texecProbeProcesses := []int{}", + "\texecProbesCmds := getExecProbesCmds(cut)", + "\t// find all exec probes by matching command line", + "\tfor _, p := range allProcesses {", + "\t\tif execProbesCmds[strings.Join(strings.Fields(p.Args), \"\")] {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process belongs to an exec probe (skipping verification)\", true).", + "\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + 
"\t\t\texecProbeProcesses = append(execProbeProcesses, p.Pid)", + "\t\t}", + "\t}", + "\t// remove all exec probes and their children from the process list", + "\tfor _, p := range allProcesses {", + "\t\tif slices.Contains(execProbeProcesses, p.Pid) || slices.Contains(execProbeProcesses, p.PPid) {", + "\t\t\t// this process is part of an exec probe (child or parent), continue", + "\t\t\tcontinue", + "\t\t}", + "\t\tnotExecProbeProcesses = append(notExecProbeProcesses, p)", + "\t}", + "\treturn notExecProbeProcesses, compliantObjects", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getExecProbesCmds(c *provider.Container) map[string]bool {", + "\tcmds := map[string]bool{}", + "", + "\tif c.LivenessProbe != nil \u0026\u0026 c.LivenessProbe.Exec != nil {", + "\t\tcmd := strings.Join(c.LivenessProbe.Exec.Command, \"\")", + "\t\tcmd = strings.Join(strings.Fields(cmd), \"\")", + "\t\tcmds[cmd] = true", + "\t}", + "", + "\tif c.ReadinessProbe != nil \u0026\u0026 c.ReadinessProbe.Exec != nil {", + "\t\tcmd := strings.Join(c.ReadinessProbe.Exec.Command, \"\")", + "\t\tcmd = strings.Join(strings.Fields(cmd), \"\")", + "\t\tcmds[cmd] = true", + "\t}", + "", + "\tif c.StartupProbe != nil \u0026\u0026 c.StartupProbe.Exec != nil {", + "\t\tcmd := strings.Join(c.StartupProbe.Exec.Command, \"\")", + "\t\tcmd = strings.Join(strings.Fields(cmd), \"\")", + "\t\tcmds[cmd] = true", + "\t}", + "", + "\treturn cmds", + "}" + ] + }, + { + "name": "testExclusiveCPUPool", + "qualifiedName": "testExclusiveCPUPool", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testExclusiveCPUPool Verifies that all containers in a pod use the same CPU pool\n\nThe function iterates over every pod in the test environment, counting how\nmany of its containers are assigned to exclusive CPUs versus shared CPUs. 
If\nboth types appear within a single pod it logs an error and records the pod as\nnon‑compliant, including counts for each pool. Pods that contain only one\ntype of CPU assignment are marked compliant. Finally, the results are stored\nin the check object.", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:236", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/resources", + "name": "HasExclusiveCPUsAssigned", + "kind": "function", + "source": [ + "func HasExclusiveCPUsAssigned(cut *provider.Container, logger *log.Logger) bool {", + "\tcpuLimits := cut.Resources.Limits.Cpu()", + "\tmemLimits := cut.Resources.Limits.Memory()", + "", + "\t// if no cpu or memory limits are specified the container will run in the shared cpu pool", + "\tif cpuLimits.IsZero() || memLimits.IsZero() {", + "\t\tlogger.Debug(\"Container %q has been found missing cpu/memory resource limits\", cut)", + "\t\treturn false", + "\t}", + "", + "\t// if the cpu limits quantity is not an integer the container will run in the shared cpu pool", + "\tcpuLimitsVal, isInteger := cpuLimits.AsInt64()", + "\tif !isInteger {", + "\t\tlogger.Debug(\"Container %q cpu resource limit is not an integer\", cut)", + "\t\treturn false", + "\t}", + "", + "\t// if the cpu and memory limits and requests are equal to each other the container will run in the exclusive cpu pool", + "\tcpuRequestsVal, _ := cut.Resources.Requests.Cpu().AsInt64()", + "\tmemRequestsVal, _ := cut.Resources.Requests.Memory().AsInt64()", + "\tmemLimitsVal, _ := memLimits.AsInt64()", + "\tif cpuLimitsVal == cpuRequestsVal \u0026\u0026 memLimitsVal == memRequestsVal {", + "\t\treturn true", + "\t}", + "", + "\t// if the cpu limits and request are different, the container will run in the shared cpu pool", + "\tlogger.Debug(\"Container %q cpu/memory resources and limits are not equal to each other\", cut)", + "\treturn false", + "}" + ] + }, + { + "name": "GetLogger", + 
"kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + 
"\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tfor _, put := range env.Pods {", + "\t\tnBExclusiveCPUPoolContainers := 0", + "\t\tnBSharedCPUPoolContainers := 0", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif resources.HasExclusiveCPUsAssigned(cut, check.GetLogger()) {", + "\t\t\t\tnBExclusiveCPUPoolContainers++", + "\t\t\t} else {", + "\t\t\t\tnBSharedCPUPoolContainers++", + "\t\t\t}", + "\t\t}", + "", + "\t\tif nBExclusiveCPUPoolContainers \u003e 0 \u0026\u0026 nBSharedCPUPoolContainers \u003e 0 {", + "\t\t\texclusiveStr := strconv.Itoa(nBExclusiveCPUPoolContainers)", + "\t\t\tsharedStr := strconv.Itoa(nBSharedCPUPoolContainers)", + "", + "\t\t\tcheck.LogError(\"Pod %q has containers whose CPUs belong to different pools. 
Containers in the shared cpu pool: %d \"+", + "\t\t\t\t\"Containers in the exclusive cpu pool: %d\", put, nBSharedCPUPoolContainers, nBExclusiveCPUPoolContainers)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has containers whose CPUs belong to different pools\", false).", + "\t\t\t\tAddField(\"SharedCPUPoolContainers\", sharedStr).", + "\t\t\t\tAddField(\"ExclusiveCPUPoolContainers\", exclusiveStr))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has no containers whose CPUs belong to different pools\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has no containers whose CPUs belong to different pools\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testLimitedUseOfExecProbes", + "qualifiedName": "testLimitedUseOfExecProbes", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testLimitedUseOfExecProbes Evaluates the use of exec probes across containers\n\nThe routine iterates through all pods and their containers, checking\nliveness, startup, and readiness probes that execute commands. It counts each\nexec probe and records compliance if the period exceeds a defined threshold;\notherwise it logs an error. 
If the total number of exec probes reaches ten or\nmore, the entire CNF is marked non‑compliant, and the result is set\naccordingly.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:149", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, 
aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": 
"function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewReportObject", + "kind": "function", + "source": [ + "func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) {", + "\tout = \u0026ReportObject{}", + "\tout.ObjectType = aType", + "\tif isCompliant {", + "\t\tout.AddField(ReasonForCompliance, aReason)", + "\t} else {", + "\t\tout.AddField(ReasonForNonCompliance, aReason)", + "\t}", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + 
"source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", 
+ "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcounter := 0", + "\tfor _, put := range env.Pods {", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\t\tif cut.LivenessProbe != nil \u0026\u0026 cut.LivenessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.LivenessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a LivenessProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a LivenessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = 
append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"LivenessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.LivenessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.StartupProbe != nil \u0026\u0026 cut.StartupProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.StartupProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a StartupProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a StartupProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"StartupProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.StartupProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t\tif cut.ReadinessProbe != nil \u0026\u0026 cut.ReadinessProbe.Exec != nil {", + "\t\t\t\tcounter++", + "\t\t\t\tif cut.ReadinessProbe.PeriodSeconds \u003e= minExecProbePeriodSeconds {", + "\t\t\t\t\tcheck.LogInfo(\"Container %q has a ReadinessProbe with PeriodSeconds greater than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + 
"", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), true))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogError(\"Container %q has a ReadinessProbe with PeriodSeconds less than %d (%d seconds)\",", + "\t\t\t\t\t\tcut, minExecProbePeriodSeconds, cut.LivenessProbe.PeriodSeconds)", + "", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects,", + "\t\t\t\t\t\ttesthelper.NewContainerReportObject(put.Namespace, put.Name,", + "\t\t\t\t\t\t\tcut.Name, fmt.Sprintf(\"ReadinessProbe exec probe has a PeriodSeconds that is not greater than 10 (%d seconds)\",", + "\t\t\t\t\t\t\t\tcut.ReadinessProbe.PeriodSeconds), false))", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// If there \u003e=10 exec probes, mark the entire cluster as a failure", + "\tif counter \u003e= maxNumberOfExecProbes {", + "\t\tcheck.LogError(\"CNF has 10 or more exec probes (nb-exec-probes=%d)\", counter)", + "\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has 10 or more exec probes (%d exec probes)\", counter), testhelper.CnfType, false))", + "\t} else {", + "\t\tcheck.LogInfo(\"CNF has less than 10 exec probes (nb-exec-probes=%d)\", counter)", + "\t\tcompliantObjects = append(compliantObjects, testhelper.NewReportObject(fmt.Sprintf(\"CNF has less than 10 exec probes (%d exec probes)\", counter), testhelper.CnfType, true))", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testRtAppsNoExecProbes", + "qualifiedName": "testRtAppsNoExecProbes", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testRtAppsNoExecProbes Verifies that non‑guaranteed containers without host PID do not use exec probes 
with real‑time scheduling\n\nThe routine iterates over all eligible containers, checking whether they\ndeclare exec probes. For those that do, it gathers running processes, filters\nout probe processes, and inspects the CPU scheduling policy of each remaining\nprocess. If any process runs under a real‑time policy, the container is\nmarked non‑compliant; otherwise it is compliant. Results are recorded as\nreport objects for later aggregation.", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:353", + "calls": [ + { + "name": "GetNonGuaranteedPodContainersWithoutHostPID", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "HasExecProbes", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerProcesses", + "kind": "function", + "source": [ + "func GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error) {", + "\tpidNs, err := GetContainerPidNamespace(container, env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"could not get the containers' pid namespace, err: %v\", err)", + "\t}", + "", + "\treturn GetPidsFromPidNamespace(pidNs, container)", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, 
+ { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "filterProbeProcesses", + "kind": "function", + "source": [ + "func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Container) (notExecProbeProcesses []*crclient.Process, compliantObjects []*testhelper.ReportObject) {", + "\texecProbeProcesses := []int{}", + "\texecProbesCmds := getExecProbesCmds(cut)", + "\t// find all exec probes by matching command line", + "\tfor _, p := range allProcesses {", + "\t\tif execProbesCmds[strings.Join(strings.Fields(p.Args), \"\")] {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process belongs to an exec probe (skipping verification)\", true).", + "\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\texecProbeProcesses = append(execProbeProcesses, p.Pid)", + "\t\t}", + "\t}", + "\t// remove all exec probes and their children from the process list", + "\tfor _, p := range allProcesses {", + "\t\tif slices.Contains(execProbeProcesses, p.Pid) || slices.Contains(execProbeProcesses, p.PPid) {", + "\t\t\t// this process is part of an exec probe (child or parent), continue", + "\t\t\tcontinue", + "\t\t}", + "\t\tnotExecProbeProcesses = append(notExecProbeProcesses, p)", + "\t}", + "\treturn notExecProbeProcesses, compliantObjects", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "LogInfo", 
+ "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "GetProcessCPUScheduling", + "kind": "function", + "source": [ + "func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) {", + "\tlog.Info(\"Checking the scheduling policy/priority in %v for pid=%d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"chrt -p %d\", pid)", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := crclient.GetNodeProbePodContext(testContainer.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn \"\", 0, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tch := clientsholder.GetClientsHolder()", + "", + "\tstdout, stderr, err := ch.ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"command %q failed to run in probe pod %s (node %s): %v (stderr: %v)\",", + "\t\t\tcommand, ctx.GetPodName(), testContainer.NodeName, err, stderr)", + "\t}", + "", + "\tschedulePolicy, schedulePriority, err = parseSchedulingPolicyAndPriority(stdout)", + "\tif err != nil {", + "\t\treturn schedulePolicy, InvalidPriority, fmt.Errorf(\"error getting the scheduling policy and priority for %v : %v\", testContainer, err)", + "\t}", + "\tlog.Info(\"pid %d in %v has the cpu scheduling policy %s, scheduling priority %d\", pid, testContainer, schedulePolicy, schedulePriority)", + "", + "\treturn schedulePolicy, schedulePriority, err", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "LogWarn", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "PolicyIsRT", + "kind": "function", + "source": [ + "func PolicyIsRT(schedPolicy string) bool {", + "\treturn schedPolicy == SchedulingFirstInFirstOut || schedPolicy == SchedulingRoundRobin", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func 
NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + 
"\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tcuts := env.GetNonGuaranteedPodContainersWithoutHostPID()", + "\tfor _, cut := range cuts {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif !cut.HasExecProbes() {", + "\t\t\tcheck.LogInfo(\"Container %q does not define exec probes\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container does not define exec probes\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprocesses, err := crclient.GetContainerProcesses(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not determine the processes pids for container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the processes pids for container\", false))", + "\t\t\tbreak", + "\t\t}", + "", + "\t\tnotExecProbeProcesses, compliantObjectsProbes := filterProbeProcesses(processes, cut)", + "\t\tcompliantObjects = append(compliantObjects, compliantObjectsProbes...)", + "\t\tallProcessesCompliant := true", + "\t\tfor _, p := range notExecProbeProcesses {", + "\t\t\tcheck.LogInfo(\"Testing process %q\", p)", + "\t\t\tschedPolicy, _, err := scheduling.GetProcessCPUScheduling(p.Pid, cut)", + "\t\t\tif err != nil {", + "\t\t\t\t// If the process does not exist anymore it means that it has finished since the time the process list", + "\t\t\t\t// was retrieved. 
In this case, just ignore the error and continue processing the rest of the processes.", + "\t\t\t\tif strings.Contains(err.Error(), noProcessFoundErrMsg) {", + "\t\t\t\t\tcheck.LogWarn(\"Container process %q disappeared\", p)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container process disappeared\", true).", + "\t\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogError(\"Could not determine the scheduling policy for container %q (pid=%d), err: %v\", cut, p.Pid, err)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Could not determine the scheduling policy for container\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)).", + "\t\t\t\t\tAddField(testhelper.ProcessCommandLine, p.Args))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t\tcontinue", + "\t\t\t}", + "\t\t\tif scheduling.PolicyIsRT(schedPolicy) {", + "\t\t\t\tcheck.LogError(\"Container %q defines exec probes while having a RT scheduling policy for process %q\", cut, p)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes while having a RT scheduling policy\", false).", + "\t\t\t\t\tAddField(testhelper.ProcessID, strconv.Itoa(p.Pid)))", + "\t\t\t\tallProcessesCompliant = false", + "\t\t\t}", + "\t\t}", + "", + "\t\tif allProcessesCompliant {", + "\t\t\tcheck.LogInfo(\"Container %q defines exec probes but does not have a RT scheduling policy\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container defines exec probes but does not have a RT scheduling 
policy\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSchedulingPolicyInCPUPool", + "qualifiedName": "testSchedulingPolicyInCPUPool", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment, []*provider.Container, string)()", + "doc": "testSchedulingPolicyInCPUPool Evaluates CPU scheduling compliance for container processes\n\nThe function iterates over a set of containers, retrieves each container's\nPID namespace, then lists all process IDs within that namespace. For every\nprocess, it checks whether the CPU scheduling policy and priority meet the\nspecified . Containers are reported as compliant or non‑compliant based on\nthe outcomes of these checks.", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:276", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetContainerPidNamespace", + "kind": "function", + "source": [ + "func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) {", + "\t// Get the container pid", + "\tocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", testContainer, err)", + "\t}", + "", + "\tpid, err := GetPidFromContainer(testContainer, ocpContext)", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"unable to get container process id due to: %v\", err)", + "\t}", + "\tlog.Debug(\"Obtained process id for %s is %d\", testContainer, pid)", + "", + "\tcommand := fmt.Sprintf(\"lsns -p %d -t pid -n\", pid)", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ocpContext, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"unable to run nsenter due 
to : %v\", err)", + "\t}", + "", + "\treturn strings.Fields(stdout)[0], nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient", + "name": "GetPidsFromPidNamespace", + "kind": "function", + "source": [ + "func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) {", + "\tconst command = \"trap \\\"\\\" SIGURG ; ps -e -o pidns,pid,ppid,args\"", + "\tenv := provider.GetTestEnvironment()", + "\tctx, err := GetNodeProbePodContext(container.NodeName, \u0026env)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get probe pod's context for container %s: %v\", container, err)", + "\t}", + "", + "\tstdout, stderr, err := clientsholder.GetClientsHolder().ExecCommandContainer(ctx, command)", + "\tif err != nil || stderr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"command %q failed to run in probe pod=%s (node=%s): %v\", command, ctx.GetPodName(), container.NodeName, err)", + "\t}", + "", + "\tre := regexp.MustCompile(PsRegex)", + "\tmatches := re.FindAllStringSubmatch(stdout, -1)", + "\t// If we do not find a successful log, we fail", + "\tfor _, v := range matches {", + "\t\t// Matching only the right PidNs", + "\t\tif 
pidNamespace != v[1] {", + "\t\t\tcontinue", + "\t\t}", + "\t\taPidNs, err := strconv.Atoi(v[1])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[1], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPid, err := strconv.Atoi(v[2])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[2], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\taPPid, err := strconv.Atoi(v[3])", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"could not convert string %s to integer, err=%s\", v[3], err)", + "\t\t\tcontinue", + "\t\t}", + "\t\tp = append(p, \u0026Process{PidNs: aPidNs, Pid: aPid, Args: v[4], PPid: aPPid})", + "\t}", + "\treturn p, nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling", + "name": "ProcessPidsCPUScheduling", + "kind": "function", + "source": [ + "func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string, logger *log.Logger) (compliantContainerPids, nonCompliantContainerPids []*testhelper.ReportObject) {", + "\thasCPUSchedulingConditionSuccess := false", + "\tfor _, process := range processes {", + "\t\tlogger.Debug(\"Testing process %q\", process)", + "\t\tschedulePolicy, schedulePriority, err 
:= GetProcessCPUSchedulingFn(process.Pid, testContainer)", + "\t\tif err != nil {", + "\t\t\tlogger.Error(\"Unable to get the scheduling policy and priority : %v\", err)", + "\t\t\treturn compliantContainerPids, nonCompliantContainerPids", + "\t\t}", + "", + "\t\tswitch check {", + "\t\tcase SharedCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority == 0", + "\t\tcase ExclusiveCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority == 0 || (schedulePriority \u003c 10 \u0026\u0026 (schedulePolicy == SchedulingRoundRobin || schedulePolicy == SchedulingFirstInFirstOut))", + "\t\tcase IsolatedCPUScheduling:", + "\t\t\thasCPUSchedulingConditionSuccess = schedulePriority \u003e= 10 \u0026\u0026 (schedulePolicy == SchedulingRoundRobin || schedulePolicy == SchedulingFirstInFirstOut)", + "\t\t}", + "", + "\t\tif !hasCPUSchedulingConditionSuccess {", + "\t\t\tlogger.Error(\"Process %q in Container %q with cpu scheduling policy=%s, priority=%d did not satisfy cpu scheduling requirements\", process, testContainer, schedulePolicy, schedulePriority)", + "\t\t\taPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, \"process does not satisfy: \"+schedulingRequirements[check], false).", + "\t\t\t\tSetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)", + "\t\t\tnonCompliantContainerPids = append(nonCompliantContainerPids, aPidOut)", + "\t\t\tcontinue", + "\t\t}", + "\t\tlogger.Info(\"Process %q in Container %q with cpu scheduling policy=%s, priority=%d satisfies cpu scheduling requirements\", process, testContainer, schedulePolicy, schedulePriority)", + "\t\taPidOut := testhelper.NewContainerReportObject(testContainer.Namespace, testContainer.Podname, testContainer.Name, \"process satisfies: \"+schedulingRequirements[check], true).", + "\t\t\tSetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)", + 
"\t\tcompliantContainerPids = append(compliantContainerPids, aPidOut)", + "\t}", + "\treturn compliantContainerPids, nonCompliantContainerPids", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PerformanceTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PerformanceTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestExclusiveCPUPool(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRtAppNoExecProbes)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestRtAppsNoExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSharedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoNonGuaranteedPodContainersWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetNonGuaranteedPodContainersWithoutHostPID(), scheduling.SharedCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestExclusiveCPUPoolSchedulingPolicy)).", + 
"\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsolatedCPUPoolSchedulingPolicy)).", + "\t\tWithSkipCheckFn(skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSchedulingPolicyInCPUPool(c, \u0026env, env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID(), scheduling.ExclusiveCPUScheduling)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestLimitedUseOfExecProbesIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestLimitedUseOfExecProbes(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment,", + "\tpodContainers []*provider.Container, schedulingType string) {", + "\tvar compliantContainersPids []*testhelper.ReportObject", + "\tvar nonCompliantContainersPids []*testhelper.ReportObject", + "\tfor _, cut := range podContainers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "", + "\t\t// Get the pid namespace", + "\t\tpidNamespace, err := crclient.GetContainerPidNamespace(cut, env)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get pid namespace for Container %q, err: %v\", cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, 
fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t\tcontinue", + "\t\t}", + "\t\tcheck.LogDebug(\"PID namespace for Container %q is %q\", cut, pidNamespace)", + "", + "\t\t// Get the list of process ids running in the pid namespace", + "\t\tprocesses, err := crclient.GetPidsFromPidNamespace(pidNamespace, cut)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get PIDs from PID namespace %q for Container %q, err: %v\", pidNamespace, cut, err)", + "\t\t\tnonCompliantContainersPids = append(nonCompliantContainersPids,", + "\t\t\t\ttesthelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Internal error, err=%s\", err), false))", + "\t\t}", + "", + "\t\tcompliantPids, nonCompliantPids := scheduling.ProcessPidsCPUScheduling(processes, cut, schedulingType, check.GetLogger())", + "\t\t// Check for the specified priority for each processes running in that pid namespace", + "", + "\t\tcompliantContainersPids = append(compliantContainersPids, compliantPids...)", + "\t\tnonCompliantContainersPids = append(nonCompliantContainersPids, nonCompliantPids...)", + "\t}", + "", + "\tcheck.SetResult(compliantContainersPids, nonCompliantContainersPids)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:44" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:42" + }, + { + "name": "skipIfNoGuaranteedPodContainersWithExclusiveCPUs", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:49" + }, + { + "name": "skipIfNoGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:65" + }, + { + "name": 
"skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:73" + }, + { + "name": "skipIfNoNonGuaranteedPodContainersWithoutHostPID", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:57" + } + ], + "consts": [ + { + "name": "maxNumberOfExecProbes", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:37" + }, + { + "name": "minExecProbePeriodSeconds", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:38" + }, + { + "name": "noProcessFoundErrMsg", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/performance/suite.go:343" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "platform", + "files": 2, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/clusteroperator", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/operatingsystem", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/sysctlconfig", + "strconv", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Registers platform alteration tests into the internal checks database\n\nThe function logs that it is loading the platform alteration suite and\ncreates a new checks group identified by a common key. It registers a\nbefore‑each hook and then adds numerous checks, each with its own skip\nconditions and execution logic. Each check is built from an identifier,\nconfigured to run only when appropriate environment conditions are met, and\ninvokes a specific test function that evaluates node or pod properties. The\nassembled group is added to the checks database for later execution.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:64", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", 
+ "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoBareMetalNodesSkipFn", + "kind": "function", + "source": [ + "func GetNoBareMetalNodesSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetBaremetalNodes()) == 0 {", + "\t\t\treturn true, \"no baremetal nodes found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testHyperThreadingEnabled", + "kind": "function", + "source": [ + "func testHyperThreadingEnabled(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar 
nonCompliantObjects []*testhelper.ReportObject", + "\tbaremetalNodes := env.GetBaremetalNodes()", + "\tfor _, node := range baremetalNodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tenable, err := node.IsHyperThreadNode(env)", + "\t\t//nolint:gocritic", + "\t\tif enable {", + "\t\t\tcheck.LogInfo(\"Node %q has hyperthreading enabled\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has hyperthreading enabled\", true))", + "\t\t} else if err != nil {", + "\t\t\tcheck.LogError(\"Hyperthreading check fail for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Error with executing the check for hyperthreading: \"+err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Node %q has hyperthreading disabled\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has hyperthreading disabled \", false))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + 
"kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testContainersFsDiff", + "kind": "function", + "source": [ + "func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "", + "\t\t// If the probe pod is not found, we cannot run the test.", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Check whether or not a container is available to prevent a panic.", + "\t\tif len(probePod.Spec.Containers) == 0 {", + "\t\t\tcheck.LogError(\"Probe Pod %q has no containers\", probePod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod has no containers\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tctxt := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tfsDiffTester := cnffsdiff.NewFsDiffTester(check, clientsholder.GetClientsHolder(), 
ctxt, env.OpenshiftVersion)", + "\t\tfsDiffTester.RunTest(cut.UID)", + "\t\tswitch fsDiffTester.GetResults() {", + "\t\tcase testhelper.SUCCESS:", + "\t\t\tcheck.LogInfo(\"Container %q is not modified\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not modified\", true))", + "\t\t\tcontinue", + "\t\tcase testhelper.FAILURE:", + "\t\t\tcheck.LogError(\"Container %q modified (changed folders: %v, deleted folders: %v\", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is modified\", false).", + "\t\t\t\tAddField(\"ChangedFolders\", strings.Join(fsDiffTester.ChangedFolders, \",\")).", + "\t\t\t\tAddField(\"DeletedFolders\", strings.Join(fsDiffTester.DeletedFolders, \",\")))", + "", + "\t\tcase testhelper.ERROR:", + "\t\t\tcheck.LogError(\"Could not run fs-diff in Container %q, err: %v\", cut, fsDiffTester.Error)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Error while running fs-diff\", false).AddField(testhelper.Error, fsDiffTester.Error.Error()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = 
log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := 
map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. 
If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the 
taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testIsRedHatRelease", + "kind": "function", + "source": [ + "func testIsRedHatRelease(check *checksdb.Check, env 
*provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tbaseImageTester := isredhat.NewBaseImageTester(clientsholder.GetClientsHolder(), clientsholder.NewContext(cut.Namespace, cut.Podname, cut.Name))", + "", + "\t\tresult, err := baseImageTester.TestContainerIsRedHatRelease()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not collect release information from Container %q, err=%v\", cut, err)", + "\t\t}", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Container %q has failed the RHEL release check\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Failed the RHEL release check\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has passed the RHEL release check\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Passed the RHEL release check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testIsSELinuxEnforcing", + "kind": "function", + "source": [ + "func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst (", + "\t\tgetenforceCommand = `chroot /host getenforce`", + "\t\tenforcingString = \"Enforcing\\n\"", + "\t)", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\to := clientsholder.GetClientsHolder()", + "\tnodesFailed := 0", + "\tnodesError := 0", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, getenforceCommand)", + "\t\tif err != nil || errStr != \"\" {", + "\t\t\tcheck.LogError(\"Could not execute command %q in Probe Pod %q, errStr: %q, err: %v\", getenforceCommand, probePod, errStr, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(probePod.Namespace, probePod.Name, \"Failed to execute command\", false))", + "\t\t\tnodesError++", + "\t\t\tcontinue", + "\t\t}", + "\t\tif outStr != enforcingString {", + "\t\t\tcheck.LogError(\"Node %q is not running SELinux, %s command returned: %s\", probePod.Spec.NodeName, getenforceCommand, outStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is not enforced\", false))", + "\t\t\tnodesFailed++", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q is running SELinux\", probePod.Spec.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is enforced\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": 
"WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testHugepages", + "kind": "function", + "source": [ + "func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Nodes {", + "\t\tnode := env.Nodes[i]", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tif !node.IsWorkerNode() {", + "\t\t\tcheck.LogInfo(\"Node %q is not a worker node\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Not a worker node\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprobePod, exist := env.ProbePods[nodeName]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Could not find a Probe Pod in node %q.\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\thpTester, err := hugepages.NewTester(\u0026node, probePod, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get node hugepages tester for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Unable to get node hugepages tester\", false))", + "\t\t}", + "", + "\t\tif err := hpTester.Run(); err != nil {", + "\t\t\tcheck.LogError(\"Hugepages check failed for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the hugepages check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the hugepages check\", true))", + 
"\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + 
"\t\t\treturn true, \"probe daemonset failed to spawn. please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testUnalteredBootParams", + "kind": "function", + "source": [ + "func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcheck.LogInfo(\"Skipping node %q: already checked.\", cut.NodeName)", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "", + "\t\terr := bootparams.TestBootParamsHelper(env, cut, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Node %q failed the boot params check\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Failed the boot params check\", false).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the boot params check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the boot params check\", true).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck 
:= \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetDaemonSetFailedToSpawnSkipFn", + "kind": "function", + "source": [ + "func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif env.DaemonsetFailedToSpawn {", + "\t\t\treturn true, \"probe daemonset failed to spawn. 
please check the logs.\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testSysctlConfigs", + "kind": "function", + "source": [ + "func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsysctlSettings, err := sysctlconfig.GetSysctlSettings(env, cut.NodeName)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get sysctl settings for node %q, error: %v\", cut.NodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Could not get sysctl settings\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tmcKernelArgumentsMap := bootparams.GetMcKernelArguments(env, cut.NodeName)", + "\t\tvalidSettings := true", + "\t\tfor key, sysctlConfigVal := range sysctlSettings {", + "\t\t\tif mcVal, ok := mcKernelArgumentsMap[key]; ok {", + "\t\t\t\tif mcVal != sysctlConfigVal {", + "\t\t\t\t\tcheck.LogError(\"Kernel config mismatch in node %q for %q (sysctl value: %q, machine config value: %q)\",", + "\t\t\t\t\t\tcut.NodeName, key, sysctlConfigVal, mcVal)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, fmt.Sprintf(\"Kernel config mismatch for %s\", key), 
false))", + "\t\t\t\t\tvalidSettings = false", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif validSettings {", + "\t\t\tcheck.LogInfo(\"Node %q passed the sysctl config check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the sysctl config check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoIstioSkipFn", + "kind": "function", + "source": [ + "func GetNoIstioSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !env.IstioServiceMeshFound {", + "\t\t\treturn true, \"no istio service mesh found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + 
"}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoPodsUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Pods) == 0 {", + "\t\t\treturn true, \"no pods to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testServiceMesh", + "kind": "function", + "source": [ + "func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tistioProxyFound := false", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif cut.IsIstioProxy() {", + "\t\t\t\tcheck.LogInfo(\"Istio proxy container found on Pod %q (Container %q)\", put, cut)", + "\t\t\t\tistioProxyFound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !istioProxyFound {", + "\t\t\tcheck.LogError(\"Pod %q found without service mesh\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod found without service mesh container\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q found with service mesh\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod found with service mesh container\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", 
+ "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testOCPStatus", + "kind": "function", + "source": [ + "func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tclusterIsInEOL := false", + "\tswitch env.OCPStatus {", + "\tcase compatibility.OCPStatusEOL:", + "\t\tcheck.LogError(\"OCP Version %q has been found to be in end of life\", env.OpenshiftVersion)", + "\t\tclusterIsInEOL = true", + "\tcase compatibility.OCPStatusMS:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in maintenance support\", env.OpenshiftVersion)", + "\tcase compatibility.OCPStatusGA:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in general availability\", env.OpenshiftVersion)", + "\tcase compatibility.OCPStatusPreGA:", + 
"\t\tcheck.LogInfo(\"OCP Version %q has been found to be in pre-general availability\", env.OpenshiftVersion)", + "\tdefault:", + "\t\tcheck.LogInfo(\"OCP Version %q was unable to be found in the lifecycle compatibility matrix\", env.OpenshiftVersion)", + "\t}", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tif clusterIsInEOL {", + "\t\tnonCompliantObjects = []*testhelper.ReportObject{testhelper.NewClusterVersionReportObject(env.OpenshiftVersion, \"Openshift Version is in End Of Life (EOL)\", false)}", + "\t} else {", + "\t\tcompliantObjects = []*testhelper.ReportObject{testhelper.NewClusterVersionReportObject(env.OpenshiftVersion, \"Openshift Version is not in End Of Life (EOL)\", true)}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + 
"}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testNodeOperatingSystemStatus", + "kind": "function", + "source": [ + "func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfailedControlPlaneNodes := []string{}", + "\tfailedWorkerNodes := []string{}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, node := range env.Nodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\t// Get the OSImage which should tell us what version of operating system the node is running.", + "\t\tcheck.LogInfo(\"Node %q is running operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "", + "\t\t// Control plane nodes must be RHCOS (also CentOS Stream starting in OCP 4.13)", + "\t\t// Per the release notes from OCP documentation:", + "\t\t// \"You must use RHCOS machines for the control plane, and you can use either RHCOS or RHEL for compute machines.\"", + "\t\tif node.IsControlPlaneNode() \u0026\u0026 !node.IsRHCOS() \u0026\u0026 !node.IsCSCOS() {", + "\t\t\tcheck.LogError(\"Control plane node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\tfailedControlPlaneNodes = append(failedControlPlaneNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Control plane node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, 
node.Data.Status.NodeInfo.OSImage))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Worker nodes can either be RHEL or RHCOS", + "\t\tif node.IsWorkerNode() {", + "\t\t\t//nolint:gocritic", + "\t\t\tif node.IsRHCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHCOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHCOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tif shortVersion == operatingsystem.NotFoundStr {", + "\t\t\t\t\tcheck.LogInfo(\"Node %q has an RHCOS operating system that is not found in our internal database. Skipping as to not cause failures due to database mismatch.\", nodeName)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHCOS version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHCOS shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHCOSCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).", + "\t\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHCOS %q\", nodeName, shortVersion)", + 
"\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).", + "\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t} else if node.IsCSCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetCSCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather CentOS Stream CoreOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather CentOS Stream CoreOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// Warning: CentOS Stream CoreOS has not been released yet in any", + "\t\t\t\t// OCP RC/GA versions, so for the moment, we cannot compare the", + "\t\t\t\t// version with the OCP one, or retrieve it on the internal database", + "\t\t\t\tmsg := `", + "\t\t\t\t\tNode %s is using CentOS Stream CoreOS %s, which is not being used yet in any", + "\t\t\t\t\tOCP RC/GA version. 
Relaxing the conditions to check the OS as a result.", + "\t\t\t\t\t`", + "\t\t\t\tcheck.LogDebug(msg, nodeName, shortVersion)", + "\t\t\t} else if node.IsRHEL() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHELVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHEL version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHEL version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHEL version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHEL shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHELCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t} else {", + "\t\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible operating system %q\", nodeName, 
node.Data.Status.NodeInfo.OSImage)", + "\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tif n := len(failedControlPlaneNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of control plane nodes running non-RHCOS based operating systems: %d\", n)", + "\t}", + "", + "\tif n := len(failedWorkerNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of worker nodes running non-RHCOS or non-RHEL based operating systems: %d\", n)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoHugepagesPodsSkipFn", + "kind": "function", + "source": [ + "func GetNoHugepagesPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetHugepagesPods()) == 0 {", + "\t\t\treturn true, \"no pods requesting hugepages found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodHugePagesSize", + "kind": "function", + "source": [ + "func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, size string) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetHugepagesPods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult := put.CheckResourceHugePagesSize(size)", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Pod %q has been found to be running with an incorrect hugepages size (expected size %q)\", put, size)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with an incorrect hugepages size\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has been found to be running with a correct hugepages size %q\", put, size)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with a correct hugepages size\", true))", + "\t\t}", + "\t}", + 
"\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoHugepagesPodsSkipFn", + "kind": "function", + "source": [ + "func GetNoHugepagesPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.GetHugepagesPods()) == 0 {", + "\t\t\treturn true, \"no pods requesting 
hugepages found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testPodHugePagesSize", + "kind": "function", + "source": [ + "func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, size string) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetHugepagesPods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult := put.CheckResourceHugePagesSize(size)", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Pod %q has been found to be running with an incorrect hugepages size (expected size %q)\", put, size)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with an incorrect hugepages size\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q has been found to be running with a correct hugepages size %q\", put, size)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with a correct hugepages size\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNonOCPClusterSkipFn", + "kind": "function", + "source": [ + "func GetNonOCPClusterSkipFn() func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif !provider.IsOCPCluster() {", + "\t\t\treturn true, \"non-OCP cluster detected\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "testClusterOperatorHealth", + "kind": "function", + "source": [ + "func testClusterOperatorHealth(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Checks the various ClusterOperator(s) to see if they are all in an 'Available' state.", + "\t// If they are not in an 'Available' state, the check will fail.", + "\t// Note: This check is only applicable to OCP clusters and is skipped for non-OCP clusters.", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the ClusterOperators and check their status.", + "\tfor i := range env.ClusterOperators {", + "\t\tcheck.LogInfo(\"Testing ClusterOperator %q to ensure it is in an 'Available' state.\", env.ClusterOperators[i].Name)", + "", + "\t\tif clusteroperator.IsClusterOperatorAvailable(\u0026env.ClusterOperators[i]) {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is in an 'Available' state\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects = 
append(nonCompliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is not in an 'Available' state\", false))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadInternalChecksDB", + "kind": "function", + "source": [ + "func LoadInternalChecksDB() {", + "\taccesscontrol.LoadChecks()", + "\tcertification.LoadChecks()", + "\tlifecycle.LoadChecks()", + "\tmanageability.LoadChecks()", + "\tnetworking.LoadChecks()", + "\tobservability.LoadChecks()", + "\tperformance.LoadChecks()", + "\tplatform.LoadChecks()", + "\toperator.LoadChecks()", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + 
"\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "testClusterOperatorHealth", + "qualifiedName": "testClusterOperatorHealth", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testClusterOperatorHealth Verifies that all cluster operators are available\n\nThe function iterates over each operator in the test environment, logging a\ncheck for each one. It uses a helper to determine if an operator is in the\n'Available' state and records compliant or non‑compliant results\naccordingly. Finally, it aggregates these results into the test's outcome.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:823", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/clusteroperator", + "name": "IsClusterOperatorAvailable", + "kind": "function", + "source": [ + "func IsClusterOperatorAvailable(co *configv1.ClusterOperator) bool {", + "\t// Loop through the conditions, looking for the 'Available' state.", + "\tfor _, condition := range co.Status.Conditions {", + "\t\tif condition.Type == configv1.OperatorAvailable {", + "\t\t\tlog.Info(\"ClusterOperator %q is in an 'Available' state\", co.Name)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"ClusterOperator %q is not in an 'Available' state\", co.Name)", + "\treturn false", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterOperatorReportObject", + "kind": "function", + "source": [ + 
"func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ClusterOperatorType, isCompliant)", + "\tout.AddField(Name, aClusterOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterOperatorReportObject", + "kind": "function", + "source": [ + "func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ClusterOperatorType, isCompliant)", + "\tout.AddField(Name, aClusterOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testClusterOperatorHealth(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Checks the various ClusterOperator(s) to see if they are all in an 'Available' state.", + "\t// If they are not in an 'Available' state, the check will fail.", + "\t// Note: This check is only applicable to OCP clusters and is skipped for non-OCP clusters.", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the ClusterOperators and check their status.", + "\tfor i := range env.ClusterOperators {", + "\t\tcheck.LogInfo(\"Testing ClusterOperator %q to ensure it is in an 'Available' state.\", env.ClusterOperators[i].Name)", + "", + "\t\tif clusteroperator.IsClusterOperatorAvailable(\u0026env.ClusterOperators[i]) {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is in an 'Available' state\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is not in an 'Available' state\", false))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + 
{ + "name": "testContainersFsDiff", + "qualifiedName": "testContainersFsDiff", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testContainersFsDiff Verifies containers have not been altered by comparing file system snapshots\n\nThe routine iterates over each container under test, locating a corresponding\nprobe pod to obtain the original filesystem state. It runs a diff check; if\nthe container shows no changes it records compliance, otherwise it logs the\nmodified or deleted directories and marks non‑compliance. Errors during the\ndiff process are captured as failures and reported with error details.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:257", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, 
aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "NewFsDiffTester", + "kind": "function", + "source": [ + "func NewFsDiffTester(check *checksdb.Check, client clientsholder.Command, ctxt clientsholder.Context, ocpVersion string) *FsDiff {", + "\tuseCustomPodman := shouldUseCustomPodman(check, ocpVersion)", + "\tcheck.LogDebug(\"Using custom podman: %v.\", useCustomPodman)", + "", + "\treturn \u0026FsDiff{", + "\t\tcheck: check,", + "\t\tclientHolder: client,", + "\t\tctxt: ctxt,", + "\t\tresult: testhelper.ERROR,", + "\t\tuseCustomPodman: useCustomPodman,", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "RunTest", + "kind": "function" + }, + { + "name": "GetResults", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func 
NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } 
+ ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + 
"\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + 
"\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "", + "\t\t// If the probe pod is not found, we cannot run the test.", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Check whether or not a container is available to prevent a panic.", + "\t\tif len(probePod.Spec.Containers) == 0 {", + "\t\t\tcheck.LogError(\"Probe Pod %q has no containers\", probePod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod has no containers\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tctxt := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tfsDiffTester := cnffsdiff.NewFsDiffTester(check, clientsholder.GetClientsHolder(), ctxt, env.OpenshiftVersion)", + "\t\tfsDiffTester.RunTest(cut.UID)", + "\t\tswitch fsDiffTester.GetResults() {", + "\t\tcase testhelper.SUCCESS:", + "\t\t\tcheck.LogInfo(\"Container %q is not modified\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not modified\", true))", + "\t\t\tcontinue", + "\t\tcase testhelper.FAILURE:", + "\t\t\tcheck.LogError(\"Container %q modified (changed folders: %v, deleted folders: %v\", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is modified\", false).", + "\t\t\t\tAddField(\"ChangedFolders\", strings.Join(fsDiffTester.ChangedFolders, \",\")).", + 
"\t\t\t\tAddField(\"DeletedFolders\", strings.Join(fsDiffTester.DeletedFolders, \",\")))", + "", + "\t\tcase testhelper.ERROR:", + "\t\t\tcheck.LogError(\"Could not run fs-diff in Container %q, err: %v\", cut, fsDiffTester.Error)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Error while running fs-diff\", false).AddField(testhelper.Error, fsDiffTester.Error.Error()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testHugepages", + "qualifiedName": "testHugepages", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testHugepages Verifies that node hugepages configuration has not been altered\n\nThe function iterates over all nodes in the test environment, skipping\nnon‑worker nodes as compliant. For each worker node it looks up a probe\npod, creates a hugepages tester and runs its check. 
Results are collected\ninto compliant or non‑compliant report objects which are then set on the\nprovided check.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:520", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsWorkerNode", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "NewTester", + "kind": "function", + "source": [ + "func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error) {", + "\ttester := \u0026Tester{", + "\t\tnode: node,", + "\t\tcommander: commander,", + "\t\tcontext: clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name),", + "\t}", + "", + "\tlog.Info(\"Getting node %s numa's hugepages values.\", node.Data.Name)", + "\tvar err error", + "\ttester.nodeHugepagesByNuma, err = tester.getNodeNumaHugePages()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get 
node hugepages, err: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Parsing machineconfig's kernelArguments and systemd's hugepages units.\")", + "\ttester.mcSystemdHugepagesByNuma, err = getMcSystemdUnitsHugepagesConfig(\u0026tester.node.Mc)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get MC systemd hugepages config, err: %v\", err)", + "\t}", + "", + "\treturn tester, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "Run", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + 
"name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + 
"\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Nodes {", + "\t\tnode := env.Nodes[i]", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tif !node.IsWorkerNode() {", + "\t\t\tcheck.LogInfo(\"Node %q is not a worker node\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Not a worker node\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprobePod, exist := env.ProbePods[nodeName]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Could not find a Probe Pod in node %q.\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\thpTester, err := hugepages.NewTester(\u0026node, probePod, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get node hugepages tester for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Unable to get node hugepages tester\", false))", + "\t\t}", + "", + "\t\tif err := hpTester.Run(); err != nil {", + "\t\t\tcheck.LogError(\"Hugepages check failed for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the hugepages check\", nodeName)", + "\t\t\tcompliantObjects = 
append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the hugepages check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testHyperThreadingEnabled", + "qualifiedName": "testHyperThreadingEnabled", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testHyperThreadingEnabled Verifies hyper‑threading status on all bare metal nodes\n\nThe routine retrieves every bare metal node from the test environment and\nqueries whether hyper‑threading is active for each one. It records\ncompliant nodes where hyper‑threading is enabled, logs errors for disabled\nor query failures, and compiles separate lists of compliant and\nnon‑compliant objects before setting the check result.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:195", + "calls": [ + { + "name": "GetBaremetalNodes", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsHyperThreadNode", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + 
"\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + 
"\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + 
"\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testHyperThreadingEnabled(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tbaremetalNodes := env.GetBaremetalNodes()", + "\tfor _, node := range baremetalNodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tenable, err := node.IsHyperThreadNode(env)", + "\t\t//nolint:gocritic", + "\t\tif enable {", + "\t\t\tcheck.LogInfo(\"Node %q has hyperthreading enabled\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has hyperthreading enabled\", true))", + "\t\t} else if err != nil {", + "\t\t\tcheck.LogError(\"Hyperthreading check fail for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Error with executing the check for hyperthreading: \"+err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogError(\"Node %q has hyperthreading disabled\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has hyperthreading disabled \", false))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testIsRedHatRelease", + "qualifiedName": "testIsRedHatRelease", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": 
"testIsRedHatRelease Verifies that containers use a Red Hat Enterprise Linux base image\n\nThe function iterates over all test containers, creating a tester for each\nbased on its namespace, pod name, and container name. It calls the tester to\ndetermine if the underlying image is a RHEL release; any errors are logged as\nfailures. Containers that pass or fail are recorded in separate report lists\nwhich are then stored in the check result.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:452", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + "name": "NewBaseImageTester", + "kind": "function", + "source": [ + "func NewBaseImageTester(client clientsholder.Command, ctx clientsholder.Context) *BaseImageInfo {", + "\treturn \u0026BaseImageInfo{", + "\t\tClientHolder: client,", + "\t\tOCPContext: ctx,", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "TestContainerIsRedHatRelease", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "LogError", 
+ "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", 
+ "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tbaseImageTester := isredhat.NewBaseImageTester(clientsholder.GetClientsHolder(), clientsholder.NewContext(cut.Namespace, cut.Podname, cut.Name))", + "", + "\t\tresult, err := baseImageTester.TestContainerIsRedHatRelease()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not collect release information from Container %q, err=%v\", cut, err)", + "\t\t}", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Container %q has failed the RHEL release check\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, 
\"Failed the RHEL release check\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has passed the RHEL release check\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Passed the RHEL release check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testIsSELinuxEnforcing", + "qualifiedName": "testIsSELinuxEnforcing", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testIsSELinuxEnforcing Checks that SELinux is enforcing on cluster nodes\n\nThe function runs a command inside each probe pod to read the SELinux mode\nvia chroot and verifies it matches \"Enforcing\\n\". It records compliant or\nnon‑compliant results per node, logging errors for execution failures. The\nfinal result aggregates all objects and updates the check status.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:481", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": 
"function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + 
"\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) 
error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tconst (", + "\t\tgetenforceCommand = `chroot /host getenforce`", + "\t\tenforcingString = \"Enforcing\\n\"", + "\t)", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\to := clientsholder.GetClientsHolder()", + "\tnodesFailed := 0", + "\tnodesError := 0", + "\tfor _, probePod := range env.ProbePods {", + "\t\tctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\toutStr, errStr, err := o.ExecCommandContainer(ctx, getenforceCommand)", + "\t\tif err != 
nil || errStr != \"\" {", + "\t\t\tcheck.LogError(\"Could not execute command %q in Probe Pod %q, errStr: %q, err: %v\", getenforceCommand, probePod, errStr, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(probePod.Namespace, probePod.Name, \"Failed to execute command\", false))", + "\t\t\tnodesError++", + "\t\t\tcontinue", + "\t\t}", + "\t\tif outStr != enforcingString {", + "\t\t\tcheck.LogError(\"Node %q is not running SELinux, %s command returned: %s\", probePod.Spec.NodeName, getenforceCommand, outStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is not enforced\", false))", + "\t\t\tnodesFailed++", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q is running SELinux\", probePod.Spec.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(probePod.Spec.NodeName, \"SELinux is enforced\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testNodeOperatingSystemStatus", + "qualifiedName": "testNodeOperatingSystemStatus", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testNodeOperatingSystemStatus Verifies node operating system compatibility\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:684", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsControlPlaneNode", + "kind": "function" + }, + { + "name": "IsRHCOS", + "kind": "function" + }, + { + "name": "IsCSCOS", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "IsWorkerNode", + "kind": "function" + }, + { + "name": "IsRHCOS", + "kind": "function" + }, + { + "name": "GetRHCOSVersion", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "IsRHCOSCompatible", + "kind": "function", + "source": [ + "func IsRHCOSCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\t// Exception for beta versions", + "\tif BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion) {", + "\t\treturn true", + "\t}", + "", + "\t// Split the incoming version on the \".\" and make sure we are only looking at major.minor.", + "\tocpVersion = FindMajorMinor(ocpVersion)", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\t// Collect the machine version and the entry version", + "\t\tmv, err := gv.NewVersion(machineVersion)", + 
"\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing machineVersion: %s err: %v\", machineVersion, err)", + "\t\t\treturn false", + "\t\t}", + "\t\tev, err := gv.NewVersion(entry.MinRHCOSVersion)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Error parsing MinRHCOSVersion: %s err: %v\", entry.MinRHCOSVersion, err)", + "\t\t\treturn false", + "\t\t}", + "", + "\t\t// If the machine version \u003e= the entry version", + "\t\treturn mv.GreaterThanOrEqual(ev)", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "IsCSCOS", + "kind": "function" + }, + { + "name": "GetCSCOSVersion", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": 
"NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "IsRHEL", + "kind": "function" + }, + { + "name": "GetRHELVersion", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility", + "name": "IsRHELCompatible", + "kind": "function", + "source": [ + "func IsRHELCompatible(machineVersion, ocpVersion string) bool {", + "\tif machineVersion == \"\" || ocpVersion == \"\" {", + "\t\treturn false", + "\t}", + "", + "\tlifecycleInfo := GetLifeCycleDates()", + "\tif entry, ok := lifecycleInfo[ocpVersion]; ok {", + "\t\tif len(entry.RHELVersionsAccepted) \u003e= 2 { //nolint:mnd", + "\t\t\t// Need to be a specific major.minor version", + "\t\t\tfor _, v := range entry.RHELVersionsAccepted {", + "\t\t\t\tif v == machineVersion {", + "\t\t\t\t\treturn true", + "\t\t\t\t}", + "\t\t\t}", + "\t\t} else {", + "\t\t\t// Collect the machine version and the entry version", + "\t\t\tmv, _ := gv.NewVersion(machineVersion)", + "\t\t\tev, _ := gv.NewVersion(entry.RHELVersionsAccepted[0])", + "", + "\t\t\t// If the machine version \u003e= the entry version", + 
"\t\t\treturn mv.GreaterThanOrEqual(ev)", + "\t\t}", + "\t}", + "", + "\treturn false", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": 
"function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tfailedControlPlaneNodes := 
[]string{}", + "\tfailedWorkerNodes := []string{}", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, node := range env.Nodes {", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\t// Get the OSImage which should tell us what version of operating system the node is running.", + "\t\tcheck.LogInfo(\"Node %q is running operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "", + "\t\t// Control plane nodes must be RHCOS (also CentOS Stream starting in OCP 4.13)", + "\t\t// Per the release notes from OCP documentation:", + "\t\t// \"You must use RHCOS machines for the control plane, and you can use either RHCOS or RHEL for compute machines.\"", + "\t\tif node.IsControlPlaneNode() \u0026\u0026 !node.IsRHCOS() \u0026\u0026 !node.IsCSCOS() {", + "\t\t\tcheck.LogError(\"Control plane node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\tfailedControlPlaneNodes = append(failedControlPlaneNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Control plane node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Worker nodes can either be RHEL or RHCOS", + "\t\tif node.IsWorkerNode() {", + "\t\t\t//nolint:gocritic", + "\t\t\tif node.IsRHCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHCOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHCOS 
version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\tif shortVersion == operatingsystem.NotFoundStr {", + "\t\t\t\t\tcheck.LogInfo(\"Node %q has an RHCOS operating system that is not found in our internal database. Skipping as to not cause failures due to database mismatch.\", nodeName)", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHCOS version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHCOS shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHCOSCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).", + "\t\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHCOS %q\", nodeName, shortVersion)", + "\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).", + "\t\t\t\t\tAddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t} else if node.IsCSCOS() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetCSCOSVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather CentOS Stream CoreOS version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, 
testhelper.NewNodeReportObject(nodeName, \"Failed to gather CentOS Stream CoreOS version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// Warning: CentOS Stream CoreOS has not been released yet in any", + "\t\t\t\t// OCP RC/GA versions, so for the moment, we cannot compare the", + "\t\t\t\t// version with the OCP one, or retrieve it on the internal database", + "\t\t\t\tmsg := `", + "\t\t\t\t\tNode %s is using CentOS Stream CoreOS %s, which is not being used yet in any", + "\t\t\t\t\tOCP RC/GA version. Relaxing the conditions to check the OS as a result.", + "\t\t\t\t\t`", + "\t\t\t\tcheck.LogDebug(msg, nodeName, shortVersion)", + "\t\t\t} else if node.IsRHEL() {", + "\t\t\t\t// Get the short version from the node", + "\t\t\t\tshortVersion, err := node.GetRHELVersion()", + "\t\t\t\tif err != nil {", + "\t\t\t\t\tcheck.LogError(\"Node %q failed to gather RHEL version, err: %v\", nodeName, err)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to gather RHEL version\", false))", + "\t\t\t\t\tcontinue", + "\t\t\t\t}", + "", + "\t\t\t\t// If the node's RHEL version and the OpenShift version are not compatible, the node fails.", + "\t\t\t\tcheck.LogDebug(\"Comparing RHEL shortVersion %q to openshiftVersion %q\", shortVersion, env.OpenshiftVersion)", + "\t\t\t\tif !compatibility.IsRHELCompatible(shortVersion, env.OpenshiftVersion) {", + "\t\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t} else {", + 
"\t\t\t\t\tcheck.LogInfo(\"Worker node %q has been found to be running a compatible version of RHEL %q\", nodeName, shortVersion)", + "\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running a compatible OS\", true).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t\t}", + "\t\t\t} else {", + "\t\t\t\tcheck.LogError(\"Worker node %q has been found to be running an incompatible operating system %q\", nodeName, node.Data.Status.NodeInfo.OSImage)", + "\t\t\t\tfailedWorkerNodes = append(failedWorkerNodes, nodeName)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Worker node has been found to be running an incompatible OS\", false).AddField(testhelper.OSImage, node.Data.Status.NodeInfo.OSImage))", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\tif n := len(failedControlPlaneNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of control plane nodes running non-RHCOS based operating systems: %d\", n)", + "\t}", + "", + "\tif n := len(failedWorkerNodes); n \u003e 0 {", + "\t\tcheck.LogError(\"Number of worker nodes running non-RHCOS or non-RHEL based operating systems: %d\", n)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testOCPStatus", + "qualifiedName": "testOCPStatus", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testOCPStatus Checks OpenShift cluster version against lifecycle status\n\nThe function inspects the environment’s OpenShift status, logs an\nappropriate message for EOL, maintenance, GA, or pre‑GA releases, and marks\nthe check as compliant unless the version is in end of life. 
It constructs\nreport objects indicating compliance and assigns them to the check result.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:653", + "calls": [ + { + "name": "LogError", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterVersionReportObject", + "kind": "function", + "source": [ + "func NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OCPClusterType, isCompliant)", + "\tout.AddField(OCPClusterVersionType, version)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewClusterVersionReportObject", + "kind": "function", + "source": [ + "func NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OCPClusterType, isCompliant)", + "\tout.AddField(OCPClusterVersionType, version)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + 
"\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tclusterIsInEOL := false", + "\tswitch env.OCPStatus {", + "\tcase compatibility.OCPStatusEOL:", + "\t\tcheck.LogError(\"OCP Version %q has been found to be in end of life\", env.OpenshiftVersion)", + "\t\tclusterIsInEOL = true", + "\tcase compatibility.OCPStatusMS:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in maintenance support\", env.OpenshiftVersion)", + "\tcase compatibility.OCPStatusGA:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in general availability\", env.OpenshiftVersion)", + "\tcase compatibility.OCPStatusPreGA:", + "\t\tcheck.LogInfo(\"OCP Version %q has been found to be in pre-general availability\", env.OpenshiftVersion)", + "\tdefault:", + "\t\tcheck.LogInfo(\"OCP Version %q was unable to be found in the lifecycle compatibility matrix\", 
env.OpenshiftVersion)", + "\t}", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\tif clusterIsInEOL {", + "\t\tnonCompliantObjects = []*testhelper.ReportObject{testhelper.NewClusterVersionReportObject(env.OpenshiftVersion, \"Openshift Version is in End Of Life (EOL)\", false)}", + "\t} else {", + "\t\tcompliantObjects = []*testhelper.ReportObject{testhelper.NewClusterVersionReportObject(env.OpenshiftVersion, \"Openshift Version is not in End Of Life (EOL)\", true)}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testPodHugePagesSize", + "qualifiedName": "testPodHugePagesSize", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment, string)()", + "doc": "testPodHugePagesSize Verifies that pods use the expected hugepages size\n\nThe function iterates over all pods configured with hugepages in the test\nenvironment, checks each pod's allocated hugepages against a specified size,\nand logs whether each check passes or fails. It collects compliant and\nnon‑compliant pods into separate report objects, which are then set as the\nresult of the current test. 
Errors are logged for any pod that does not match\nthe expected size.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:800", + "calls": [ + { + "name": "GetHugepagesPods", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "CheckResourceHugePagesSize", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + 
"\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + 
"\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + 
"\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, size string) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.GetHugepagesPods() {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tresult := put.CheckResourceHugePagesSize(size)", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Pod %q has been found to be running with an incorrect hugepages size (expected size %q)\", put, size)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with an incorrect hugepages size\", false))", + "\t\t} else {", + 
"\t\t\tcheck.LogInfo(\"Pod %q has been found to be running with a correct hugepages size %q\", put, size)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod has been found to be running with a correct hugepages size\", true))", + "\t\t}", + "\t}", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testServiceMesh", + "qualifiedName": "testServiceMesh", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testServiceMesh Verifies that every pod contains an Istio proxy container\n\nThe function iterates over all pods in the test environment, checking each\ncontainer for a service‑mesh indicator. Pods lacking an Istio proxy are\nrecorded as non‑compliant and logged with an error; those containing one\nare marked compliant and logged positively. Finally, the check result is set\nwith lists of compliant and non‑compliant report objects.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:225", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "IsIstioProxy", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": 
"NewPodReportObject", + "kind": "function", + "source": [ + "func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, PodType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + 
"\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, put := range env.Pods {", + "\t\tcheck.LogInfo(\"Testing Pod %q\", put)", + "\t\tistioProxyFound := false", + "\t\tfor _, cut := range put.Containers {", + "\t\t\tif cut.IsIstioProxy() {", + "\t\t\t\tcheck.LogInfo(\"Istio proxy container found on Pod %q (Container %q)\", put, cut)", + "\t\t\t\tistioProxyFound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "\t\tif !istioProxyFound {", + "\t\t\tcheck.LogError(\"Pod %q found without service mesh\", put)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod found without service mesh container\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Pod %q found with service mesh\", put)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewPodReportObject(put.Namespace, put.Name, \"Pod found with service mesh container\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testSysctlConfigs", + "qualifiedName": "testSysctlConfigs", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testSysctlConfigs Verifies node sysctl values against machine config\n\nThis routine iterates over containers, ensuring each node is checked only\nonce. 
For every node it retrieves current sysctl settings and compares them\nto the expected kernel arguments defined in its machine configuration.\nMismatches are logged and reported as non‑compliant; nodes with matching\nvalues are marked compliant. The results are stored in the check result for\nlater reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:601", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/sysctlconfig", + "name": "GetSysctlSettings", + "kind": "function", + "source": [ + "func GetSysctlSettings(env *provider.TestEnvironment, nodeName string) (map[string]string, error) {", + "\tconst (", + "\t\tsysctlCommand = \"chroot /host sysctl --system\"", + "\t)", + "", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, sysctlCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s in probe pod %s, err=%s, stderr=%s\", sysctlCommand,", + "\t\t\tenv.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\treturn parseSysctlSystemOutput(outStr), nil", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "GetMcKernelArguments", + "kind": "function", + "source": [ + "func GetMcKernelArguments(env *provider.TestEnvironment, nodeName string) (aMap map[string]string) {", + "\tmcKernelArgumentsMap := arrayhelper.ArgListToMap(env.Nodes[nodeName].Mc.Spec.KernelArguments)", + "\treturn mcKernelArgumentsMap", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": 
"LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + 
"\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c 
*checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container 
%q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsysctlSettings, err := sysctlconfig.GetSysctlSettings(env, cut.NodeName)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get sysctl settings for node %q, error: %v\", cut.NodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Could not get sysctl settings\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tmcKernelArgumentsMap := bootparams.GetMcKernelArguments(env, cut.NodeName)", + "\t\tvalidSettings := true", + "\t\tfor key, sysctlConfigVal := range sysctlSettings {", + "\t\t\tif mcVal, ok := mcKernelArgumentsMap[key]; ok {", + "\t\t\t\tif mcVal != sysctlConfigVal {", + "\t\t\t\t\tcheck.LogError(\"Kernel config mismatch in node %q for %q (sysctl value: %q, machine config value: %q)\",", + "\t\t\t\t\t\tcut.NodeName, key, sysctlConfigVal, mcVal)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, fmt.Sprintf(\"Kernel config mismatch for %s\", key), false))", + "\t\t\t\t\tvalidSettings = false", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif validSettings {", + "\t\t\tcheck.LogInfo(\"Node %q passed the sysctl config check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the sysctl config check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testTainted", + "qualifiedName": "testTainted", 
+ "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testTainted Checks nodes for kernel taints against an allowlist\n\nThe function iterates over cluster nodes, verifies a workload is present,\nretrieves each node's kernel taint bitmask, and decodes the taints. It\ncompares found taints to a configured list of acceptable modules, logging\nerrors when unexpected taints or non‑module taints appear. Compliant and\nnon‑compliant findings are collected into report objects and reported via\nSetResult.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:311", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "HasWorkloadDeployed", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "NewNodeTaintedTester", + "kind": "function", + "source": [ + "func NewNodeTaintedTester(context *clientsholder.Context, node string) *NodeTainted {", + "\treturn \u0026NodeTainted{", + "\t\tctx: context,", + "\t\tnode: node,", + "\t}", + "}" + ] + }, + { + "name": "GetKernelTaintsMask", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func 
NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "DecodeKernelTaintsFromBitMask", + "kind": "function", + "source": [ + "func DecodeKernelTaintsFromBitMask(bitmask uint64) []string {", + "\ttaints := []string{}", + "\tfor i := 0; i \u003c 64; i++ {", + "\t\tbit := (bitmask \u003e\u003e i) \u0026 1", + "\t\tif bit == 1 {", + "\t\t\ttaints = append(taints, GetTaintMsg(i))", + "\t\t}", + "\t}", + "\treturn taints", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "FormatUint", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "DecodeKernelTaintsFromBitMask", + "kind": "function", + "source": [ + "func DecodeKernelTaintsFromBitMask(bitmask uint64) []string {", + "\ttaints := []string{}", + "\tfor i := 0; i \u003c 64; i++ {", + "\t\tbit := (bitmask \u003e\u003e i) \u0026 1", + "\t\tif bit == 1 {", + "\t\t\ttaints = append(taints, GetTaintMsg(i))", + "\t\t}", + "\t}", + "\treturn taints", + "}" + ] + }, + { + "name": 
"LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "GetTainterModules", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "DecodeKernelTaintsFromLetters", + "kind": "function", + "source": [ + "func DecodeKernelTaintsFromLetters(letters string) []string {", + "\ttaints := []string{}", + "", + "\tfor _, l := range letters {", + "\t\ttaintLetter := string(l)", + "\t\tfound := false", + "", + "\t\tfor i := range kernelTaints {", + "\t\t\tkernelTaint := kernelTaints[i]", + "\t\t\tif strings.Contains(kernelTaint.Letters, taintLetter) {", + "\t\t\t\ttaints = append(taints, fmt.Sprintf(\"%s (taint letter:%s, bit:%d)\",", + "\t\t\t\t\tkernelTaint.Description, taintLetter, i))", + "\t\t\t\tfound = true", + "\t\t\t\tbreak", + "\t\t\t}", + 
"\t\t}", + "", + "\t\t// The letter does not belong to any known (yet) taint...", + "\t\tif !found {", + "\t\t\ttaints = append(taints, fmt.Sprintf(\"unknown taint (letter %s)\", taintLetter))", + "\t\t}", + "\t}", + "", + "\treturn taints", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewTaintReportObject", + "kind": "function", + "source": [ + "func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, TaintType, isCompliant)", + "\tout.AddField(NodeType, nodeName)", + "\tout.AddField(TaintBit, taintBit)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "RemoveAllExceptNumbers", + "kind": "function", + "source": [ + "func RemoveAllExceptNumbers(incomingStr string) string {", + "\t// example string \", bit:10)\"", + "\t// return 10", + "", + "\t// remove all characters except numbers", + "\tre := regexp.MustCompile(`\\D+`)", + "\treturn re.ReplaceAllString(incomingStr, \"\")", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "GetOtherTaintedBits", + "kind": "function", + "source": [ + "func GetOtherTaintedBits(taintsMask uint64, taintedBitsByModules map[int]bool) []int {", + "\totherTaintedBits := []int{}", + "\t// Lastly, check that all kernel taint bits come from modules.", + "\tfor i := 0; i \u003c 64; i++ {", + "\t\t// helper var that is true if bit \"i\" is set.", + "\t\tbitIsSet := (taintsMask \u0026 (1 \u003c\u003c i)) \u003e 0", + "", + "\t\tif bitIsSet \u0026\u0026 !taintedBitsByModules[i] {", + "\t\t\totherTaintedBits = append(otherTaintedBits, 
i)", + "\t\t}", + "\t}", + "", + "\treturn otherTaintedBits", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewTaintReportObject", + "kind": "function", + "source": [ + "func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, TaintType, isCompliant)", + "\tout.AddField(NodeType, nodeName)", + "\tout.AddField(TaintBit, taintBit)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "strconv", + "name": "Itoa", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "GetTaintMsg", + "kind": "function", + "source": [ + "func GetTaintMsg(bit int) string {", + "\tif taintMsg, exists := kernelTaints[bit]; exists {", + "\t\treturn fmt.Sprintf(\"%s (tainted bit %d)\", taintMsg.Description, bit)", + "\t}", + "", + "\treturn fmt.Sprintf(\"reserved (tainted bit %d)\", bit)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { 
+ "name": "LogError", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + 
"\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + 
"\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. 
Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "name": "testUnalteredBootParams", + "qualifiedName": "testUnalteredBootParams", + "exported": false, + "signature": "func(*checksdb.Check, *provider.TestEnvironment)()", + "doc": "testUnalteredBootParams Validates kernel boot parameters against MachineConfig and GRUB settings on each node\n\nThe routine iterates over all containers in the test environment, ensuring\neach node is checked only once. For every unique node it calls a helper that\ncompares current kernel command‑line arguments to those defined in the\nMachineConfig and GRUB configuration, logging any mismatches. Results are\ncollected into compliant or non‑compliant report objects which are then set\nas the check’s outcome.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:566", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "TestBootParamsHelper", + "kind": "function", + "source": [ + "func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error {", + "\tprobePod := env.ProbePods[cut.NodeName]", + "\tif probePod == nil {", + "\t\treturn fmt.Errorf(\"probe pod for container %s not found on node %s\", cut, cut.NodeName)", + "\t}", + "\tmcKernelArgumentsMap := GetMcKernelArguments(env, cut.NodeName)", + "\tcurrentKernelArgsMap, err := getCurrentKernelCmdlineArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting kernel cli arguments from container: %s, err=%s\", cut, err)", + "\t}", + "\tgrubKernelConfigMap, err := getGrubKernelArgs(env, cut.NodeName)", + "\tif err != 
nil {", + "\t\treturn fmt.Errorf(\"error getting grub kernel arguments for node: %s, err=%s\", cut.NodeName, err)", + "\t}", + "\tfor key, mcVal := range mcKernelArgumentsMap {", + "\t\tif currentVal, ok := currentKernelArgsMap[key]; ok {", + "\t\t\tif currentVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s KernelCmdLineArg %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, currentVal, mcVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s KernelCmdLineArg==mcVal %q: %q==%q\", cut.NodeName, key, currentVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t\tif grubVal, ok := grubKernelConfigMap[key]; ok {", + "\t\t\tif grubVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s NodeGrubKernelArgs %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, mcVal, grubVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s NodeGrubKernelArg==mcVal %q: %q==%q\", cut.NodeName, key, grubVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "GetLogger", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "AddField", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewNodeReportObject", + "kind": "function", + "source": [ + "func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) 
{", + "\tout = NewReportObject(aReason, NodeType, isCompliant)", + "\tout.AddField(Name, aNodeName)", + "\treturn out", + "}" + ] + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Loading %s suite checks\", common.PlatformAlterationTestKey)", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PlatformAlterationTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHyperThreadEnable)).", + "\t\tWithSkipCheckFn(testhelper.GetNoBareMetalNodesSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHyperThreadingEnabled(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredBaseImageIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestContainersFsDiff(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNonTaintedNodeKernelsIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestTainted(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsRedHatReleaseIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestIsRedHatRelease(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestIsSELinuxEnforcingIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestIsSELinuxEnforcing(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHugepagesNotManuallyManipulated)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestHugepages(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestUnalteredStartupBootParamsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestUnalteredBootParams(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysctlConfigsIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetDaemonSetFailedToSpawnSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestSysctlConfigs(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestServiceMeshIdentifier)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNoIstioSkipFn(\u0026env),", + "\t\t\ttesthelper.GetNoPodsUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + 
"\t\t\ttestServiceMesh(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPLifecycleIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestOCPStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestNodeOperatingSystemIdentifier)).", + "\t\tWithSkipCheckFn(testhelper.GetNonOCPClusterSkipFn()).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestNodeOperatingSystemStatus(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages2M)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages2Mi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestPodHugePages1G)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t\ttesthelper.GetNoHugepagesPodsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestPodHugePagesSize(c, \u0026env, provider.HugePages1Gi)", + "\t\t\treturn nil", + "\t\t}))", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestClusterOperatorHealth)).", + "\t\tWithSkipCheckFn(", + "\t\t\ttesthelper.GetNonOCPClusterSkipFn(),", + "\t\t).", + "\t\tWithCheckFn(func(c *checksdb.Check) error {", + "\t\t\ttestClusterOperatorHealth(c, \u0026env)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testUnalteredBootParams(check *checksdb.Check, 
env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcheck.LogInfo(\"Skipping node %q: already checked.\", cut.NodeName)", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "", + "\t\terr := bootparams.TestBootParamsHelper(env, cut, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Node %q failed the boot params check\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Failed the boot params check\", false).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the boot params check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the boot params check\", true).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:47" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/platform/suite.go:45" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "bootparams", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GetMcKernelArguments", + "qualifiedName": "GetMcKernelArguments", + "exported": true, + "signature": "func(*provider.TestEnvironment, string)(map[string]string)", + "doc": "GetMcKernelArguments Retrieves kernel arguments from a node’s MachineConfig\n\nThis function accesses the specified node in the test environment, pulls the\nKernelArguments slice from its MachineConfig, and converts it into a map of\nkey‑value pairs using ArgListToMap. The resulting map is returned for\nfurther comparison against runtime values or other configuration sources.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/bootparams/bootparams.go:84", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "name": "ArgListToMap", + "kind": "function", + "source": [ + "func ArgListToMap(lst []string) map[string]string {", + "\tretval := make(map[string]string)", + "\tfor _, arg := range lst {", + "\t\targ = strings.ReplaceAll(arg, `\"`, ``)", + "\t\tsplitArgs := strings.Split(arg, \"=\")", + "\t\tif len(splitArgs) == 1 {", + "\t\t\tretval[splitArgs[0]] = \"\"", + "\t\t} else {", + "\t\t\tretval[splitArgs[0]] = splitArgs[1]", + "\t\t}", + "\t}", + "\treturn retval", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testSysctlConfigs", + "kind": "function", + "source": [ + "func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif 
alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsysctlSettings, err := sysctlconfig.GetSysctlSettings(env, cut.NodeName)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get sysctl settings for node %q, error: %v\", cut.NodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Could not get sysctl settings\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tmcKernelArgumentsMap := bootparams.GetMcKernelArguments(env, cut.NodeName)", + "\t\tvalidSettings := true", + "\t\tfor key, sysctlConfigVal := range sysctlSettings {", + "\t\t\tif mcVal, ok := mcKernelArgumentsMap[key]; ok {", + "\t\t\t\tif mcVal != sysctlConfigVal {", + "\t\t\t\t\tcheck.LogError(\"Kernel config mismatch in node %q for %q (sysctl value: %q, machine config value: %q)\",", + "\t\t\t\t\t\tcut.NodeName, key, sysctlConfigVal, mcVal)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, fmt.Sprintf(\"Kernel config mismatch for %s\", key), false))", + "\t\t\t\t\tvalidSettings = false", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif validSettings {", + "\t\t\tcheck.LogInfo(\"Node %q passed the sysctl config check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the sysctl config check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "TestBootParamsHelper", + "kind": "function", + "source": [ + "func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error {", + "\tprobePod := env.ProbePods[cut.NodeName]", + "\tif probePod == nil {", + "\t\treturn fmt.Errorf(\"probe pod for container %s not found on node %s\", cut, cut.NodeName)", + "\t}", + "\tmcKernelArgumentsMap := GetMcKernelArguments(env, cut.NodeName)", + "\tcurrentKernelArgsMap, err := getCurrentKernelCmdlineArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting kernel cli arguments from container: %s, err=%s\", cut, err)", + "\t}", + "\tgrubKernelConfigMap, err := getGrubKernelArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting grub kernel arguments for node: %s, err=%s\", cut.NodeName, err)", + "\t}", + "\tfor key, mcVal := range mcKernelArgumentsMap {", + "\t\tif currentVal, ok := currentKernelArgsMap[key]; ok {", + "\t\t\tif currentVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s KernelCmdLineArg %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, currentVal, mcVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s KernelCmdLineArg==mcVal %q: %q==%q\", cut.NodeName, key, currentVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t\tif grubVal, ok := grubKernelConfigMap[key]; ok {", + "\t\t\tif grubVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s NodeGrubKernelArgs %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, mcVal, grubVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s NodeGrubKernelArg==mcVal %q: %q==%q\", cut.NodeName, key, grubVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetMcKernelArguments(env *provider.TestEnvironment, nodeName string) (aMap map[string]string) {", 
+ "\tmcKernelArgumentsMap := arrayhelper.ArgListToMap(env.Nodes[nodeName].Mc.Spec.KernelArguments)", + "\treturn mcKernelArgumentsMap", + "}" + ] + }, + { + "name": "TestBootParamsHelper", + "qualifiedName": "TestBootParamsHelper", + "exported": true, + "signature": "func(*provider.TestEnvironment, *provider.Container, *log.Logger)(error)", + "doc": "TestBootParamsHelper Verifies that node kernel parameters match the MachineConfig\n\nThe function retrieves the expected kernel arguments from a MachineConfig,\nthen obtains the current command‑line arguments from both the container’s\nprocess and the GRUB configuration on the same node. It compares each\nargument value against the expected one, logging warnings when mismatches\noccur and debug messages for matches. If any required probe pod is missing or\nan error occurs during retrieval, it returns an error; otherwise it completes\nsilently.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/bootparams/bootparams.go:43", + "calls": [ + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "GetMcKernelArguments", + "kind": "function", + "source": [ + "func GetMcKernelArguments(env *provider.TestEnvironment, nodeName string) (aMap map[string]string) {", + "\tmcKernelArgumentsMap := arrayhelper.ArgListToMap(env.Nodes[nodeName].Mc.Spec.KernelArguments)", + "\treturn mcKernelArgumentsMap", + "}" + ] + }, + { + "name": "getCurrentKernelCmdlineArgs", + "kind": "function", + "source": [ + "func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcurrentKernelCmdlineArgs, errStr, err := o.ExecCommandContainer(ctx, kernelArgscommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s 
on probe pod container %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName].Name, err, errStr)", + "\t}", + "\tcurrentSplitKernelCmdlineArgs := strings.Split(strings.TrimSuffix(currentKernelCmdlineArgs, \"\\n\"), \" \")", + "\treturn arrayhelper.ArgListToMap(currentSplitKernelCmdlineArgs), nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "getGrubKernelArgs", + "kind": "function", + "source": [ + "func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tbootConfig, errStr, err := o.ExecCommandContainer(ctx, grubKernelArgsCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\tsplitBootConfig := strings.Split(bootConfig, \"\\n\")", + "\tfilteredBootConfig := arrayhelper.FilterArray(splitBootConfig, func(line string) bool {", + "\t\treturn strings.HasPrefix(line, \"options\")", + "\t})", + "\tif len(filteredBootConfig) != 1 {", + "\t\treturn aMap, fmt.Errorf(\"filteredBootConfig!=1\")", + "\t}", + "\tgrubKernelConfig := filteredBootConfig[0]", + "\tgrubSplitKernelConfig := strings.Split(grubKernelConfig, \" \")", + "\tgrubSplitKernelConfig = grubSplitKernelConfig[1:]", + "\treturn arrayhelper.ArgListToMap(grubSplitKernelConfig), nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "Warn", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + }, + { + "name": "Warn", + "kind": "function" + }, + { + "name": "Debug", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testUnalteredBootParams", + "kind": "function", + "source": [ + "func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcheck.LogInfo(\"Skipping node %q: already checked.\", cut.NodeName)", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "", + "\t\terr := bootparams.TestBootParamsHelper(env, cut, check.GetLogger())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Node %q failed the boot params check\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Failed the boot params check\", false).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the boot params check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the boot params check\", true).", + "\t\t\t\tAddField(testhelper.ProbePodName, env.ProbePods[cut.NodeName].Name))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error {", + "\tprobePod := env.ProbePods[cut.NodeName]", + "\tif probePod == nil {", + "\t\treturn fmt.Errorf(\"probe pod for container %s not found on node %s\", cut, cut.NodeName)", + "\t}", + "\tmcKernelArgumentsMap := GetMcKernelArguments(env, cut.NodeName)", + 
"\tcurrentKernelArgsMap, err := getCurrentKernelCmdlineArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting kernel cli arguments from container: %s, err=%s\", cut, err)", + "\t}", + "\tgrubKernelConfigMap, err := getGrubKernelArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting grub kernel arguments for node: %s, err=%s\", cut.NodeName, err)", + "\t}", + "\tfor key, mcVal := range mcKernelArgumentsMap {", + "\t\tif currentVal, ok := currentKernelArgsMap[key]; ok {", + "\t\t\tif currentVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s KernelCmdLineArg %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, currentVal, mcVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s KernelCmdLineArg==mcVal %q: %q==%q\", cut.NodeName, key, currentVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t\tif grubVal, ok := grubKernelConfigMap[key]; ok {", + "\t\t\tif grubVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s NodeGrubKernelArgs %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, mcVal, grubVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s NodeGrubKernelArg==mcVal %q: %q==%q\", cut.NodeName, key, grubVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "getCurrentKernelCmdlineArgs", + "qualifiedName": "getCurrentKernelCmdlineArgs", + "exported": false, + "signature": "func(*provider.TestEnvironment, string)(map[string]string, error)", + "doc": "getCurrentKernelCmdlineArgs retrieves the current kernel command-line arguments from a node's probe pod\n\nThe function executes a predefined command inside the probe pod container to\ncapture the kernel's command line, splits the output into individual\narguments, and converts them into a map of key-value pairs. It returns this\nmap along with any error that occurs during execution or parsing. 
The\nreturned data is used to compare against expected configuration values.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/bootparams/bootparams.go:124", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSuffix", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "name": "ArgListToMap", + "kind": "function", + "source": [ + "func ArgListToMap(lst []string) map[string]string {", + "\tretval := make(map[string]string)", + "\tfor _, arg := range lst {", + "\t\targ = strings.ReplaceAll(arg, `\"`, ``)", + "\t\tsplitArgs := strings.Split(arg, \"=\")", + "\t\tif len(splitArgs) == 1 {", + "\t\t\tretval[splitArgs[0]] = \"\"", + "\t\t} else {", + "\t\t\tretval[splitArgs[0]] = splitArgs[1]", + "\t\t}", + "\t}", + "\treturn retval", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "TestBootParamsHelper", + "kind": "function", + "source": [ + "func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error {", + "\tprobePod := env.ProbePods[cut.NodeName]", + "\tif probePod == nil {", + "\t\treturn fmt.Errorf(\"probe pod for container %s not found on node %s\", cut, cut.NodeName)", + "\t}", + "\tmcKernelArgumentsMap := GetMcKernelArguments(env, cut.NodeName)", + "\tcurrentKernelArgsMap, err := getCurrentKernelCmdlineArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting kernel cli arguments from container: %s, err=%s\", cut, err)", + "\t}", + "\tgrubKernelConfigMap, err := getGrubKernelArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting grub kernel arguments for node: %s, err=%s\", cut.NodeName, err)", + "\t}", + "\tfor key, mcVal := range mcKernelArgumentsMap {", + "\t\tif currentVal, ok := currentKernelArgsMap[key]; ok {", + "\t\t\tif currentVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s KernelCmdLineArg %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, currentVal, mcVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s KernelCmdLineArg==mcVal %q: %q==%q\", cut.NodeName, key, currentVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t\tif grubVal, ok := grubKernelConfigMap[key]; ok {", + "\t\t\tif grubVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s NodeGrubKernelArgs %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, mcVal, grubVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s NodeGrubKernelArg==mcVal %q: %q==%q\", cut.NodeName, key, grubVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (aMap 
map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tcurrentKernelCmdlineArgs, errStr, err := o.ExecCommandContainer(ctx, kernelArgscommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod container %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName].Name, err, errStr)", + "\t}", + "\tcurrentSplitKernelCmdlineArgs := strings.Split(strings.TrimSuffix(currentKernelCmdlineArgs, \"\\n\"), \" \")", + "\treturn arrayhelper.ArgListToMap(currentSplitKernelCmdlineArgs), nil", + "}" + ] + }, + { + "name": "getGrubKernelArgs", + "qualifiedName": "getGrubKernelArgs", + "exported": false, + "signature": "func(*provider.TestEnvironment, string)(map[string]string, error)", + "doc": "getGrubKernelArgs Retrieves GRUB kernel arguments from a probe pod\n\nThe function runs a command inside the node's probe container to capture the\nGRUB configuration line, filters for the options line, splits it into\nindividual arguments, and converts them into a map of key-value pairs. 
It\nreturns this map along with any error that occurs during execution or\nparsing.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/bootparams/bootparams.go:96", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "name": "FilterArray", + "kind": "function", + "source": [ + "func FilterArray(vs []string, f func(string) bool) []string {", + "\tvsf := make([]string, 0)", + "\tfor _, v := range vs {", + "\t\tif f(v) {", + "\t\t\tvsf = append(vsf, v)", + "\t\t}", + "\t}", + "\treturn vsf", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "HasPrefix", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "name": "ArgListToMap", + "kind": "function", + "source": [ + "func ArgListToMap(lst []string) map[string]string {", + "\tretval := make(map[string]string)", + "\tfor _, arg := range lst {", + "\t\targ = strings.ReplaceAll(arg, `\"`, ``)", + "\t\tsplitArgs := strings.Split(arg, \"=\")", + "\t\tif len(splitArgs) == 1 {", + "\t\t\tretval[splitArgs[0]] = \"\"", + "\t\t} else {", + "\t\t\tretval[splitArgs[0]] = splitArgs[1]", + "\t\t}", + "\t}", + "\treturn retval", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/bootparams", + "name": "TestBootParamsHelper", + "kind": "function", + "source": [ + "func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error {", + "\tprobePod := env.ProbePods[cut.NodeName]", + "\tif probePod == nil {", + "\t\treturn fmt.Errorf(\"probe pod for container %s not found on node %s\", cut, cut.NodeName)", + "\t}", + "\tmcKernelArgumentsMap := GetMcKernelArguments(env, cut.NodeName)", + "\tcurrentKernelArgsMap, err := getCurrentKernelCmdlineArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting kernel cli arguments from container: %s, err=%s\", cut, err)", + "\t}", + "\tgrubKernelConfigMap, err := getGrubKernelArgs(env, cut.NodeName)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"error getting grub kernel arguments for node: %s, err=%s\", cut.NodeName, err)", + "\t}", + "\tfor key, mcVal := range mcKernelArgumentsMap {", + "\t\tif currentVal, ok := currentKernelArgsMap[key]; ok {", + "\t\t\tif currentVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s KernelCmdLineArg %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, currentVal, mcVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s KernelCmdLineArg==mcVal %q: %q==%q\", cut.NodeName, key, currentVal, mcVal)", + "\t\t\t}", + "\t\t}", + 
"\t\tif grubVal, ok := grubKernelConfigMap[key]; ok {", + "\t\t\tif grubVal != mcVal {", + "\t\t\t\tlogger.Warn(\"%s NodeGrubKernelArgs %q does not match MachineConfig value: %q!=%q\",", + "\t\t\t\t\tcut.NodeName, key, mcVal, grubVal)", + "\t\t\t} else {", + "\t\t\t\tlogger.Debug(\"%s NodeGrubKernelArg==mcVal %q: %q==%q\", cut.NodeName, key, grubVal, mcVal)", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) {", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "\tbootConfig, errStr, err := o.ExecCommandContainer(ctx, grubKernelArgsCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn aMap, fmt.Errorf(\"cannot execute %s on probe pod %s, err=%s, stderr=%s\", grubKernelArgsCommand, env.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\tsplitBootConfig := strings.Split(bootConfig, \"\\n\")", + "\tfilteredBootConfig := arrayhelper.FilterArray(splitBootConfig, func(line string) bool {", + "\t\treturn strings.HasPrefix(line, \"options\")", + "\t})", + "\tif len(filteredBootConfig) != 1 {", + "\t\treturn aMap, fmt.Errorf(\"filteredBootConfig!=1\")", + "\t}", + "\tgrubKernelConfig := filteredBootConfig[0]", + "\tgrubSplitKernelConfig := strings.Split(grubKernelConfig, \" \")", + "\tgrubSplitKernelConfig = grubSplitKernelConfig[1:]", + "\treturn arrayhelper.ArgListToMap(grubSplitKernelConfig), nil", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "grubKernelArgsCommand", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/platform/bootparams/bootparams.go:30" + }, + { + "name": "kernelArgscommand", + "exported": false, + "position": 
"/Users/deliedit/dev/certsuite/tests/platform/bootparams/bootparams.go:31" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/clusteroperator", + "name": "clusteroperator", + "files": 1, + "imports": [ + "github.com/openshift/api/config/v1", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "IsClusterOperatorAvailable", + "qualifiedName": "IsClusterOperatorAvailable", + "exported": true, + "signature": "func(*configv1.ClusterOperator)(bool)", + "doc": "IsClusterOperatorAvailable Determines if a ClusterOperator reports an 'Available' status\n\nThe function inspects the conditions of a given cluster operator, checking\nfor one whose type indicates availability. If such a condition is found, it\nlogs that the operator is available and returns true; otherwise it logs that\nthe operator is not available and returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/clusteroperator/clusteroperator.go:14", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testClusterOperatorHealth", + "kind": "function", + "source": [ + "func testClusterOperatorHealth(check *checksdb.Check, env *provider.TestEnvironment) {", + "\t// Checks the various ClusterOperator(s) to see if they are all in an 'Available' state.", + "\t// If they are 
not in an 'Available' state, the check will fail.", + "\t// Note: This check is only applicable to OCP clusters and is skipped for non-OCP clusters.", + "", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// Loop through the ClusterOperators and check their status.", + "\tfor i := range env.ClusterOperators {", + "\t\tcheck.LogInfo(\"Testing ClusterOperator %q to ensure it is in an 'Available' state.\", env.ClusterOperators[i].Name)", + "", + "\t\tif clusteroperator.IsClusterOperatorAvailable(\u0026env.ClusterOperators[i]) {", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is in an 'Available' state\", true))", + "\t\t} else {", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewClusterOperatorReportObject(env.ClusterOperators[i].Name, \"ClusterOperator is not in an 'Available' state\", false))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsClusterOperatorAvailable(co *configv1.ClusterOperator) bool {", + "\t// Loop through the conditions, looking for the 'Available' state.", + "\tfor _, condition := range co.Status.Conditions {", + "\t\tif condition.Type == configv1.OperatorAvailable {", + "\t\t\tlog.Info(\"ClusterOperator %q is in an 'Available' state\", co.Name)", + "\t\t\treturn true", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"ClusterOperator %q is not in an 'Available' state\", co.Name)", + "\treturn false", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "cnffsdiff", + "files": 1, + "imports": [ + "encoding/json", + "errors", + "fmt", + "github.com/Masterminds/semver/v3", + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "strings", + "time" + ], + "structs": [ + { + "name": "FsDiff", + "exported": true, + "doc": "FsDiff Tracks file system differences in a container\n\nThis structure stores the results of running a podman diff against a\ncontainer, capturing any folders that have been changed or deleted from a\npredefined target list. It also holds references to the check context,\ncommand client, and execution context used during the test, along with flags\nfor custom podman usage and an error field for failure reporting. The result\ninteger indicates success, failure, or error status after the test runs.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:79", + "fields": { + "ChangedFolders": "[]string", + "DeletedFolders": "[]string", + "Error": "error", + "check": "*checksdb.Check", + "clientHolder": "clientsholder.Command", + "ctxt": "clientsholder.Context", + "result": "int", + "useCustomPodman": "bool" + }, + "methodNames": [ + "GetResults", + "RunTest", + "createNodeFolder", + "deleteNodeFolder", + "execCommandContainer", + "installCustomPodman", + "intersectTargetFolders", + "mountProbePodmanFolder", + "runPodmanDiff", + "unmountCustomPodman", + "unmountProbePodmanFolder" + ], + "source": [ + "type FsDiff struct {", + "\tcheck *checksdb.Check", + "\tresult int", + "\tclientHolder clientsholder.Command", + "\tctxt clientsholder.Context", + "\tuseCustomPodman bool", + "", + "\tDeletedFolders []string", + "\tChangedFolders []string", + "\tError error", + "}" + ] + }, + { + "name": "fsDiffJSON", + "exported": false, + "doc": "fsDiffJSON Parses podman diff JSON output into separate lists of changed, added, and deleted paths\n\nThis struct holds three slices of 
strings that represent file or folder paths\nreported by the podman diff command. The \"changed\" slice contains paths\nmodified in a container, \"deleted\" lists removed items, and \"added\" tracks\nnew creations. Only the changed and deleted fields are used for comparison\nlogic, while added is retained for completeness.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:65", + "fields": { + "Added": "[]string", + "Changed": "[]string", + "Deleted": "[]string" + }, + "methodNames": null, + "source": [ + "type fsDiffJSON struct {", + "\tChanged []string `json:\"changed\"`", + "\tDeleted []string `json:\"deleted\"`", + "\tAdded []string `json:\"added\"` // Will not be checked, but let's keep it just in case.", + "}" + ] + } + ], + "interfaces": [ + { + "name": "FsDiffFuncs", + "exported": true, + "doc": "FsDiffFuncs provides file system diff functionality\n\nThis interface defines two operations: one that initiates a diff test within\na specified container context, and another that retrieves the result status\nof that test as an integer code. The RunTest method accepts execution context\nand container identifier parameters to perform the comparison, while\nGetResults returns an integer indicating success or failure of the last run.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:98", + "methods": [ + "RunTest", + "GetResults" + ], + "source": [ + "type FsDiffFuncs interface {", + "\tRunTest(ctx clientsholder.Context, containerUID string)", + "\tGetResults() int", + "}" + ] + } + ], + "functions": [ + { + "name": "GetResults", + "qualifiedName": "FsDiff.GetResults", + "exported": true, + "receiver": "FsDiff", + "signature": "func()(int)", + "doc": "FsDiff.GetResults provides the current result value\n\nThe method simply retrieves and returns the integer field that holds the diff\noutcome. No parameters are required, and it does not modify any state. 
The\nreturned value reflects the number of differences detected by the FsDiff\ninstance.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:264", + "calls": null, + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) GetResults() int {", + "\treturn f.result", + "}" + ] + }, + { + "name": "RunTest", + "qualifiedName": "FsDiff.RunTest", + "exported": true, + "receiver": "FsDiff", + "signature": "func(string)()", + "doc": "FsDiff.RunTest Executes podman diff to detect container file system changes\n\nThe method runs the \"podman diff\" command on a specified container,\noptionally installing a custom podman binary if configured. It retries up to\nfive times when encountering exit code 125 errors and parses the JSON output\ninto deleted and changed folder lists. If any target folders are found\naltered or removed, the test fails; otherwise it succeeds.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:202", + "calls": [ + { + "name": "FsDiff.installCustomPodman", + "kind": "function", + "source": [ + "func (f *FsDiff) installCustomPodman() error {", + "\t// We need to create the destination folder first.", + "\tf.check.LogInfo(\"Creating temp folder %s\", nodeTmpMountFolder)", + "\tif err := f.createNodeFolder(); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Mount podman from partner probe pod into /host/tmp/...", + "\tf.check.LogInfo(\"Mounting %s into %s\", partnerPodmanFolder, nodeTmpMountFolder)", + "\tif mountErr := f.mountProbePodmanFolder(); mountErr != nil {", + "\t\t// We need to delete the temp folder previously created as mount point.", + "\t\tif deleteErr := f.deleteNodeFolder(); deleteErr != nil {", + "\t\t\treturn fmt.Errorf(\"failed to mount folder %s: %s, failed to delete %s: %s\",", + "\t\t\t\tpartnerPodmanFolder, mountErr, nodeTmpMountFolder, deleteErr)", + "\t\t}", + "", + "\t\treturn mountErr", + "\t}", + "", + "\treturn nil", + 
"}" + ] + }, + { + "name": "FsDiff.unmountCustomPodman", + "kind": "function", + "source": [ + "func (f *FsDiff) unmountCustomPodman() {", + "\t// Unmount podman folder from host.", + "\tf.check.LogInfo(\"Unmounting folder %s\", nodeTmpMountFolder)", + "\tif err := f.unmountProbePodmanFolder(); err != nil {", + "\t\t// Here, there's no point on trying to remove the temp folder used as mount point, as", + "\t\t// that probably will not work either.", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tf.check.LogInfo(\"Deleting folder %s\", nodeTmpMountFolder)", + "\tif err := f.deleteNodeFolder(); err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t}", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "FsDiff.runPodmanDiff", + "kind": "function", + "source": [ + "func (f *FsDiff) runPodmanDiff(containerUID string) (string, error) {", + "\tpodmanPath := \"podman\"", + "\tif f.useCustomPodman {", + "\t\tpodmanPath = fmt.Sprintf(\"%s/podman\", tmpMountDestFolder)", + "\t}", + "", + "\toutput, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, fmt.Sprintf(\"chroot /host %s diff --format json %s\", podmanPath, containerUID))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"can not execute command on container: %w\", err)", + "\t}", + "\tif outerr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"stderr log received when running fsdiff test: %s\", outerr)", + "\t}", + "\treturn output, nil", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "LogWarn", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Sleep", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "FsDiff.intersectTargetFolders", + "kind": 
"function", + "source": [ + "func (f *FsDiff) intersectTargetFolders(src []string) []string {", + "\tvar dst []string", + "\tfor _, folder := range src {", + "\t\tif stringhelper.StringInSlice(targetFolders, folder, false) {", + "\t\t\tf.check.LogWarn(\"Container's folder %q is altered.\", folder)", + "\t\t\tdst = append(dst, folder)", + "\t\t}", + "\t}", + "\treturn dst", + "}" + ] + }, + { + "name": "FsDiff.intersectTargetFolders", + "kind": "function", + "source": [ + "func (f *FsDiff) intersectTargetFolders(src []string) []string {", + "\tvar dst []string", + "\tfor _, folder := range src {", + "\t\tif stringhelper.StringInSlice(targetFolders, folder, false) {", + "\t\t\tf.check.LogWarn(\"Container's folder %q is altered.\", folder)", + "\t\t\tdst = append(dst, folder)", + "\t\t}", + "\t}", + "\treturn dst", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + }, + { + "name": "LogDebug", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) RunTest(containerUID string) {", + "\tif f.useCustomPodman {", + "\t\terr := f.installCustomPodman()", + "\t\tif err != nil {", + "\t\t\tf.Error = err", + "\t\t\tf.result = testhelper.ERROR", + "\t\t\treturn", + "\t\t}", + "", + "\t\tdefer f.unmountCustomPodman()", + "\t}", + "", + "\tf.check.LogInfo(\"Running \\\"podman diff\\\" for container id %s\", containerUID)", + "", + "\tvar output string", + "\tvar err error", + "\tfor i := range [5]int{} {", + "\t\toutput, err = f.runPodmanDiff(containerUID)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\t// Retry if we get a podman error code 125, which is a known issue where the container/pod", + "\t\t// has possibly gone missing or is in CrashLoopBackOff state. 
Adding a retry here to help", + "\t\t// smooth out the test results.", + "\t\tif strings.Contains(err.Error(), \"command terminated with exit code 125\") {", + "\t\t\tf.check.LogWarn(\"Retrying \\\"podman diff\\\" due to error code 125 (attempt %d/5)\", i+1)", + "\t\t\ttime.Sleep(errorCode125RetrySeconds * time.Second)", + "\t\t\tcontinue", + "\t\t}", + "\t\tbreak", + "\t}", + "", + "\tif err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tdiff := fsDiffJSON{}", + "\terr = json.Unmarshal([]byte(output), \u0026diff)", + "\tif err != nil {", + "\t\tf.Error = fmt.Errorf(\"failed to unmarshall podman diff's json output: %s, err: %w\", output, err)", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "\tf.DeletedFolders = f.intersectTargetFolders(diff.Deleted)", + "\tf.ChangedFolders = f.intersectTargetFolders(diff.Changed)", + "\tif len(f.ChangedFolders) != 0 || len(f.DeletedFolders) != 0 {", + "\t\tf.check.LogDebug(\"Deleted folders found in Podman diff: %s\", f.DeletedFolders)", + "\t\tf.check.LogDebug(\"Changed folders found in Podman diff: %s\", f.ChangedFolders)", + "\t\tf.result = testhelper.FAILURE", + "\t} else {", + "\t\tf.result = testhelper.SUCCESS", + "\t}", + "}" + ] + }, + { + "name": "createNodeFolder", + "qualifiedName": "FsDiff.createNodeFolder", + "exported": false, + "receiver": "FsDiff", + "signature": "func()(error)", + "doc": "FsDiff.createNodeFolder Creates a temporary folder on the node for mounting purposes\n\nThe method runs a container command to make a directory at the path defined\nby nodeTmpMountFolder. 
It uses execCommandContainer to capture any output or\nerrors, returning an error if the command fails or produces unexpected\noutput.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:290", + "calls": [ + { + "name": "FsDiff.execCommandContainer", + "kind": "function", + "source": [ + "func (f *FsDiff) execCommandContainer(cmd, errorStr string) error {", + "\toutput, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, cmd)", + "\tif err != nil || output != \"\" || outerr != \"\" {", + "\t\treturn errors.New(errorStr + fmt.Sprintf(\" Stderr: %s, Stdout: %s, Err: %v\", output, outerr, err))", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.installCustomPodman", + "kind": "function", + "source": [ + "func (f *FsDiff) installCustomPodman() error {", + "\t// We need to create the destination folder first.", + "\tf.check.LogInfo(\"Creating temp folder %s\", nodeTmpMountFolder)", + "\tif err := f.createNodeFolder(); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Mount podman from partner probe pod into /host/tmp/...", + "\tf.check.LogInfo(\"Mounting %s into %s\", partnerPodmanFolder, nodeTmpMountFolder)", + "\tif mountErr := f.mountProbePodmanFolder(); mountErr != nil {", + "\t\t// We need to delete the temp folder previously created as mount point.", + "\t\tif deleteErr := f.deleteNodeFolder(); deleteErr != nil {", + "\t\t\treturn fmt.Errorf(\"failed to mount folder %s: %s, failed to delete %s: %s\",", + "\t\t\t\tpartnerPodmanFolder, mountErr, nodeTmpMountFolder, deleteErr)", + "\t\t}", + "", + "\t\treturn mountErr", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) 
createNodeFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"mkdir %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when creating folder %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "name": "deleteNodeFolder", + "qualifiedName": "FsDiff.deleteNodeFolder", + "exported": false, + "receiver": "FsDiff", + "signature": "func()(error)", + "doc": "FsDiff.deleteNodeFolder Removes the temporary mount directory on the target node\n\nThis method issues a command to delete the folder designated by the constant\nnodeTmpMountFolder using the execCommandContainer helper. It expects no\noutput from the command; any stdout, stderr or execution error results in an\ninformative error being returned. The function is invoked during setup and\nteardown of custom Podman mounts to clean up the temporary directory.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:302", + "calls": [ + { + "name": "FsDiff.execCommandContainer", + "kind": "function", + "source": [ + "func (f *FsDiff) execCommandContainer(cmd, errorStr string) error {", + "\toutput, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, cmd)", + "\tif err != nil || output != \"\" || outerr != \"\" {", + "\t\treturn errors.New(errorStr + fmt.Sprintf(\" Stderr: %s, Stdout: %s, Err: %v\", output, outerr, err))", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.installCustomPodman", + "kind": "function", + "source": [ + "func (f *FsDiff) installCustomPodman() error {", + "\t// We need to create the destination folder first.", + "\tf.check.LogInfo(\"Creating temp folder %s\", nodeTmpMountFolder)", + "\tif err := f.createNodeFolder(); err != nil {", + "\t\treturn err", + 
"\t}", + "", + "\t// Mount podman from partner probe pod into /host/tmp/...", + "\tf.check.LogInfo(\"Mounting %s into %s\", partnerPodmanFolder, nodeTmpMountFolder)", + "\tif mountErr := f.mountProbePodmanFolder(); mountErr != nil {", + "\t\t// We need to delete the temp folder previously created as mount point.", + "\t\tif deleteErr := f.deleteNodeFolder(); deleteErr != nil {", + "\t\t\treturn fmt.Errorf(\"failed to mount folder %s: %s, failed to delete %s: %s\",", + "\t\t\t\tpartnerPodmanFolder, mountErr, nodeTmpMountFolder, deleteErr)", + "\t\t}", + "", + "\t\treturn mountErr", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.unmountCustomPodman", + "kind": "function", + "source": [ + "func (f *FsDiff) unmountCustomPodman() {", + "\t// Unmount podman folder from host.", + "\tf.check.LogInfo(\"Unmounting folder %s\", nodeTmpMountFolder)", + "\tif err := f.unmountProbePodmanFolder(); err != nil {", + "\t\t// Here, there's no point on trying to remove the temp folder used as mount point, as", + "\t\t// that probably will not work either.", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tf.check.LogInfo(\"Deleting folder %s\", nodeTmpMountFolder)", + "\tif err := f.deleteNodeFolder(); err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) deleteNodeFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"rmdir %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when deleting folder %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "name": "execCommandContainer", + "qualifiedName": "FsDiff.execCommandContainer", + "exported": false, + "receiver": "FsDiff", + "signature": "func(string, string)(error)", + "doc": "FsDiff.execCommandContainer Executes 
a shell command inside the probe pod and reports any output as an error\n\nIt runs the supplied command in the container associated with FsDiff,\ncapturing both stdout and stderr. If the command fails or produces any\noutput, it returns an error that includes the provided error string plus the\ncaptured outputs and underlying execution error. Otherwise, it returns nil to\nindicate success.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:275", + "calls": [ + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.createNodeFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) createNodeFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"mkdir %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when creating folder %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.deleteNodeFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) deleteNodeFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"rmdir %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when deleting folder %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.mountProbePodmanFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) mountProbePodmanFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"mount --bind %s %s\", partnerPodmanFolder, nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when mounting %s into %s.\", partnerPodmanFolder, 
nodeTmpMountFolder))", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.unmountProbePodmanFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) unmountProbePodmanFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"umount %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when unmounting %s.\", nodeTmpMountFolder))", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) execCommandContainer(cmd, errorStr string) error {", + "\toutput, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, cmd)", + "\tif err != nil || output != \"\" || outerr != \"\" {", + "\t\treturn errors.New(errorStr + fmt.Sprintf(\" Stderr: %s, Stdout: %s, Err: %v\", output, outerr, err))", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "installCustomPodman", + "qualifiedName": "FsDiff.installCustomPodman", + "exported": false, + "receiver": "FsDiff", + "signature": "func()(error)", + "doc": "FsDiff.installCustomPodman prepares a temporary mount point for custom podman\n\nThis method creates a temporary directory, mounts the partner probe podman's\npodman binary into that directory, and cleans up if mounting fails. It logs\neach step and returns an error if any operation fails. 
The setup is used\nbefore running podman diff in tests.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:336", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "FsDiff.createNodeFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) createNodeFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"mkdir %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when creating folder %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "FsDiff.mountProbePodmanFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) mountProbePodmanFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"mount --bind %s %s\", partnerPodmanFolder, nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when mounting %s into %s.\", partnerPodmanFolder, nodeTmpMountFolder))", + "}" + ] + }, + { + "name": "FsDiff.deleteNodeFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) deleteNodeFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"rmdir %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when deleting folder %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.RunTest", + "kind": "function", + "source": [ + "func (f *FsDiff) RunTest(containerUID string) {", + "\tif f.useCustomPodman {", + "\t\terr := f.installCustomPodman()", + "\t\tif err != nil {", + "\t\t\tf.Error = err", + "\t\t\tf.result = testhelper.ERROR", + "\t\t\treturn", + "\t\t}", + "", + "\t\tdefer f.unmountCustomPodman()", + "\t}", + "", + "\tf.check.LogInfo(\"Running \\\"podman diff\\\" for container id %s\", containerUID)", + "", + "\tvar output string", + "\tvar err error", + 
"\tfor i := range [5]int{} {", + "\t\toutput, err = f.runPodmanDiff(containerUID)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\t// Retry if we get a podman error code 125, which is a known issue where the container/pod", + "\t\t// has possibly gone missing or is in CrashLoopBackOff state. Adding a retry here to help", + "\t\t// smooth out the test results.", + "\t\tif strings.Contains(err.Error(), \"command terminated with exit code 125\") {", + "\t\t\tf.check.LogWarn(\"Retrying \\\"podman diff\\\" due to error code 125 (attempt %d/5)\", i+1)", + "\t\t\ttime.Sleep(errorCode125RetrySeconds * time.Second)", + "\t\t\tcontinue", + "\t\t}", + "\t\tbreak", + "\t}", + "", + "\tif err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tdiff := fsDiffJSON{}", + "\terr = json.Unmarshal([]byte(output), \u0026diff)", + "\tif err != nil {", + "\t\tf.Error = fmt.Errorf(\"failed to unmarshall podman diff's json output: %s, err: %w\", output, err)", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "\tf.DeletedFolders = f.intersectTargetFolders(diff.Deleted)", + "\tf.ChangedFolders = f.intersectTargetFolders(diff.Changed)", + "\tif len(f.ChangedFolders) != 0 || len(f.DeletedFolders) != 0 {", + "\t\tf.check.LogDebug(\"Deleted folders found in Podman diff: %s\", f.DeletedFolders)", + "\t\tf.check.LogDebug(\"Changed folders found in Podman diff: %s\", f.ChangedFolders)", + "\t\tf.result = testhelper.FAILURE", + "\t} else {", + "\t\tf.result = testhelper.SUCCESS", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) installCustomPodman() error {", + "\t// We need to create the destination folder first.", + "\tf.check.LogInfo(\"Creating temp folder %s\", nodeTmpMountFolder)", + "\tif err := f.createNodeFolder(); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Mount podman from partner probe pod into /host/tmp/...", + 
"\tf.check.LogInfo(\"Mounting %s into %s\", partnerPodmanFolder, nodeTmpMountFolder)", + "\tif mountErr := f.mountProbePodmanFolder(); mountErr != nil {", + "\t\t// We need to delete the temp folder previously created as mount point.", + "\t\tif deleteErr := f.deleteNodeFolder(); deleteErr != nil {", + "\t\t\treturn fmt.Errorf(\"failed to mount folder %s: %s, failed to delete %s: %s\",", + "\t\t\t\tpartnerPodmanFolder, mountErr, nodeTmpMountFolder, deleteErr)", + "\t\t}", + "", + "\t\treturn mountErr", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "name": "intersectTargetFolders", + "qualifiedName": "FsDiff.intersectTargetFolders", + "exported": false, + "receiver": "FsDiff", + "signature": "func([]string)([]string)", + "doc": "FsDiff.intersectTargetFolders Filters a list of folders to those that are monitored\n\nThe function iterates over the supplied slice, checking each path against a\npredefined set of target directories. If a match is found, it logs a warning\nand adds the folder to the result slice. 
The resulting slice contains only\npaths that belong to the monitored set.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:161", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper", + "name": "StringInSlice", + "kind": "function", + "source": [ + "func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool {", + "\tfor _, v := range s {", + "\t\tif !containsCheck {", + "\t\t\tif strings.TrimSpace(string(v)) == string(str) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t} else {", + "\t\t\tif strings.Contains(strings.TrimSpace(string(v)), string(str)) {", + "\t\t\t\treturn true", + "\t\t\t}", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "LogWarn", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.RunTest", + "kind": "function", + "source": [ + "func (f *FsDiff) RunTest(containerUID string) {", + "\tif f.useCustomPodman {", + "\t\terr := f.installCustomPodman()", + "\t\tif err != nil {", + "\t\t\tf.Error = err", + "\t\t\tf.result = testhelper.ERROR", + "\t\t\treturn", + "\t\t}", + "", + "\t\tdefer f.unmountCustomPodman()", + "\t}", + "", + "\tf.check.LogInfo(\"Running \\\"podman diff\\\" for container id %s\", containerUID)", + "", + "\tvar output string", + "\tvar err error", + "\tfor i := range [5]int{} {", + "\t\toutput, err = f.runPodmanDiff(containerUID)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\t// Retry if we get a podman error code 125, which is a known issue where the container/pod", + "\t\t// has possibly gone missing or is in CrashLoopBackOff state. 
Adding a retry here to help", + "\t\t// smooth out the test results.", + "\t\tif strings.Contains(err.Error(), \"command terminated with exit code 125\") {", + "\t\t\tf.check.LogWarn(\"Retrying \\\"podman diff\\\" due to error code 125 (attempt %d/5)\", i+1)", + "\t\t\ttime.Sleep(errorCode125RetrySeconds * time.Second)", + "\t\t\tcontinue", + "\t\t}", + "\t\tbreak", + "\t}", + "", + "\tif err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tdiff := fsDiffJSON{}", + "\terr = json.Unmarshal([]byte(output), \u0026diff)", + "\tif err != nil {", + "\t\tf.Error = fmt.Errorf(\"failed to unmarshall podman diff's json output: %s, err: %w\", output, err)", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "\tf.DeletedFolders = f.intersectTargetFolders(diff.Deleted)", + "\tf.ChangedFolders = f.intersectTargetFolders(diff.Changed)", + "\tif len(f.ChangedFolders) != 0 || len(f.DeletedFolders) != 0 {", + "\t\tf.check.LogDebug(\"Deleted folders found in Podman diff: %s\", f.DeletedFolders)", + "\t\tf.check.LogDebug(\"Changed folders found in Podman diff: %s\", f.ChangedFolders)", + "\t\tf.result = testhelper.FAILURE", + "\t} else {", + "\t\tf.result = testhelper.SUCCESS", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) intersectTargetFolders(src []string) []string {", + "\tvar dst []string", + "\tfor _, folder := range src {", + "\t\tif stringhelper.StringInSlice(targetFolders, folder, false) {", + "\t\t\tf.check.LogWarn(\"Container's folder %q is altered.\", folder)", + "\t\t\tdst = append(dst, folder)", + "\t\t}", + "\t}", + "\treturn dst", + "}" + ] + }, + { + "name": "mountProbePodmanFolder", + "qualifiedName": "FsDiff.mountProbePodmanFolder", + "exported": false, + "receiver": "FsDiff", + "signature": "func()(error)", + "doc": "FsDiff.mountProbePodmanFolder Binds a partner pod's podman directory into the node's temporary mount point\n\nThis method 
runs a bind‑mount command inside the container to expose the\npartner probe's podman folder at the node’s temporary location. It\nconstructs the mount command with the source and destination paths, executes\nit via execCommandContainer, and returns any error from that execution. If\nthe command succeeds, no value is returned.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:314", + "calls": [ + { + "name": "FsDiff.execCommandContainer", + "kind": "function", + "source": [ + "func (f *FsDiff) execCommandContainer(cmd, errorStr string) error {", + "\toutput, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, cmd)", + "\tif err != nil || output != \"\" || outerr != \"\" {", + "\t\treturn errors.New(errorStr + fmt.Sprintf(\" Stderr: %s, Stdout: %s, Err: %v\", output, outerr, err))", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.installCustomPodman", + "kind": "function", + "source": [ + "func (f *FsDiff) installCustomPodman() error {", + "\t// We need to create the destination folder first.", + "\tf.check.LogInfo(\"Creating temp folder %s\", nodeTmpMountFolder)", + "\tif err := f.createNodeFolder(); err != nil {", + "\t\treturn err", + "\t}", + "", + "\t// Mount podman from partner probe pod into /host/tmp/...", + "\tf.check.LogInfo(\"Mounting %s into %s\", partnerPodmanFolder, nodeTmpMountFolder)", + "\tif mountErr := f.mountProbePodmanFolder(); mountErr != nil {", + "\t\t// We need to delete the temp folder previously created as mount point.", + "\t\tif deleteErr := f.deleteNodeFolder(); deleteErr != nil {", + "\t\t\treturn fmt.Errorf(\"failed to mount folder %s: %s, failed to delete %s: %s\",", + "\t\t\t\tpartnerPodmanFolder, mountErr, 
nodeTmpMountFolder, deleteErr)", + "\t\t}", + "", + "\t\treturn mountErr", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) mountProbePodmanFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"mount --bind %s %s\", partnerPodmanFolder, nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when mounting %s into %s.\", partnerPodmanFolder, nodeTmpMountFolder))", + "}" + ] + }, + { + "name": "runPodmanDiff", + "qualifiedName": "FsDiff.runPodmanDiff", + "exported": false, + "receiver": "FsDiff", + "signature": "func(string)(string, error)", + "doc": "FsDiff.runPodmanDiff Runs podman diff and returns its JSON output\n\nThis method constructs the path to podman, optionally using a custom binary\nif configured. It then executes a chrooted command inside the host\nenvironment to obtain a diff of the container’s filesystem in JSON format.\nThe function captures standard output and errors, returning the output string\nor an error if execution fails.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:179", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.RunTest", + "kind": "function", + "source": [ + "func (f *FsDiff) RunTest(containerUID string) {", + "\tif f.useCustomPodman {", + "\t\terr := f.installCustomPodman()", + "\t\tif err != nil {", + "\t\t\tf.Error = err", + "\t\t\tf.result = testhelper.ERROR", + "\t\t\treturn", + "\t\t}", + "", + "\t\tdefer f.unmountCustomPodman()", + "\t}", + "", + 
"\tf.check.LogInfo(\"Running \\\"podman diff\\\" for container id %s\", containerUID)", + "", + "\tvar output string", + "\tvar err error", + "\tfor i := range [5]int{} {", + "\t\toutput, err = f.runPodmanDiff(containerUID)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\t// Retry if we get a podman error code 125, which is a known issue where the container/pod", + "\t\t// has possibly gone missing or is in CrashLoopBackOff state. Adding a retry here to help", + "\t\t// smooth out the test results.", + "\t\tif strings.Contains(err.Error(), \"command terminated with exit code 125\") {", + "\t\t\tf.check.LogWarn(\"Retrying \\\"podman diff\\\" due to error code 125 (attempt %d/5)\", i+1)", + "\t\t\ttime.Sleep(errorCode125RetrySeconds * time.Second)", + "\t\t\tcontinue", + "\t\t}", + "\t\tbreak", + "\t}", + "", + "\tif err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tdiff := fsDiffJSON{}", + "\terr = json.Unmarshal([]byte(output), \u0026diff)", + "\tif err != nil {", + "\t\tf.Error = fmt.Errorf(\"failed to unmarshall podman diff's json output: %s, err: %w\", output, err)", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "\tf.DeletedFolders = f.intersectTargetFolders(diff.Deleted)", + "\tf.ChangedFolders = f.intersectTargetFolders(diff.Changed)", + "\tif len(f.ChangedFolders) != 0 || len(f.DeletedFolders) != 0 {", + "\t\tf.check.LogDebug(\"Deleted folders found in Podman diff: %s\", f.DeletedFolders)", + "\t\tf.check.LogDebug(\"Changed folders found in Podman diff: %s\", f.ChangedFolders)", + "\t\tf.result = testhelper.FAILURE", + "\t} else {", + "\t\tf.result = testhelper.SUCCESS", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) runPodmanDiff(containerUID string) (string, error) {", + "\tpodmanPath := \"podman\"", + "\tif f.useCustomPodman {", + "\t\tpodmanPath = fmt.Sprintf(\"%s/podman\", tmpMountDestFolder)", + "\t}", + 
"", + "\toutput, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, fmt.Sprintf(\"chroot /host %s diff --format json %s\", podmanPath, containerUID))", + "\tif err != nil {", + "\t\treturn \"\", fmt.Errorf(\"can not execute command on container: %w\", err)", + "\t}", + "\tif outerr != \"\" {", + "\t\treturn \"\", fmt.Errorf(\"stderr log received when running fsdiff test: %s\", outerr)", + "\t}", + "\treturn output, nil", + "}" + ] + }, + { + "name": "unmountCustomPodman", + "qualifiedName": "FsDiff.unmountCustomPodman", + "exported": false, + "receiver": "FsDiff", + "signature": "func()()", + "doc": "FsDiff.unmountCustomPodman Unmounts the temporary Podman mount directory\n\nThe function logs that it is unmounting a specific folder, then attempts to\nunmount it using a helper command. If the unmount fails, it records an error\nand stops further cleanup. Finally, it deletes the now-unmounted folder,\nrecording any errors encountered during deletion.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:364", + "calls": [ + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "FsDiff.unmountProbePodmanFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) unmountProbePodmanFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"umount %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when unmounting %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "FsDiff.deleteNodeFolder", + "kind": "function", + "source": [ + "func (f *FsDiff) deleteNodeFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"rmdir %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when deleting folder %s.\", nodeTmpMountFolder))", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.RunTest", + "kind": 
"function", + "source": [ + "func (f *FsDiff) RunTest(containerUID string) {", + "\tif f.useCustomPodman {", + "\t\terr := f.installCustomPodman()", + "\t\tif err != nil {", + "\t\t\tf.Error = err", + "\t\t\tf.result = testhelper.ERROR", + "\t\t\treturn", + "\t\t}", + "", + "\t\tdefer f.unmountCustomPodman()", + "\t}", + "", + "\tf.check.LogInfo(\"Running \\\"podman diff\\\" for container id %s\", containerUID)", + "", + "\tvar output string", + "\tvar err error", + "\tfor i := range [5]int{} {", + "\t\toutput, err = f.runPodmanDiff(containerUID)", + "\t\tif err == nil {", + "\t\t\tbreak", + "\t\t}", + "\t\t// Retry if we get a podman error code 125, which is a known issue where the container/pod", + "\t\t// has possibly gone missing or is in CrashLoopBackOff state. Adding a retry here to help", + "\t\t// smooth out the test results.", + "\t\tif strings.Contains(err.Error(), \"command terminated with exit code 125\") {", + "\t\t\tf.check.LogWarn(\"Retrying \\\"podman diff\\\" due to error code 125 (attempt %d/5)\", i+1)", + "\t\t\ttime.Sleep(errorCode125RetrySeconds * time.Second)", + "\t\t\tcontinue", + "\t\t}", + "\t\tbreak", + "\t}", + "", + "\tif err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tdiff := fsDiffJSON{}", + "\terr = json.Unmarshal([]byte(output), \u0026diff)", + "\tif err != nil {", + "\t\tf.Error = fmt.Errorf(\"failed to unmarshall podman diff's json output: %s, err: %w\", output, err)", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "\tf.DeletedFolders = f.intersectTargetFolders(diff.Deleted)", + "\tf.ChangedFolders = f.intersectTargetFolders(diff.Changed)", + "\tif len(f.ChangedFolders) != 0 || len(f.DeletedFolders) != 0 {", + "\t\tf.check.LogDebug(\"Deleted folders found in Podman diff: %s\", f.DeletedFolders)", + "\t\tf.check.LogDebug(\"Changed folders found in Podman diff: %s\", f.ChangedFolders)", + "\t\tf.result = testhelper.FAILURE", + "\t} else {", + 
"\t\tf.result = testhelper.SUCCESS", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) unmountCustomPodman() {", + "\t// Unmount podman folder from host.", + "\tf.check.LogInfo(\"Unmounting folder %s\", nodeTmpMountFolder)", + "\tif err := f.unmountProbePodmanFolder(); err != nil {", + "\t\t// Here, there's no point on trying to remove the temp folder used as mount point, as", + "\t\t// that probably will not work either.", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tf.check.LogInfo(\"Deleting folder %s\", nodeTmpMountFolder)", + "\tif err := f.deleteNodeFolder(); err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t}", + "}" + ] + }, + { + "name": "unmountProbePodmanFolder", + "qualifiedName": "FsDiff.unmountProbePodmanFolder", + "exported": false, + "receiver": "FsDiff", + "signature": "func()(error)", + "doc": "FsDiff.unmountProbePodmanFolder Unmounts the probe podman mount folder from within the container\n\nThe method runs a command inside the container to unmount the temporary host\nfolder used for probing filesystem differences. It reports any error or\nunexpected output, propagating it back to the caller. 
The operation is part\nof cleaning up after tests and returns an error if the unmount fails.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:325", + "calls": [ + { + "name": "FsDiff.execCommandContainer", + "kind": "function", + "source": [ + "func (f *FsDiff) execCommandContainer(cmd, errorStr string) error {", + "\toutput, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, cmd)", + "\tif err != nil || output != \"\" || outerr != \"\" {", + "\t\treturn errors.New(errorStr + fmt.Sprintf(\" Stderr: %s, Stdout: %s, Err: %v\", output, outerr, err))", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "FsDiff.unmountCustomPodman", + "kind": "function", + "source": [ + "func (f *FsDiff) unmountCustomPodman() {", + "\t// Unmount podman folder from host.", + "\tf.check.LogInfo(\"Unmounting folder %s\", nodeTmpMountFolder)", + "\tif err := f.unmountProbePodmanFolder(); err != nil {", + "\t\t// Here, there's no point on trying to remove the temp folder used as mount point, as", + "\t\t// that probably will not work either.", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t\treturn", + "\t}", + "", + "\tf.check.LogInfo(\"Deleting folder %s\", nodeTmpMountFolder)", + "\tif err := f.deleteNodeFolder(); err != nil {", + "\t\tf.Error = err", + "\t\tf.result = testhelper.ERROR", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (f *FsDiff) unmountProbePodmanFolder() error {", + "\treturn f.execCommandContainer(fmt.Sprintf(\"umount %s\", nodeTmpMountFolder),", + "\t\tfmt.Sprintf(\"failed or unexpected output when unmounting %s.\", nodeTmpMountFolder))", + "}" + ] + }, + { + "name": "NewFsDiffTester", + "qualifiedName": 
"NewFsDiffTester", + "exported": true, + "signature": "func(*checksdb.Check, clientsholder.Command, clientsholder.Context, string)(*FsDiff)", + "doc": "NewFsDiffTester Creates a tester for filesystem differences in containers\n\nIt determines whether to use a custom podman based on the OpenShift version,\nlogs this decision, and initializes an FsDiff structure with the provided\ncheck, client holder, context, and result state. The returned object is ready\nto run tests that compare container file systems.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:109", + "calls": [ + { + "name": "shouldUseCustomPodman", + "kind": "function", + "source": [ + "func shouldUseCustomPodman(check *checksdb.Check, ocpVersion string) bool {", + "\tconst (", + "\t\tocpForPreinstalledPodmanMajor = 4", + "\t\tocpForPreinstalledPodmanMinor = 13", + "\t)", + "", + "\tversion, err := semver.NewVersion(ocpVersion)", + "\tif err != nil {", + "\t\tcheck.LogError(\"Failed to parse Openshift version %q. 
Using preinstalled podman.\", ocpVersion)", + "\t\t// Use podman preinstalled in nodes as failover.", + "\t\treturn false", + "\t}", + "", + "\t// Major versions \u003e 4, use podman preinstalled in nodes.", + "\tif version.Major() \u003e ocpForPreinstalledPodmanMajor {", + "\t\treturn false", + "\t}", + "", + "\tif version.Major() == ocpForPreinstalledPodmanMajor {", + "\t\treturn version.Minor() \u003c ocpForPreinstalledPodmanMinor", + "\t}", + "", + "\t// For older versions (\u003c 3.), use podman preinstalled in nodes.", + "\treturn false", + "}" + ] + }, + { + "name": "LogDebug", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testContainersFsDiff", + "kind": "function", + "source": [ + "func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "", + "\t\t// If the probe pod is not found, we cannot run the test.", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Check whether or not a container is available to prevent a panic.", + "\t\tif len(probePod.Spec.Containers) == 0 {", + "\t\t\tcheck.LogError(\"Probe Pod %q has no containers\", probePod)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"certsuite probe pod has no containers\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tctxt := 
clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name)", + "\t\tfsDiffTester := cnffsdiff.NewFsDiffTester(check, clientsholder.GetClientsHolder(), ctxt, env.OpenshiftVersion)", + "\t\tfsDiffTester.RunTest(cut.UID)", + "\t\tswitch fsDiffTester.GetResults() {", + "\t\tcase testhelper.SUCCESS:", + "\t\t\tcheck.LogInfo(\"Container %q is not modified\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is not modified\", true))", + "\t\t\tcontinue", + "\t\tcase testhelper.FAILURE:", + "\t\t\tcheck.LogError(\"Container %q modified (changed folders: %v, deleted folders: %v\", cut, fsDiffTester.ChangedFolders, fsDiffTester.DeletedFolders)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container is modified\", false).", + "\t\t\t\tAddField(\"ChangedFolders\", strings.Join(fsDiffTester.ChangedFolders, \",\")).", + "\t\t\t\tAddField(\"DeletedFolders\", strings.Join(fsDiffTester.DeletedFolders, \",\")))", + "", + "\t\tcase testhelper.ERROR:", + "\t\t\tcheck.LogError(\"Could not run fs-diff in Container %q, err: %v\", cut, fsDiffTester.Error)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Error while running fs-diff\", false).AddField(testhelper.Error, fsDiffTester.Error.Error()))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewFsDiffTester(check *checksdb.Check, client clientsholder.Command, ctxt clientsholder.Context, ocpVersion string) *FsDiff {", + "\tuseCustomPodman := shouldUseCustomPodman(check, ocpVersion)", + "\tcheck.LogDebug(\"Using custom podman: %v.\", useCustomPodman)", + "", + "\treturn \u0026FsDiff{", + "\t\tcheck: check,", + 
"\t\tclientHolder: client,", + "\t\tctxt: ctxt,", + "\t\tresult: testhelper.ERROR,", + "\t\tuseCustomPodman: useCustomPodman,", + "\t}", + "}" + ] + }, + { + "name": "shouldUseCustomPodman", + "qualifiedName": "shouldUseCustomPodman", + "exported": false, + "signature": "func(*checksdb.Check, string)(bool)", + "doc": "shouldUseCustomPodman determines whether a custom podman binary should be used\n\nThe function parses the OpenShift version string to decide if the\npreinstalled podman on each node is suitable. For versions below 4.13 it\nselects a custom, precompiled podman that works with older RHEL 8.x based\nclusters; for newer releases or parsing failures it defaults to the node’s\nbuilt‑in podman. The result is returned as a boolean.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:129", + "calls": [ + { + "name": "NewVersion", + "kind": "function" + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "Major", + "kind": "function" + }, + { + "name": "Major", + "kind": "function" + }, + { + "name": "Minor", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff", + "name": "NewFsDiffTester", + "kind": "function", + "source": [ + "func NewFsDiffTester(check *checksdb.Check, client clientsholder.Command, ctxt clientsholder.Context, ocpVersion string) *FsDiff {", + "\tuseCustomPodman := shouldUseCustomPodman(check, ocpVersion)", + "\tcheck.LogDebug(\"Using custom podman: %v.\", useCustomPodman)", + "", + "\treturn \u0026FsDiff{", + "\t\tcheck: check,", + "\t\tclientHolder: client,", + "\t\tctxt: ctxt,", + "\t\tresult: testhelper.ERROR,", + "\t\tuseCustomPodman: useCustomPodman,", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func shouldUseCustomPodman(check *checksdb.Check, ocpVersion string) bool {", + "\tconst (", + "\t\tocpForPreinstalledPodmanMajor = 4", + 
"\t\tocpForPreinstalledPodmanMinor = 13", + "\t)", + "", + "\tversion, err := semver.NewVersion(ocpVersion)", + "\tif err != nil {", + "\t\tcheck.LogError(\"Failed to parse Openshift version %q. Using preinstalled podman.\", ocpVersion)", + "\t\t// Use podman preinstalled in nodes as failover.", + "\t\treturn false", + "\t}", + "", + "\t// Major versions \u003e 4, use podman preinstalled in nodes.", + "\tif version.Major() \u003e ocpForPreinstalledPodmanMajor {", + "\t\treturn false", + "\t}", + "", + "\tif version.Major() == ocpForPreinstalledPodmanMajor {", + "\t\treturn version.Minor() \u003c ocpForPreinstalledPodmanMinor", + "\t}", + "", + "\t// For older versions (\u003c 3.), use podman preinstalled in nodes.", + "\treturn false", + "}" + ] + } + ], + "globals": [ + { + "name": "nodeTmpMountFolder", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:40" + }, + { + "name": "targetFolders", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:44" + } + ], + "consts": [ + { + "name": "errorCode125RetrySeconds", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:36" + }, + { + "name": "partnerPodmanFolder", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:34" + }, + { + "name": "tmpMountDestFolder", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/platform/cnffsdiff/fsdiff.go:35" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "hugepages", + "files": 1, + "imports": [ + "errors", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "k8s.io/api/core/v1", + "regexp", + "sort", + 
"strconv", + "strings" + ], + "structs": [ + { + "name": "Tester", + "exported": true, + "doc": "Tester performs validation of node hugepage configuration against MachineConfig settings\n\nIt gathers hugepage counts per NUMA from the node, parses MachineConfig\nkernel arguments or systemd units, and compares these values to ensure\nconsistency. The Run method selects the appropriate comparison path based on\nwhether systemd units are present. A successful run confirms that all\nconfigured hugepages match between the node and its MachineConfig.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:68", + "fields": { + "commander": "clientsholder.Command", + "context": "clientsholder.Context", + "mcSystemdHugepagesByNuma": "hugepagesByNuma", + "node": "*provider.Node", + "nodeHugepagesByNuma": "hugepagesByNuma" + }, + "methodNames": [ + "HasMcSystemdHugepagesUnits", + "Run", + "TestNodeHugepagesWithKernelArgs", + "TestNodeHugepagesWithMcSystemd", + "getNodeNumaHugePages" + ], + "source": [ + "type Tester struct {", + "\tnode *provider.Node", + "\tcontext clientsholder.Context", + "\tcommander clientsholder.Command", + "", + "\tnodeHugepagesByNuma hugepagesByNuma", + "\tmcSystemdHugepagesByNuma hugepagesByNuma", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "NewTester", + "qualifiedName": "NewTester", + "exported": true, + "signature": "func(*provider.Node, *corev1.Pod, clientsholder.Command)(*Tester, error)", + "doc": "NewTester Creates a tester for node hugepage validation\n\nThis function initializes a Tester object with the provided node, probe pod,\nand command executor. It sets up the execution context inside the probe\ncontainer and retrieves the node's NUMA hugepages information along with\nmachineconfig systemd unit configurations. 
The resulting Tester is ready to\nrun checks against the gathered data.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:104", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: containerName,", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Tester.getNodeNumaHugePages", + "kind": "function", + "source": [ + "func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error) {", + "\t// This command must run inside the node, so we'll need the node's context to run commands inside the probe daemonset pod.", + "\tstdout, stderr, err := tester.commander.ExecCommandContainer(tester.context, cmd)", + "\tlog.Debug(\"getNodeNumaHugePages stdout: %s, stderr: %s\", stdout, stderr)", + "\tif err != nil {", + "\t\treturn hugepagesByNuma{}, err", + "\t}", + "\tif stderr != \"\" {", + "\t\treturn hugepagesByNuma{}, errors.New(stderr)", + "\t}", + "", + "\thugepages = hugepagesByNuma{}", + "\tr := regexp.MustCompile(outputRegex)", + "\tfor _, line := range strings.Split(stdout, \"\\n\") {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tvalues := r.FindStringSubmatch(line)", + "\t\tif len(values) != numRegexFields {", + "\t\t\treturn hugepagesByNuma{}, fmt.Errorf(\"failed to parse node's numa hugepages output line:%s (stdout: %s)\", line, stdout)", + "\t\t}", + "", + "\t\tnumaNode, _ := strconv.Atoi(values[1])", + "\t\thpSize, _ := strconv.Atoi(values[2])", + 
"\t\thpCount, _ := strconv.Atoi(values[3])", + "", + "\t\tif sizeCounts, exists := hugepages[numaNode]; exists {", + "\t\t\tsizeCounts[hpSize] = hpCount", + "\t\t} else {", + "\t\t\thugepages[numaNode] = countBySize{hpSize: hpCount}", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Node %s hugepages: %s\", tester.node.Data.Name, hugepages)", + "\treturn hugepages, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "getMcSystemdUnitsHugepagesConfig", + "kind": "function", + "source": [ + "func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepages hugepagesByNuma, err error) {", + "\tconst UnitContentsRegexMatchLen = 4", + "\thugepages = hugepagesByNuma{}", + "", + "\tr := regexp.MustCompile(`(?ms)HUGEPAGES_COUNT=(\\d+).*HUGEPAGES_SIZE=(\\d+).*NUMA_NODE=(\\d+)`)", + "\tfor _, unit := range mc.Config.Systemd.Units {", + "\t\tunit.Name = strings.Trim(unit.Name, \"\\\"\")", + "\t\tif !strings.Contains(unit.Name, \"hugepages-allocation\") {", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Systemd Unit with hugepages info -\u003e name: %s, contents: %s\", unit.Name, unit.Contents)", + "\t\tunit.Contents = strings.Trim(unit.Contents, \"\\\"\")", + "\t\tvalues := r.FindStringSubmatch(unit.Contents)", + "\t\tif len(values) \u003c UnitContentsRegexMatchLen {", + "\t\t\treturn hugepagesByNuma{}, fmt.Errorf(\"unable to get hugepages values from mc (contents=%s)\", unit.Contents)", + "\t\t}", + "", + "\t\tnumaNode, _ := strconv.Atoi(values[3])", + "\t\thpSize, _ := strconv.Atoi(values[2])", + "\t\thpCount, _ := strconv.Atoi(values[1])", + "", + "\t\tif sizeCounts, exists := hugepages[numaNode]; exists {", + "\t\t\tsizeCounts[hpSize] = hpCount", + "\t\t} 
else {", + "\t\t\thugepages[numaNode] = countBySize{hpSize: hpCount}", + "\t\t}", + "\t}", + "", + "\tif len(hugepages) \u003e 0 {", + "\t\tlog.Info(\"Machineconfig's systemd.units hugepages: %v\", hugepages)", + "\t} else {", + "\t\tlog.Info(\"No hugepages found in machineconfig system.units\")", + "\t}", + "", + "\treturn hugepages, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testHugepages", + "kind": "function", + "source": [ + "func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor i := range env.Nodes {", + "\t\tnode := env.Nodes[i]", + "\t\tnodeName := node.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "\t\tif !node.IsWorkerNode() {", + "\t\t\tcheck.LogInfo(\"Node %q is not a worker node\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Not a worker node\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tprobePod, exist := env.ProbePods[nodeName]", + "\t\tif !exist {", + "\t\t\tcheck.LogError(\"Could not find a Probe Pod in node %q.\", nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\thpTester, err := hugepages.NewTester(\u0026node, probePod, clientsholder.GetClientsHolder())", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Unable to get node hugepages tester for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Unable to get node hugepages tester\", false))", + "\t\t}", + "", + "\t\tif err := hpTester.Run(); err != nil {", + 
"\t\t\tcheck.LogError(\"Hugepages check failed for node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, err.Error(), false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Node %q passed the hugepages check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the hugepages check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error) {", + "\ttester := \u0026Tester{", + "\t\tnode: node,", + "\t\tcommander: commander,", + "\t\tcontext: clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name),", + "\t}", + "", + "\tlog.Info(\"Getting node %s numa's hugepages values.\", node.Data.Name)", + "\tvar err error", + "\ttester.nodeHugepagesByNuma, err = tester.getNodeNumaHugePages()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get node hugepages, err: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Parsing machineconfig's kernelArguments and systemd's hugepages units.\")", + "\ttester.mcSystemdHugepagesByNuma, err = getMcSystemdUnitsHugepagesConfig(\u0026tester.node.Mc)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get MC systemd hugepages config, err: %v\", err)", + "\t}", + "", + "\treturn tester, nil", + "}" + ] + }, + { + "name": "HasMcSystemdHugepagesUnits", + "qualifiedName": "Tester.HasMcSystemdHugepagesUnits", + "exported": true, + "receiver": "Tester", + "signature": "func()(bool)", + "doc": "Tester.HasMcSystemdHugepagesUnits Indicates whether MachineConfig contains Systemd hugepage unit definitions\n\nThe method returns true if the internal map of Systemd hugepages per NUMA\nnode has one or more entries, meaning that 
the machine configuration includes\nexplicit hugepage units. It does this by checking the length of the map; a\nnon‑zero count signals presence, otherwise it indicates no such units were\ndefined.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:134", + "calls": [ + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.Run", + "kind": "function", + "source": [ + "func (tester *Tester) Run() error {", + "\tif tester.HasMcSystemdHugepagesUnits() {", + "\t\tlog.Info(\"Comparing MachineConfig Systemd hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithMcSystemd(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig systemd's unit hugepages config with node values, err: %v\", err)", + "\t\t}", + "\t} else {", + "\t\tlog.Info(\"Comparing MC KernelArguments hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithKernelArgs(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig KernelArguments with node ones, err: %v\", err)", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (tester *Tester) HasMcSystemdHugepagesUnits() bool {", + "\treturn len(tester.mcSystemdHugepagesByNuma) \u003e 0", + "}" + ] + }, + { + "name": "Run", + "qualifiedName": "Tester.Run", + "exported": true, + "receiver": "Tester", + "signature": "func()(error)", + "doc": "Tester.Run Runs the hugepage configuration comparison tests\n\nThe method checks whether MachineConfig includes systemd unit definitions for\nhugepages. If so, it verifies that the node's hugepage counts match those\nunits; otherwise it compares kernel argument values against the node's\ntotals. 
It logs progress and returns an error if any mismatch or test failure\noccurs.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:145", + "calls": [ + { + "name": "Tester.HasMcSystemdHugepagesUnits", + "kind": "function", + "source": [ + "func (tester *Tester) HasMcSystemdHugepagesUnits() bool {", + "\treturn len(tester.mcSystemdHugepagesByNuma) \u003e 0", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Tester.TestNodeHugepagesWithMcSystemd", + "kind": "function", + "source": [ + "func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error) {", + "\t// Iterate through node's actual hugepages to make sure that each node's size that does not exist in the", + "\t// MachineConfig has a value of 0.", + "\tfor nodeNumaIdx, nodeCountBySize := range tester.nodeHugepagesByNuma {", + "\t\t// First, numa index should exist in MC", + "\t\tmcCountBySize, numaExistsInMc := tester.mcSystemdHugepagesByNuma[nodeNumaIdx]", + "\t\tif !numaExistsInMc {", + "\t\t\tlog.Warn(\"Numa %d does not exist in machine config. All hugepage count for all sizes must be zero.\", nodeNumaIdx)", + "\t\t\tfor _, count := range nodeCountBySize {", + "\t\t\t\tif count != 0 {", + "\t\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages config does not exist in node's machineconfig\", nodeNumaIdx)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Second, all sizes must exist in mc. If it does not exist (e.g. 
default 2MB size), its count should be 0.", + "\t\tfor nodeSize, nodeCount := range nodeCountBySize {", + "\t\t\tif _, sizeExistsInMc := mcCountBySize[nodeSize]; !sizeExistsInMc \u0026\u0026 nodeCount != 0 {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages size=%d does not appear in MC, but the count is not zero (%d)\",", + "\t\t\t\t\tnodeNumaIdx, nodeSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// Now, iterate through mc's numas and make sure they exist and have the same sizes and values in the node.", + "\tfor mcNumaIdx, mcCountBySize := range tester.mcSystemdHugepagesByNuma {", + "\t\tnodeCountBySize, numaExistsInNode := tester.nodeHugepagesByNuma[mcNumaIdx]", + "\t\t// First, numa index should exist in the node", + "\t\tif !numaExistsInNode {", + "\t\t\treturn false, fmt.Errorf(\"node does not have numa id %d found in the machine config\", mcNumaIdx)", + "\t\t}", + "", + "\t\t// For this numa, iterate through each of the mc's hugepages sizes and compare with node ones.", + "\t\tfor mcSize, mcCount := range mcCountBySize {", + "\t\t\tnodeCount, nodeSizeExistsInNode := nodeCountBySize[mcSize]", + "\t\t\tif !nodeSizeExistsInNode {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa id %d does not have size %d found in the machine config\",", + "\t\t\t\t\tmcNumaIdx, mcSize)", + "\t\t\t}", + "", + "\t\t\tif nodeCount != mcCount {", + "\t\t\t\treturn false, fmt.Errorf(\"mc numa=%d, hugepages count:%d, size:%d does not match node ones=%d\",", + "\t\t\t\t\tmcNumaIdx, mcCount, mcSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": 
"Tester.TestNodeHugepagesWithKernelArgs", + "kind": "function", + "source": [ + "func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) {", + "\tkernelArgsHpCountBySize, _ := getMcHugepagesFromMcKernelArguments(\u0026tester.node.Mc)", + "", + "\t// First, check that all the actual hp sizes across all numas exist in the kernelArguments.", + "\tfor nodeNumaIdx, nodeCountBySize := range tester.nodeHugepagesByNuma {", + "\t\tfor nodeSize, nodeCount := range nodeCountBySize {", + "\t\t\tif _, sizeExistsInKernelArgs := kernelArgsHpCountBySize[nodeSize]; !sizeExistsInKernelArgs \u0026\u0026 nodeCount != 0 {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages size=%d does not appear in kernelArgs, but the count is not zero (%d)\",", + "\t\t\t\t\tnodeNumaIdx, nodeSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// kernelArguments don't have numa info, so we'll add up all numa's hp count", + "\t// for the same size and it should match the values in the kernelArgs.", + "\tfor kernelSize, kernelCount := range kernelArgsHpCountBySize {", + "\t\ttotal := 0", + "\t\tfor numaIdx, numaCountBySize := range tester.nodeHugepagesByNuma {", + "\t\t\tnodeCount, sizeExistsInNode := numaCountBySize[kernelSize]", + "\t\t\tif !sizeExistsInNode {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d has no hugepages of kernelArgs' size %d\", numaIdx, kernelSize)", + "\t\t\t}", + "\t\t\ttotal += nodeCount", + "\t\t}", + "", + "\t\tif total == kernelCount {", + "\t\t\tlog.Info(\"kernelArguments' hugepages count:%d, size:%d match total node ones for that size.\", kernelCount, kernelSize)", + "\t\t} else {", + "\t\t\treturn false, fmt.Errorf(\"total hugepages of size %d will not match (node count=%d, expected=%d)\", kernelSize, total, kernelCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + 
"source": [ + "func (tester *Tester) Run() error {", + "\tif tester.HasMcSystemdHugepagesUnits() {", + "\t\tlog.Info(\"Comparing MachineConfig Systemd hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithMcSystemd(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig systemd's unit hugepages config with node values, err: %v\", err)", + "\t\t}", + "\t} else {", + "\t\tlog.Info(\"Comparing MC KernelArguments hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithKernelArgs(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig KernelArguments with node ones, err: %v\", err)", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + }, + { + "name": "TestNodeHugepagesWithKernelArgs", + "qualifiedName": "Tester.TestNodeHugepagesWithKernelArgs", + "exported": true, + "receiver": "Tester", + "signature": "func()(bool, error)", + "doc": "Tester.TestNodeHugepagesWithKernelArgs Validates node hugepage counts against kernel argument configuration\n\nThe method retrieves the hugepage sizes and counts specified in a machine's\nkernel arguments, then checks that each size present on the node appears in\nthose arguments with non‑zero counts. It aggregates node counts per size\nacross all NUMA nodes and compares them to the expected totals from the\nkernel arguments, returning an error if any mismatch occurs. 
On success it\nlogs matching sizes and returns true without error.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:226", + "calls": [ + { + "name": "getMcHugepagesFromMcKernelArguments", + "kind": "function", + "source": [ + "func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tdefhugepagesz = RhelDefaultHugepagesz", + "\thugepagesPerSize = map[int]int{}", + "", + "\thugepagesz := 0", + "\tfor _, arg := range mc.Spec.KernelArguments {", + "\t\tkeyValueSlice := strings.Split(arg, \"=\")", + "\t\tif len(keyValueSlice) != KernArgsKeyValueSplitLen {", + "\t\t\t// Some kernel arguments do not come in name=value", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tkey, value := keyValueSlice[0], keyValueSlice[1]", + "\t\tif key == HugepagesParam \u0026\u0026 value != \"\" {", + "\t\t\thugepages, _ := strconv.Atoi(value)", + "\t\t\tif _, sizeFound := hugepagesPerSize[hugepagesz]; sizeFound {", + "\t\t\t\t// hugepagesz was parsed before.", + "\t\t\t\thugepagesPerSize[hugepagesz] = hugepages", + "\t\t\t} else {", + "\t\t\t\t// use RHEL's default size for this count.", + "\t\t\t\thugepagesPerSize[RhelDefaultHugepagesz] = hugepages", + "\t\t\t}", + "\t\t}", + "", + "\t\tif key == HugepageszParam \u0026\u0026 value != \"\" {", + "\t\t\thugepagesz = hugepageSizeToInt(value)", + "\t\t\t// Create new map entry for this size", + "\t\t\thugepagesPerSize[hugepagesz] = 0", + "\t\t}", + "", + "\t\tif key == DefaultHugepagesz \u0026\u0026 value != \"\" {", + "\t\t\tdefhugepagesz = hugepageSizeToInt(value)", + "\t\t\t// In case only default_hugepagesz and hugepages values are provided. 
The actual value should be", + "\t\t\t// parsed next and this default value overwritten.", + "\t\t\thugepagesPerSize[defhugepagesz] = RhelDefaultHugepages", + "\t\t\thugepagesz = defhugepagesz", + "\t\t}", + "\t}", + "", + "\tif len(hugepagesPerSize) == 0 {", + "\t\thugepagesPerSize[RhelDefaultHugepagesz] = RhelDefaultHugepages", + "\t\tlog.Warn(\"No hugepages size found in node's machineconfig. Defaulting to size=%dkB (count=%d)\", RhelDefaultHugepagesz, RhelDefaultHugepages)", + "\t}", + "", + "\tlogMcKernelArgumentsHugepages(hugepagesPerSize, defhugepagesz)", + "\treturn hugepagesPerSize, defhugepagesz", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.Run", + "kind": "function", + "source": [ + "func (tester *Tester) Run() error {", + "\tif tester.HasMcSystemdHugepagesUnits() {", + "\t\tlog.Info(\"Comparing MachineConfig Systemd hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithMcSystemd(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig systemd's unit hugepages config with node values, err: %v\", err)", + "\t\t}", + "\t} else {", + "\t\tlog.Info(\"Comparing MC KernelArguments hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithKernelArgs(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig KernelArguments with node ones, err: %v\", err)", + "\t\t}", + "\t}", + "\treturn 
nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) {", + "\tkernelArgsHpCountBySize, _ := getMcHugepagesFromMcKernelArguments(\u0026tester.node.Mc)", + "", + "\t// First, check that all the actual hp sizes across all numas exist in the kernelArguments.", + "\tfor nodeNumaIdx, nodeCountBySize := range tester.nodeHugepagesByNuma {", + "\t\tfor nodeSize, nodeCount := range nodeCountBySize {", + "\t\t\tif _, sizeExistsInKernelArgs := kernelArgsHpCountBySize[nodeSize]; !sizeExistsInKernelArgs \u0026\u0026 nodeCount != 0 {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages size=%d does not appear in kernelArgs, but the count is not zero (%d)\",", + "\t\t\t\t\tnodeNumaIdx, nodeSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// kernelArguments don't have numa info, so we'll add up all numa's hp count", + "\t// for the same size and it should match the values in the kernelArgs.", + "\tfor kernelSize, kernelCount := range kernelArgsHpCountBySize {", + "\t\ttotal := 0", + "\t\tfor numaIdx, numaCountBySize := range tester.nodeHugepagesByNuma {", + "\t\t\tnodeCount, sizeExistsInNode := numaCountBySize[kernelSize]", + "\t\t\tif !sizeExistsInNode {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d has no hugepages of kernelArgs' size %d\", numaIdx, kernelSize)", + "\t\t\t}", + "\t\t\ttotal += nodeCount", + "\t\t}", + "", + "\t\tif total == kernelCount {", + "\t\t\tlog.Info(\"kernelArguments' hugepages count:%d, size:%d match total node ones for that size.\", kernelCount, kernelSize)", + "\t\t} else {", + "\t\t\treturn false, fmt.Errorf(\"total hugepages of size %d will not match (node count=%d, expected=%d)\", kernelSize, total, kernelCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "name": "TestNodeHugepagesWithMcSystemd", + "qualifiedName": "Tester.TestNodeHugepagesWithMcSystemd", + "exported": true, + 
"receiver": "Tester", + "signature": "func()(bool, error)", + "doc": "Tester.TestNodeHugepagesWithMcSystemd Verifies node hugepage counts match MachineConfig systemd settings\n\nThe function walks through each NUMA node’s actual hugepage configuration,\nensuring that any size or node absent from the MachineConfig has a count of\nzero. It then cross‑checks every entry in the MachineConfig against the\nnode’s values, confirming matching sizes and counts for all NUMA indices.\nIf any discrepancy is found, it returns false with an explanatory error;\notherwise it reports success.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:168", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.Run", + "kind": "function", + "source": [ + "func (tester *Tester) Run() error {", + "\tif tester.HasMcSystemdHugepagesUnits() {", + "\t\tlog.Info(\"Comparing MachineConfig Systemd hugepages info against node values.\")", + "\t\tif pass, err := tester.TestNodeHugepagesWithMcSystemd(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig systemd's unit hugepages config with node values, err: %v\", err)", + "\t\t}", + "\t} else {", + "\t\tlog.Info(\"Comparing MC KernelArguments hugepages info against node values.\")", + "\t\tif pass, err := 
tester.TestNodeHugepagesWithKernelArgs(); !pass {", + "\t\t\treturn fmt.Errorf(\"failed to compare machineConfig KernelArguments with node ones, err: %v\", err)", + "\t\t}", + "\t}", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error) {", + "\t// Iterate through node's actual hugepages to make sure that each node's size that does not exist in the", + "\t// MachineConfig has a value of 0.", + "\tfor nodeNumaIdx, nodeCountBySize := range tester.nodeHugepagesByNuma {", + "\t\t// First, numa index should exist in MC", + "\t\tmcCountBySize, numaExistsInMc := tester.mcSystemdHugepagesByNuma[nodeNumaIdx]", + "\t\tif !numaExistsInMc {", + "\t\t\tlog.Warn(\"Numa %d does not exist in machine config. All hugepage count for all sizes must be zero.\", nodeNumaIdx)", + "\t\t\tfor _, count := range nodeCountBySize {", + "\t\t\t\tif count != 0 {", + "\t\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages config does not exist in node's machineconfig\", nodeNumaIdx)", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Second, all sizes must exist in mc. If it does not exist (e.g. 
default 2MB size), its count should be 0.", + "\t\tfor nodeSize, nodeCount := range nodeCountBySize {", + "\t\t\tif _, sizeExistsInMc := mcCountBySize[nodeSize]; !sizeExistsInMc \u0026\u0026 nodeCount != 0 {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages size=%d does not appear in MC, but the count is not zero (%d)\",", + "\t\t\t\t\tnodeNumaIdx, nodeSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// Now, iterate through mc's numas and make sure they exist and have the same sizes and values in the node.", + "\tfor mcNumaIdx, mcCountBySize := range tester.mcSystemdHugepagesByNuma {", + "\t\tnodeCountBySize, numaExistsInNode := tester.nodeHugepagesByNuma[mcNumaIdx]", + "\t\t// First, numa index should exist in the node", + "\t\tif !numaExistsInNode {", + "\t\t\treturn false, fmt.Errorf(\"node does not have numa id %d found in the machine config\", mcNumaIdx)", + "\t\t}", + "", + "\t\t// For this numa, iterate through each of the mc's hugepages sizes and compare with node ones.", + "\t\tfor mcSize, mcCount := range mcCountBySize {", + "\t\t\tnodeCount, nodeSizeExistsInNode := nodeCountBySize[mcSize]", + "\t\t\tif !nodeSizeExistsInNode {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa id %d does not have size %d found in the machine config\",", + "\t\t\t\t\tmcNumaIdx, mcSize)", + "\t\t\t}", + "", + "\t\t\tif nodeCount != mcCount {", + "\t\t\t\treturn false, fmt.Errorf(\"mc numa=%d, hugepages count:%d, size:%d does not match node ones=%d\",", + "\t\t\t\t\tmcNumaIdx, mcCount, mcSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + }, + { + "name": "getNodeNumaHugePages", + "qualifiedName": "Tester.getNodeNumaHugePages", + "exported": false, + "receiver": "Tester", + "signature": "func()(hugepagesByNuma, error)", + "doc": "Tester.getNodeNumaHugePages Retrieves the node's current hugepage configuration\n\nThis method runs a command inside the probe pod to read\n/sys/devices/system/node files, 
parses each line for NUMA node number, page\nsize, and count, and aggregates them into a map keyed by node. It returns the\npopulated map or an error if execution fails or output cannot be parsed. The\nresult is used to compare against desired hugepage settings.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:268", + "calls": [ + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "NewTester", + "kind": "function", + "source": [ + "func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error) {", + "\ttester := \u0026Tester{", + "\t\tnode: node,", + "\t\tcommander: commander,", + "\t\tcontext: 
clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name),", + "\t}", + "", + "\tlog.Info(\"Getting node %s numa's hugepages values.\", node.Data.Name)", + "\tvar err error", + "\ttester.nodeHugepagesByNuma, err = tester.getNodeNumaHugePages()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get node hugepages, err: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Parsing machineconfig's kernelArguments and systemd's hugepages units.\")", + "\ttester.mcSystemdHugepagesByNuma, err = getMcSystemdUnitsHugepagesConfig(\u0026tester.node.Mc)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get MC systemd hugepages config, err: %v\", err)", + "\t}", + "", + "\treturn tester, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error) {", + "\t// This command must run inside the node, so we'll need the node's context to run commands inside the probe daemonset pod.", + "\tstdout, stderr, err := tester.commander.ExecCommandContainer(tester.context, cmd)", + "\tlog.Debug(\"getNodeNumaHugePages stdout: %s, stderr: %s\", stdout, stderr)", + "\tif err != nil {", + "\t\treturn hugepagesByNuma{}, err", + "\t}", + "\tif stderr != \"\" {", + "\t\treturn hugepagesByNuma{}, errors.New(stderr)", + "\t}", + "", + "\thugepages = hugepagesByNuma{}", + "\tr := regexp.MustCompile(outputRegex)", + "\tfor _, line := range strings.Split(stdout, \"\\n\") {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tvalues := r.FindStringSubmatch(line)", + "\t\tif len(values) != numRegexFields {", + "\t\t\treturn hugepagesByNuma{}, fmt.Errorf(\"failed to parse node's numa hugepages output line:%s (stdout: %s)\", line, stdout)", + "\t\t}", + "", + "\t\tnumaNode, _ := strconv.Atoi(values[1])", + "\t\thpSize, _ := strconv.Atoi(values[2])", + "\t\thpCount, _ := strconv.Atoi(values[3])", + "", + "\t\tif 
sizeCounts, exists := hugepages[numaNode]; exists {", + "\t\t\tsizeCounts[hpSize] = hpCount", + "\t\t} else {", + "\t\t\thugepages[numaNode] = countBySize{hpSize: hpCount}", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Node %s hugepages: %s\", tester.node.Data.Name, hugepages)", + "\treturn hugepages, nil", + "}" + ] + }, + { + "name": "getMcHugepagesFromMcKernelArguments", + "qualifiedName": "getMcHugepagesFromMcKernelArguments", + "exported": false, + "signature": "func(*provider.MachineConfig)(map[int]int, int)", + "doc": "getMcHugepagesFromMcKernelArguments extracts hugepage configuration from kernel arguments\n\nThe function parses the kernelArguments field of a MachineConfig to build a\nmap that associates each hugepage size with its count, using RHEL defaults\nwhen necessary. It also determines the default hugepages size specified in\nthe arguments or falls back to a system default. The resulting map and\ndefault size are returned for use by tests validating node hugepage settings.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:373", + "calls": [ + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "name": "hugepageSizeToInt", + "kind": "function", + "source": [ + "func hugepageSizeToInt(s string) int {", + "\tnum, _ := strconv.Atoi(s[:len(s)-1])", + "\tunit := s[len(s)-1]", + "\tswitch unit {", + "\tcase 'M':", + "\t\tnum *= 1024", + "\tcase 'G':", + "\t\tnum *= 1024 * 1024", + "\t}", + "", + "\treturn num", + "}" + ] + }, + { + "name": "hugepageSizeToInt", + "kind": "function", + "source": [ + "func hugepageSizeToInt(s string) int {", + "\tnum, _ := strconv.Atoi(s[:len(s)-1])", + "\tunit := s[len(s)-1]", + "\tswitch unit {", + "\tcase 'M':", + "\t\tnum *= 1024", + "\tcase 'G':", + "\t\tnum *= 1024 * 1024", + "\t}", + "", + "\treturn num", + "}" + ] + }, + { + "name": "len", + 
"kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + }, + { + "name": "logMcKernelArgumentsHugepages", + "kind": "function", + "source": [ + "func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"MC KernelArguments hugepages config: default_hugepagesz=%d-kB\", defhugepagesz))", + "\tfor size, count := range hugepagesPerSize {", + "\t\tsb.WriteString(fmt.Sprintf(\", size=%dkB - count=%d\", size, count))", + "\t}", + "\tlog.Info(\"%s\", sb.String())", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "Tester.TestNodeHugepagesWithKernelArgs", + "kind": "function", + "source": [ + "func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) {", + "\tkernelArgsHpCountBySize, _ := getMcHugepagesFromMcKernelArguments(\u0026tester.node.Mc)", + "", + "\t// First, check that all the actual hp sizes across all numas exist in the kernelArguments.", + "\tfor nodeNumaIdx, nodeCountBySize := range tester.nodeHugepagesByNuma {", + "\t\tfor nodeSize, nodeCount := range nodeCountBySize {", + "\t\t\tif _, sizeExistsInKernelArgs := kernelArgsHpCountBySize[nodeSize]; !sizeExistsInKernelArgs \u0026\u0026 nodeCount != 0 {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d hugepages size=%d does not appear in kernelArgs, but the count is not zero (%d)\",", + "\t\t\t\t\tnodeNumaIdx, nodeSize, nodeCount)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\t// kernelArguments don't have numa info, so we'll add up all numa's hp count", + "\t// for the same size and it should match the values in the kernelArgs.", + "\tfor kernelSize, kernelCount := range kernelArgsHpCountBySize {", + 
"\t\ttotal := 0", + "\t\tfor numaIdx, numaCountBySize := range tester.nodeHugepagesByNuma {", + "\t\t\tnodeCount, sizeExistsInNode := numaCountBySize[kernelSize]", + "\t\t\tif !sizeExistsInNode {", + "\t\t\t\treturn false, fmt.Errorf(\"node's numa %d has no hugepages of kernelArgs' size %d\", numaIdx, kernelSize)", + "\t\t\t}", + "\t\t\ttotal += nodeCount", + "\t\t}", + "", + "\t\tif total == kernelCount {", + "\t\t\tlog.Info(\"kernelArguments' hugepages count:%d, size:%d match total node ones for that size.\", kernelCount, kernelSize)", + "\t\t} else {", + "\t\t\treturn false, fmt.Errorf(\"total hugepages of size %d will not match (node count=%d, expected=%d)\", kernelSize, total, kernelCount)", + "\t\t}", + "\t}", + "", + "\treturn true, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tdefhugepagesz = RhelDefaultHugepagesz", + "\thugepagesPerSize = map[int]int{}", + "", + "\thugepagesz := 0", + "\tfor _, arg := range mc.Spec.KernelArguments {", + "\t\tkeyValueSlice := strings.Split(arg, \"=\")", + "\t\tif len(keyValueSlice) != KernArgsKeyValueSplitLen {", + "\t\t\t// Some kernel arguments do not come in name=value", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tkey, value := keyValueSlice[0], keyValueSlice[1]", + "\t\tif key == HugepagesParam \u0026\u0026 value != \"\" {", + "\t\t\thugepages, _ := strconv.Atoi(value)", + "\t\t\tif _, sizeFound := hugepagesPerSize[hugepagesz]; sizeFound {", + "\t\t\t\t// hugepagesz was parsed before.", + "\t\t\t\thugepagesPerSize[hugepagesz] = hugepages", + "\t\t\t} else {", + "\t\t\t\t// use RHEL's default size for this count.", + "\t\t\t\thugepagesPerSize[RhelDefaultHugepagesz] = hugepages", + "\t\t\t}", + "\t\t}", + "", + "\t\tif key == HugepageszParam \u0026\u0026 value != \"\" {", + "\t\t\thugepagesz = hugepageSizeToInt(value)", + "\t\t\t// Create new map entry for 
this size", + "\t\t\thugepagesPerSize[hugepagesz] = 0", + "\t\t}", + "", + "\t\tif key == DefaultHugepagesz \u0026\u0026 value != \"\" {", + "\t\t\tdefhugepagesz = hugepageSizeToInt(value)", + "\t\t\t// In case only default_hugepagesz and hugepages values are provided. The actual value should be", + "\t\t\t// parsed next and this default value overwritten.", + "\t\t\thugepagesPerSize[defhugepagesz] = RhelDefaultHugepages", + "\t\t\thugepagesz = defhugepagesz", + "\t\t}", + "\t}", + "", + "\tif len(hugepagesPerSize) == 0 {", + "\t\thugepagesPerSize[RhelDefaultHugepagesz] = RhelDefaultHugepages", + "\t\tlog.Warn(\"No hugepages size found in node's machineconfig. Defaulting to size=%dkB (count=%d)\", RhelDefaultHugepagesz, RhelDefaultHugepages)", + "\t}", + "", + "\tlogMcKernelArgumentsHugepages(hugepagesPerSize, defhugepagesz)", + "\treturn hugepagesPerSize, defhugepagesz", + "}" + ] + }, + { + "name": "getMcSystemdUnitsHugepagesConfig", + "qualifiedName": "getMcSystemdUnitsHugepagesConfig", + "exported": false, + "signature": "func(*provider.MachineConfig)(hugepagesByNuma, error)", + "doc": "getMcSystemdUnitsHugepagesConfig extracts hugepage configuration from machineconfig systemd units\n\nThis function scans the systemd unit files in a machine configuration for\nentries that define hugepage allocations. It parses each matching unit’s\ncontents to capture the number, size, and NUMA node of the hugepages,\norganizing them into a nested map keyed by node and page size. 
The resulting\nstructure is returned along with any parsing errors encountered.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:313", + "calls": [ + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Trim", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Trim", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "NewTester", + "kind": "function", + "source": [ + "func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, 
error) {", + "\ttester := \u0026Tester{", + "\t\tnode: node,", + "\t\tcommander: commander,", + "\t\tcontext: clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name),", + "\t}", + "", + "\tlog.Info(\"Getting node %s numa's hugepages values.\", node.Data.Name)", + "\tvar err error", + "\ttester.nodeHugepagesByNuma, err = tester.getNodeNumaHugePages()", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"unable to get node hugepages, err: %v\", err)", + "\t}", + "", + "\tlog.Info(\"Parsing machineconfig's kernelArguments and systemd's hugepages units.\")", + "\ttester.mcSystemdHugepagesByNuma, err = getMcSystemdUnitsHugepagesConfig(\u0026tester.node.Mc)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to get MC systemd hugepages config, err: %v\", err)", + "\t}", + "", + "\treturn tester, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepages hugepagesByNuma, err error) {", + "\tconst UnitContentsRegexMatchLen = 4", + "\thugepages = hugepagesByNuma{}", + "", + "\tr := regexp.MustCompile(`(?ms)HUGEPAGES_COUNT=(\\d+).*HUGEPAGES_SIZE=(\\d+).*NUMA_NODE=(\\d+)`)", + "\tfor _, unit := range mc.Config.Systemd.Units {", + "\t\tunit.Name = strings.Trim(unit.Name, \"\\\"\")", + "\t\tif !strings.Contains(unit.Name, \"hugepages-allocation\") {", + "\t\t\tcontinue", + "\t\t}", + "\t\tlog.Info(\"Systemd Unit with hugepages info -\u003e name: %s, contents: %s\", unit.Name, unit.Contents)", + "\t\tunit.Contents = strings.Trim(unit.Contents, \"\\\"\")", + "\t\tvalues := r.FindStringSubmatch(unit.Contents)", + "\t\tif len(values) \u003c UnitContentsRegexMatchLen {", + "\t\t\treturn hugepagesByNuma{}, fmt.Errorf(\"unable to get hugepages values from mc (contents=%s)\", unit.Contents)", + "\t\t}", + "", + "\t\tnumaNode, _ := strconv.Atoi(values[3])", + "\t\thpSize, _ := strconv.Atoi(values[2])", + "\t\thpCount, _ := 
strconv.Atoi(values[1])", + "", + "\t\tif sizeCounts, exists := hugepages[numaNode]; exists {", + "\t\t\tsizeCounts[hpSize] = hpCount", + "\t\t} else {", + "\t\t\thugepages[numaNode] = countBySize{hpSize: hpCount}", + "\t\t}", + "\t}", + "", + "\tif len(hugepages) \u003e 0 {", + "\t\tlog.Info(\"Machineconfig's systemd.units hugepages: %v\", hugepages)", + "\t} else {", + "\t\tlog.Info(\"No hugepages found in machineconfig system.units\")", + "\t}", + "", + "\treturn hugepages, nil", + "}" + ] + }, + { + "name": "hugepageSizeToInt", + "qualifiedName": "hugepageSizeToInt", + "exported": false, + "signature": "func(string)(int)", + "doc": "hugepageSizeToInt Converts a hugepage size string into an integer kilobyte value\n\nThis function takes a size string such as \"2M\" or \"1G\", extracts the numeric\nportion and multiplies it by 1024 for megabytes or 1024 squared for\ngigabytes. It returns the resulting value in kilobytes as an int, ignoring\nany errors from parsing. The conversion is used to translate kernel argument\nvalues into usable integer sizes within the program.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:84", + "calls": [ + { + "pkgPath": "strconv", + "name": "Atoi", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "getMcHugepagesFromMcKernelArguments", + "kind": "function", + "source": [ + "func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tdefhugepagesz = RhelDefaultHugepagesz", + "\thugepagesPerSize = map[int]int{}", + "", + "\thugepagesz := 0", + "\tfor _, arg := range mc.Spec.KernelArguments {", + "\t\tkeyValueSlice := strings.Split(arg, \"=\")", + "\t\tif len(keyValueSlice) != KernArgsKeyValueSplitLen {", + "\t\t\t// Some kernel arguments do 
not come in name=value", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tkey, value := keyValueSlice[0], keyValueSlice[1]", + "\t\tif key == HugepagesParam \u0026\u0026 value != \"\" {", + "\t\t\thugepages, _ := strconv.Atoi(value)", + "\t\t\tif _, sizeFound := hugepagesPerSize[hugepagesz]; sizeFound {", + "\t\t\t\t// hugepagesz was parsed before.", + "\t\t\t\thugepagesPerSize[hugepagesz] = hugepages", + "\t\t\t} else {", + "\t\t\t\t// use RHEL's default size for this count.", + "\t\t\t\thugepagesPerSize[RhelDefaultHugepagesz] = hugepages", + "\t\t\t}", + "\t\t}", + "", + "\t\tif key == HugepageszParam \u0026\u0026 value != \"\" {", + "\t\t\thugepagesz = hugepageSizeToInt(value)", + "\t\t\t// Create new map entry for this size", + "\t\t\thugepagesPerSize[hugepagesz] = 0", + "\t\t}", + "", + "\t\tif key == DefaultHugepagesz \u0026\u0026 value != \"\" {", + "\t\t\tdefhugepagesz = hugepageSizeToInt(value)", + "\t\t\t// In case only default_hugepagesz and hugepages values are provided. The actual value should be", + "\t\t\t// parsed next and this default value overwritten.", + "\t\t\thugepagesPerSize[defhugepagesz] = RhelDefaultHugepages", + "\t\t\thugepagesz = defhugepagesz", + "\t\t}", + "\t}", + "", + "\tif len(hugepagesPerSize) == 0 {", + "\t\thugepagesPerSize[RhelDefaultHugepagesz] = RhelDefaultHugepages", + "\t\tlog.Warn(\"No hugepages size found in node's machineconfig. 
Defaulting to size=%dkB (count=%d)\", RhelDefaultHugepagesz, RhelDefaultHugepages)", + "\t}", + "", + "\tlogMcKernelArgumentsHugepages(hugepagesPerSize, defhugepagesz)", + "\treturn hugepagesPerSize, defhugepagesz", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func hugepageSizeToInt(s string) int {", + "\tnum, _ := strconv.Atoi(s[:len(s)-1])", + "\tunit := s[len(s)-1]", + "\tswitch unit {", + "\tcase 'M':", + "\t\tnum *= 1024", + "\tcase 'G':", + "\t\tnum *= 1024 * 1024", + "\t}", + "", + "\treturn num", + "}" + ] + }, + { + "name": "String", + "qualifiedName": "hugepagesByNuma.String", + "exported": true, + "receiver": "hugepagesByNuma", + "signature": "func()(string)", + "doc": "hugepagesByNuma.String Produces a formatted string of NUMA node hugepage allocations\n\nIt orders the NUMA indices, then for each index lists all page sizes with\ntheir counts in a human‑readable format. The resulting string contains\nentries like \"Numa=0 [Size=2048kB Count=4]\" and is returned for debugging or\nlogging purposes.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:41", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Ints", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "hugepagesByNuma.String", + "kind": "function", + "source": [ + "func (numaHps hugepagesByNuma) String() string {", + "\t// Order numa ids/indexes", + "\tnumaIndexes := []int{}", + "", + "\tfor numaIdx := range numaHps {", + "\t\tnumaIndexes = append(numaIndexes, numaIdx)", + "\t}", + "\tsort.Ints(numaIndexes)", + "", + "\tvar sb strings.Builder", + "\tfor _, numaIdx := range numaIndexes {", + "\t\tsizeCounts := numaHps[numaIdx]", + 
"\t\tsb.WriteString(fmt.Sprintf(\"Numa=%d \", numaIdx))", + "\t\tfor size, count := range sizeCounts {", + "\t\t\tsb.WriteString(fmt.Sprintf(\"[Size=%dkB Count=%d] \", size, count))", + "\t\t}", + "\t}", + "\treturn sb.String()", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "hugepagesByNuma.String", + "kind": "function", + "source": [ + "func (numaHps hugepagesByNuma) String() string {", + "\t// Order numa ids/indexes", + "\tnumaIndexes := []int{}", + "", + "\tfor numaIdx := range numaHps {", + "\t\tnumaIndexes = append(numaIndexes, numaIdx)", + "\t}", + "\tsort.Ints(numaIndexes)", + "", + "\tvar sb strings.Builder", + "\tfor _, numaIdx := range numaIndexes {", + "\t\tsizeCounts := numaHps[numaIdx]", + "\t\tsb.WriteString(fmt.Sprintf(\"Numa=%d \", numaIdx))", + "\t\tfor size, count := range sizeCounts {", + "\t\t\tsb.WriteString(fmt.Sprintf(\"[Size=%dkB Count=%d] \", size, count))", + "\t\t}", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "logMcKernelArgumentsHugepages", + "kind": "function", + "source": [ + "func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"MC KernelArguments hugepages config: default_hugepagesz=%d-kB\", defhugepagesz))", + "\tfor size, count := range hugepagesPerSize {", + "\t\tsb.WriteString(fmt.Sprintf(\", size=%dkB - count=%d\", size, count))", + "\t}", + "\tlog.Info(\"%s\", sb.String())", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (numaHps hugepagesByNuma) String() string {", + "\t// Order numa ids/indexes", + "\tnumaIndexes := []int{}", + "", + "\tfor numaIdx := range numaHps {", + "\t\tnumaIndexes = append(numaIndexes, numaIdx)", + "\t}", + "\tsort.Ints(numaIndexes)", + "", + "\tvar sb 
strings.Builder", + "\tfor _, numaIdx := range numaIndexes {", + "\t\tsizeCounts := numaHps[numaIdx]", + "\t\tsb.WriteString(fmt.Sprintf(\"Numa=%d \", numaIdx))", + "\t\tfor size, count := range sizeCounts {", + "\t\t\tsb.WriteString(fmt.Sprintf(\"[Size=%dkB Count=%d] \", size, count))", + "\t\t}", + "\t}", + "\treturn sb.String()", + "}" + ] + }, + { + "name": "logMcKernelArgumentsHugepages", + "qualifiedName": "logMcKernelArgumentsHugepages", + "exported": false, + "signature": "func(map[int]int, int)()", + "doc": "logMcKernelArgumentsHugepages Logs the hugepage configuration extracted from machine‑config kernel arguments\n\nThis function builds a human‑readable string that includes the default\nhugepage size and each configured size with its count. It then sends this\nmessage to the package logger at info level, providing visibility into how\nmany hugepages of each size were requested by the node’s machine\nconfiguration.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:357", + "calls": [ + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "WriteString", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "hugepagesByNuma.String", + "kind": "function", + "source": [ + "func (numaHps hugepagesByNuma) String() string {", + "\t// Order numa ids/indexes", + "\tnumaIndexes := []int{}", + "", + "\tfor numaIdx := range numaHps {", + "\t\tnumaIndexes = append(numaIndexes, numaIdx)", + "\t}", + "\tsort.Ints(numaIndexes)", + "", + "\tvar sb strings.Builder", + "\tfor _, numaIdx := range numaIndexes {", + "\t\tsizeCounts := 
numaHps[numaIdx]", + "\t\tsb.WriteString(fmt.Sprintf(\"Numa=%d \", numaIdx))", + "\t\tfor size, count := range sizeCounts {", + "\t\t\tsb.WriteString(fmt.Sprintf(\"[Size=%dkB Count=%d] \", size, count))", + "\t\t}", + "\t}", + "\treturn sb.String()", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages", + "name": "getMcHugepagesFromMcKernelArguments", + "kind": "function", + "source": [ + "func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tdefhugepagesz = RhelDefaultHugepagesz", + "\thugepagesPerSize = map[int]int{}", + "", + "\thugepagesz := 0", + "\tfor _, arg := range mc.Spec.KernelArguments {", + "\t\tkeyValueSlice := strings.Split(arg, \"=\")", + "\t\tif len(keyValueSlice) != KernArgsKeyValueSplitLen {", + "\t\t\t// Some kernel arguments do not come in name=value", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tkey, value := keyValueSlice[0], keyValueSlice[1]", + "\t\tif key == HugepagesParam \u0026\u0026 value != \"\" {", + "\t\t\thugepages, _ := strconv.Atoi(value)", + "\t\t\tif _, sizeFound := hugepagesPerSize[hugepagesz]; sizeFound {", + "\t\t\t\t// hugepagesz was parsed before.", + "\t\t\t\thugepagesPerSize[hugepagesz] = hugepages", + "\t\t\t} else {", + "\t\t\t\t// use RHEL's default size for this count.", + "\t\t\t\thugepagesPerSize[RhelDefaultHugepagesz] = hugepages", + "\t\t\t}", + "\t\t}", + "", + "\t\tif key == HugepageszParam \u0026\u0026 value != \"\" {", + "\t\t\thugepagesz = hugepageSizeToInt(value)", + "\t\t\t// Create new map entry for this size", + "\t\t\thugepagesPerSize[hugepagesz] = 0", + "\t\t}", + "", + "\t\tif key == DefaultHugepagesz \u0026\u0026 value != \"\" {", + "\t\t\tdefhugepagesz = hugepageSizeToInt(value)", + "\t\t\t// In case only default_hugepagesz and hugepages values are provided. 
The actual value should be", + "\t\t\t// parsed next and this default value overwritten.", + "\t\t\thugepagesPerSize[defhugepagesz] = RhelDefaultHugepages", + "\t\t\thugepagesz = defhugepagesz", + "\t\t}", + "\t}", + "", + "\tif len(hugepagesPerSize) == 0 {", + "\t\thugepagesPerSize[RhelDefaultHugepagesz] = RhelDefaultHugepages", + "\t\tlog.Warn(\"No hugepages size found in node's machineconfig. Defaulting to size=%dkB (count=%d)\", RhelDefaultHugepagesz, RhelDefaultHugepages)", + "\t}", + "", + "\tlogMcKernelArgumentsHugepages(hugepagesPerSize, defhugepagesz)", + "\treturn hugepagesPerSize, defhugepagesz", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz int) {", + "\tvar sb strings.Builder", + "\tsb.WriteString(fmt.Sprintf(\"MC KernelArguments hugepages config: default_hugepagesz=%d-kB\", defhugepagesz))", + "\tfor size, count := range hugepagesPerSize {", + "\t\tsb.WriteString(fmt.Sprintf(\", size=%dkB - count=%d\", size, count))", + "\t}", + "\tlog.Info(\"%s\", sb.String())", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "DefaultHugepagesz", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:22" + }, + { + "name": "HugepagesParam", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:20" + }, + { + "name": "HugepageszParam", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:21" + }, + { + "name": "KernArgsKeyValueSplitLen", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:23" + }, + { + "name": "RhelDefaultHugepages", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:19" + }, + { + "name": "RhelDefaultHugepagesz", + "exported": true, + "position": 
"/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:18" + }, + { + "name": "cmd", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:24" + }, + { + "name": "numRegexFields", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:26" + }, + { + "name": "outputRegex", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/tests/platform/hugepages/hugepages.go:25" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + "name": "isredhat", + "files": 1, + "imports": [ + "errors", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "regexp" + ], + "structs": [ + { + "name": "BaseImageInfo", + "exported": true, + "doc": "BaseImageInfo provides utilities for inspecting a container’s base image\n\nThe struct holds a command executor and context, enabling it to run commands\ninside a container. It offers methods such as TestContainerIsRedHatRelease,\nwhich checks the presence of /etc/redhat-release to determine if the image is\nRHEL-based, returning a boolean and error. 
The helper runCommand executes\narbitrary shell commands via the client holder, handling errors and capturing\noutput.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/isredhat/isredhat.go:42", + "fields": { + "ClientHolder": "clientsholder.Command", + "OCPContext": "clientsholder.Context" + }, + "methodNames": [ + "TestContainerIsRedHatRelease", + "runCommand" + ], + "source": [ + "type BaseImageInfo struct {", + "\tClientHolder clientsholder.Command", + "\tOCPContext clientsholder.Context", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "TestContainerIsRedHatRelease", + "qualifiedName": "BaseImageInfo.TestContainerIsRedHatRelease", + "exported": true, + "receiver": "BaseImageInfo", + "signature": "func()(bool, error)", + "doc": "BaseImageInfo.TestContainerIsRedHatRelease Checks if the container image is a Red Hat release\n\nThe method runs a shell command inside the container to read\n/etc/redhat-release or report an unknown base image, logs the output, and\nthen uses IsRHEL to determine whether the image matches known Red Hat\npatterns. 
It returns true when the image is confirmed as a Red Hat release,\notherwise false, along with any execution error that occurs.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/isredhat/isredhat.go:67", + "calls": [ + { + "name": "BaseImageInfo.runCommand", + "kind": "function", + "source": [ + "func (b *BaseImageInfo) runCommand(cmd string) (string, error) {", + "\toutput, outerr, err := b.ClientHolder.ExecCommandContainer(b.OCPContext, cmd)", + "\tif err != nil {", + "\t\tlog.Error(\"can not execute command on container, err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tif outerr != \"\" {", + "\t\tlog.Error(\"Error when running baseimage command, err: %v\", outerr)", + "\t\treturn \"\", errors.New(outerr)", + "\t}", + "\treturn output, nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "IsRHEL", + "kind": "function", + "source": [ + "func IsRHEL(output string) bool {", + "\t// If the 'Unknown Base Image' string appears, return false.", + "\tnotRedHatRegex := regexp.MustCompile(NotRedHatBasedRegex)", + "\tmatchNotRedhat := notRedHatRegex.FindAllString(output, -1)", + "\tif len(matchNotRedhat) \u003e 0 {", + "\t\treturn false", + "\t}", + "", + "\t// /etc/redhat-release exists. 
check if it matches the regex for an official build.", + "\tlog.Info(\"redhat-release was found to be: %s\", output)", + "\tredHatVersionRegex := regexp.MustCompile(VersionRegex)", + "\tmatchVersion := redHatVersionRegex.FindAllString(output, -1)", + "\treturn len(matchVersion) \u003e 0", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error) {", + "\toutput, err := b.runCommand(`if [ -e /etc/redhat-release ]; then cat /etc/redhat-release; else echo \\\"Unknown Base Image\\\"; fi`)", + "\tlog.Info(\"Output from /etc/redhat-release: %q\", output)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "\treturn IsRHEL(output), nil", + "}" + ] + }, + { + "name": "runCommand", + "qualifiedName": "BaseImageInfo.runCommand", + "exported": false, + "receiver": "BaseImageInfo", + "signature": "func(string)(string, error)", + "doc": "BaseImageInfo.runCommand Executes a shell command inside a container\n\nThe method runs the supplied command in the container using the client\nholder, capturing both standard output and error streams. If execution fails\nor an error string is returned, it logs the issue and propagates an error to\nthe caller. 
On success, it returns the command's output as a string.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/isredhat/isredhat.go:104", + "calls": [ + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "errors", + "name": "New", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + "name": "BaseImageInfo.TestContainerIsRedHatRelease", + "kind": "function", + "source": [ + "func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error) {", + "\toutput, err := b.runCommand(`if [ -e /etc/redhat-release ]; then cat /etc/redhat-release; else echo \\\"Unknown Base Image\\\"; fi`)", + "\tlog.Info(\"Output from /etc/redhat-release: %q\", output)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "\treturn IsRHEL(output), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (b *BaseImageInfo) runCommand(cmd string) (string, error) {", + "\toutput, outerr, err := b.ClientHolder.ExecCommandContainer(b.OCPContext, cmd)", + "\tif err != nil {", + "\t\tlog.Error(\"can not execute command on container, err: %v\", err)", + "\t\treturn \"\", err", + "\t}", + "\tif outerr != \"\" {", + "\t\tlog.Error(\"Error when running baseimage command, err: %v\", outerr)", + "\t\treturn \"\", errors.New(outerr)", + "\t}", + "\treturn output, nil", + "}" + ] + }, + { + "name": "IsRHEL", + 
"qualifiedName": "IsRHEL", + "exported": true, + "signature": "func(string)(bool)", + "doc": "IsRHEL determines whether the provided string signifies a Red Hat based release\n\nThe function examines the supplied text for patterns that indicate a\nnon‑Red Hat base image and immediately returns false if such patterns are\nfound. If no negative matches occur, it logs the content of\n/etc/redhat-release and checks against a regular expression describing\nofficial Red Hat releases, returning true when a match is detected.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/isredhat/isredhat.go:83", + "calls": [ + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindAllString", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "FindAllString", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat", + "name": "BaseImageInfo.TestContainerIsRedHatRelease", + "kind": "function", + "source": [ + "func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error) {", + "\toutput, err := b.runCommand(`if [ -e /etc/redhat-release ]; then cat /etc/redhat-release; else echo \\\"Unknown Base Image\\\"; fi`)", + "\tlog.Info(\"Output from /etc/redhat-release: %q\", output)", + "\tif err != nil {", + "\t\treturn false, err", + "\t}", + "\treturn IsRHEL(output), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func IsRHEL(output string) bool {", + "\t// If the 'Unknown Base Image' string 
appears, return false.", + "\tnotRedHatRegex := regexp.MustCompile(NotRedHatBasedRegex)", + "\tmatchNotRedhat := notRedHatRegex.FindAllString(output, -1)", + "\tif len(matchNotRedhat) \u003e 0 {", + "\t\treturn false", + "\t}", + "", + "\t// /etc/redhat-release exists. check if it matches the regex for an official build.", + "\tlog.Info(\"redhat-release was found to be: %s\", output)", + "\tredHatVersionRegex := regexp.MustCompile(VersionRegex)", + "\tmatchVersion := redHatVersionRegex.FindAllString(output, -1)", + "\treturn len(matchVersion) \u003e 0", + "}" + ] + }, + { + "name": "NewBaseImageTester", + "qualifiedName": "NewBaseImageTester", + "exported": true, + "signature": "func(clientsholder.Command, clientsholder.Context)(*BaseImageInfo)", + "doc": "NewBaseImageTester Creates a new instance of the base image tester\n\nThe function accepts a client holder and a contextual object representing a\nKubernetes pod or container. It constructs and returns a pointer to a struct\nthat stores these inputs for subsequent checks on the container's base image.\nNo additional processing occurs during construction.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/isredhat/isredhat.go:53", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testIsRedHatRelease", + "kind": "function", + "source": [ + "func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tbaseImageTester := isredhat.NewBaseImageTester(clientsholder.GetClientsHolder(), clientsholder.NewContext(cut.Namespace, cut.Podname, cut.Name))", + "", + "\t\tresult, err := baseImageTester.TestContainerIsRedHatRelease()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not collect 
release information from Container %q, err=%v\", cut, err)", + "\t\t}", + "\t\tif !result {", + "\t\t\tcheck.LogError(\"Container %q has failed the RHEL release check\", cut)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Failed the RHEL release check\", false))", + "\t\t} else {", + "\t\t\tcheck.LogInfo(\"Container %q has passed the RHEL release check\", cut)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Passed the RHEL release check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewBaseImageTester(client clientsholder.Command, ctx clientsholder.Context) *BaseImageInfo {", + "\treturn \u0026BaseImageInfo{", + "\t\tClientHolder: client,", + "\t\tOCPContext: ctx,", + "\t}", + "}" + ] + } + ], + "globals": null, + "consts": [ + { + "name": "NotRedHatBasedRegex", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/isredhat/isredhat.go:29" + }, + { + "name": "VersionRegex", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/isredhat/isredhat.go:31" + } + ] + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "nodetainted", + "files": 1, + "imports": [ + "errors", + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "regexp", + "strconv", + "strings" + ], + "structs": [ + { + "name": "KernelTaint", + "exported": true, + "doc": "KernelTaint Represents an individual kernel taint\n\nThis structure holds the human-readable description of a taint as well as its\nidentifying letters used by the kernel to mark nodes. 
The Description field\nexplains why the taint exists, while Letters contains the short string that\nis applied to node metadata. Instances are typically collected and examined\nwhen evaluating node health or scheduling constraints.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:102", + "fields": { + "Description": "string", + "Letters": "string" + }, + "methodNames": null, + "source": [ + "type KernelTaint struct {", + "\tDescription string", + "\tLetters string", + "}" + ] + }, + { + "name": "NodeTainted", + "exported": true, + "doc": "NodeTainted provides access to kernel taint information for a node\n\nIt holds the context and node name used to query system files and run shell\ncommands that expose kernel taints. The struct offers methods to retrieve the\nnumeric taint mask, list modules that set taints, and parse those module\ntaints from /sys/module. These functions enable inspection of tainted states\non a target node.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:37", + "fields": { + "ctx": "*clientsholder.Context", + "node": "string" + }, + "methodNames": [ + "GetKernelTaintsMask", + "GetTainterModules", + "getAllTainterModules" + ], + "source": [ + "type NodeTainted struct {", + "\tctx *clientsholder.Context", + "\tnode string", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "DecodeKernelTaintsFromBitMask", + "qualifiedName": "DecodeKernelTaintsFromBitMask", + "exported": true, + "signature": "func(uint64)([]string)", + "doc": "DecodeKernelTaintsFromBitMask Converts a bitmask into human‑readable kernel taint messages\n\nThe function iterates over all 64 bits of the supplied unsigned integer,\nchecking each bit for a set value. 
For every bit that is on, it calls a\nhelper to retrieve a descriptive message and appends that string to a slice.\nThe resulting list of strings represents the active kernel taints\ncorresponding to the original mask.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:159", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "name": "GetTaintMsg", + "kind": "function", + "source": [ + "func GetTaintMsg(bit int) string {", + "\tif taintMsg, exists := kernelTaints[bit]; exists {", + "\t\treturn fmt.Sprintf(\"%s (tainted bit %d)\", taintMsg.Description, bit)", + "\t}", + "", + "\treturn fmt.Sprintf(\"reserved (tainted bit %d)\", bit)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := 
n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func DecodeKernelTaintsFromBitMask(bitmask uint64) []string {", + "\ttaints := []string{}", + "\tfor i := 0; i \u003c 64; i++ {", + "\t\tbit := (bitmask \u003e\u003e i) \u0026 1", + "\t\tif bit == 1 {", + "\t\t\ttaints = append(taints, GetTaintMsg(i))", + "\t\t}", + "\t}", + "\treturn taints", + "}" + ] + }, + { + "name": "DecodeKernelTaintsFromLetters", + "qualifiedName": "DecodeKernelTaintsFromLetters", + "exported": true, + "signature": "func(string)([]string)", + "doc": "DecodeKernelTaintsFromLetters Converts a string of taint letters into descriptive taint strings\n\nThis routine iterates over each character in the input, matching it against a\npredefined list of kernel taints. For matched letters it builds a\nhuman‑readable description that includes the taint’s name, the letter\nused, and its bit index. If a letter is unknown it records an \"unknown taint\"\nentry. 
The resulting slice contains one entry per letter.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:192", + "calls": [ + { + "name": "string", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif 
!n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "NodeTainted.GetTainterModules", + "kind": "function", + "source": [ + "func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error) {", + "\t// First, get all the modules that are tainting the kernel in this node.", + "\tallTainters, err := nt.getAllTainterModules()", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get tainter modules: %w\", err)", + "\t}", + "", + "\tfilteredTainters := map[string]string{}", + "\tfor moduleName, moduleTaintsLetters := range allTainters {", + "\t\tmoduleTaints := DecodeKernelTaintsFromLetters(moduleTaintsLetters)", + "\t\tlog.Debug(\"%s: Module %s has taints (%s): %s\", nt.node, moduleName, moduleTaintsLetters, moduleTaints)", + "", + "\t\t// Apply allowlist.", + "\t\tif allowList[moduleName] {", + "\t\t\tlog.Debug(\"%s module %s is tainting the kernel but it has been allowlisted (taints: %v)\",", + "\t\t\t\tnt.node, moduleName, moduleTaints)", + "\t\t} else {", + "\t\t\tfilteredTainters[moduleName] = moduleTaintsLetters", + "\t\t}", + "\t}", + "", + "\t// Finally, get all the bits that all the modules have set.", + "\ttaintBits, err = GetTaintedBitsByModules(allTainters)", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get taint bits by modules: %w\", err)", + "\t}", + "", + "\treturn filteredTainters, taintBits, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func DecodeKernelTaintsFromLetters(letters string) []string {", + "\ttaints := []string{}", + "", + "\tfor _, l := range letters {", + "\t\ttaintLetter := 
string(l)", + "\t\tfound := false", + "", + "\t\tfor i := range kernelTaints {", + "\t\t\tkernelTaint := kernelTaints[i]", + "\t\t\tif strings.Contains(kernelTaint.Letters, taintLetter) {", + "\t\t\t\ttaints = append(taints, fmt.Sprintf(\"%s (taint letter:%s, bit:%d)\",", + "\t\t\t\t\tkernelTaint.Description, taintLetter, i))", + "\t\t\t\tfound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\t// The letter does not belong to any known (yet) taint...", + "\t\tif !found {", + "\t\t\ttaints = append(taints, fmt.Sprintf(\"unknown taint (letter %s)\", taintLetter))", + "\t\t}", + "\t}", + "", + "\treturn taints", + "}" + ] + }, + { + "name": "GetOtherTaintedBits", + "qualifiedName": "GetOtherTaintedBits", + "exported": true, + "signature": "func(uint64, map[int]bool)([]int)", + "doc": "GetOtherTaintedBits Identifies kernel taint bits not associated with any module\n\nThe function examines a 64‑bit mask of currently set kernel taints and\ncompares each bit to a map that records which bits have been set by known\nmodules. It iterates over all possible bit positions, collecting those that\nare active in the mask but absent from the module record. 
The result is a\nslice of integers representing the indices of these orphaned taint bits.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:271", + "calls": [ + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. 
Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetOtherTaintedBits(taintsMask uint64, taintedBitsByModules map[int]bool) []int {", + "\totherTaintedBits := []int{}", + "\t// Lastly, check that all kernel taint bits come from modules.", + "\tfor i := 0; i \u003c 64; i++ {", + "\t\t// helper var that is true if bit \"i\" is set.", + "\t\tbitIsSet := (taintsMask \u0026 (1 \u003c\u003c i)) \u003e 0", + "", + "\t\tif bitIsSet \u0026\u0026 !taintedBitsByModules[i] {", + "\t\t\totherTaintedBits = append(otherTaintedBits, i)", + "\t\t}", + "\t}", + "", + "\treturn otherTaintedBits", + "}" + ] + }, + { + "name": "GetTaintMsg", + "qualifiedName": "GetTaintMsg", + "exported": true, + "signature": "func(int)(string)", + "doc": "GetTaintMsg Retrieves a descriptive message for a kernel taint bit\n\nThis function looks up the given integer bit in a predefined map of known\nkernel taints. If found, it returns the taint's description along with the\nbit number; otherwise it indicates the bit is reserved. 
The output string is\nused to label taint information throughout the test suite.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:144", + "calls": [ + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. 
Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "DecodeKernelTaintsFromBitMask", + "kind": "function", + "source": [ + "func DecodeKernelTaintsFromBitMask(bitmask uint64) []string {", + "\ttaints := []string{}", + "\tfor i := 0; i \u003c 64; i++ {", + "\t\tbit := (bitmask \u003e\u003e i) \u0026 1", + "\t\tif bit == 1 {", + "\t\t\ttaints = append(taints, GetTaintMsg(i))", + "\t\t}", + "\t}", + "\treturn taints", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTaintMsg(bit int) string {", + "\tif taintMsg, exists := kernelTaints[bit]; exists {", + "\t\treturn fmt.Sprintf(\"%s (tainted bit %d)\", taintMsg.Description, bit)", + "\t}", + "", + "\treturn fmt.Sprintf(\"reserved (tainted bit %d)\", bit)", + "}" + ] + }, + { + "name": "GetTaintedBitsByModules", + "qualifiedName": "GetTaintedBitsByModules", + "exported": true, + "signature": "func(map[string]string)(map[int]bool, error)", + "doc": "GetTaintedBitsByModules Collects kernel taint bits from module letters\n\nThis function receives a map of modules to their taint letter strings. It\niterates over each letter, converts it to the corresponding bit position\nusing a helper, and records that bit as true in a result map. 
Errors are\nreturned if any letter cannot be mapped to a known taint.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:245", + "calls": [ + { + "name": "string", + "kind": "function" + }, + { + "name": "getBitPosFromLetter", + "kind": "function", + "source": [ + "func getBitPosFromLetter(letter string) (int, error) {", + "\tif letter == \"\" || len(letter) \u003e 1 {", + "\t\treturn 0, fmt.Errorf(\"input string must contain one letter\")", + "\t}", + "", + "\tfor bit, taint := range kernelTaints {", + "\t\tif strings.Contains(taint.Letters, letter) {", + "\t\t\treturn bit, nil", + "\t\t}", + "\t}", + "", + "\treturn 0, fmt.Errorf(\"letter %s does not belong to any known kernel taint\", letter)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "NodeTainted.GetTainterModules", + "kind": "function", + "source": [ + "func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error) {", + "\t// First, get all the modules that are tainting the kernel in this node.", + "\tallTainters, err := nt.getAllTainterModules()", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get tainter modules: %w\", err)", + "\t}", + "", + "\tfilteredTainters := map[string]string{}", + "\tfor moduleName, moduleTaintsLetters := range allTainters {", + "\t\tmoduleTaints := DecodeKernelTaintsFromLetters(moduleTaintsLetters)", + "\t\tlog.Debug(\"%s: Module %s has taints (%s): %s\", nt.node, moduleName, moduleTaintsLetters, moduleTaints)", + "", + "\t\t// Apply allowlist.", + "\t\tif allowList[moduleName] {", + "\t\t\tlog.Debug(\"%s module %s is tainting the kernel but it has been allowlisted (taints: %v)\",", + "\t\t\t\tnt.node, moduleName, moduleTaints)", + "\t\t} else {", + "\t\t\tfilteredTainters[moduleName] 
= moduleTaintsLetters", + "\t\t}", + "\t}", + "", + "\t// Finally, get all the bits that all the modules have set.", + "\ttaintBits, err = GetTaintedBitsByModules(allTainters)", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get taint bits by modules: %w\", err)", + "\t}", + "", + "\treturn filteredTainters, taintBits, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetTaintedBitsByModules(tainters map[string]string) (map[int]bool, error) {", + "\ttaintedBits := map[int]bool{}", + "", + "\tfor tainter, letters := range tainters {", + "\t\t// Save taint bits from this module.", + "\t\tfor i := range letters {", + "\t\t\tletter := string(letters[i])", + "\t\t\tbit, err := getBitPosFromLetter(letter)", + "\t\t\tif err != nil {", + "\t\t\t\treturn nil, fmt.Errorf(\"module %s has invalid taint letter %s: %w\", tainter, letter, err)", + "\t\t\t}", + "", + "\t\t\ttaintedBits[bit] = true", + "\t\t}", + "\t}", + "", + "\treturn taintedBits, nil", + "}" + ] + }, + { + "name": "NewNodeTaintedTester", + "qualifiedName": "NewNodeTaintedTester", + "exported": true, + "signature": "func(*clientsholder.Context, string)(*NodeTainted)", + "doc": "NewNodeTaintedTester Creates a tester for checking kernel taints on a node\n\nThis function constructs and returns a new instance of the NodeTainted type.\nIt stores the provided client context and node name so that subsequent\nmethods can interact with the node’s kernel taint state via the Kubernetes\nAPI. 
The returned object is used by test logic to retrieve and analyze taints\nfor compliance checks.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:63", + "calls": null, + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. 
Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func NewNodeTaintedTester(context *clientsholder.Context, node string) *NodeTainted {", + "\treturn \u0026NodeTainted{", + "\t\tctx: context,", + "\t\tnode: node,", + "\t}", + "}" + ] + }, + { + "name": "GetKernelTaintsMask", + "qualifiedName": "NodeTainted.GetKernelTaintsMask", + "exported": true, + "receiver": "NodeTainted", + "signature": "func()(uint64, error)", + "doc": "NodeTainted.GetKernelTaintsMask Retrieves the kernel taints bitmask from a node\n\nThis method runs a command to read /proc/sys/kernel/tainted, cleans up any\nwhitespace characters, then parses the resulting string as an unsigned\ninteger in base ten. If parsing fails it returns an error indicating the\nmalformed value. 
On success it returns the taints mask as a uint64 and a nil\nerror.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:77", + "calls": [ + { + "name": "runCommand", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strconv", + "name": "ParseUint", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (nt *NodeTainted) GetKernelTaintsMask() (uint64, error) {", + "\toutput, err := runCommand(nt.ctx, `cat /proc/sys/kernel/tainted`)", + "\tif err != nil {", + "\t\treturn 0, err", + "\t}", + "\toutput = strings.ReplaceAll(output, \"\\n\", \"\")", + "\toutput = strings.ReplaceAll(output, \"\\r\", \"\")", + "\toutput = strings.ReplaceAll(output, \"\\t\", \"\")", + "", + "\t// Convert to number.", + "\ttaintsMask, err := strconv.ParseUint(output, 10, 64) // base 10 and uint64", + "\tif err != nil {", + "\t\treturn 0, fmt.Errorf(\"failed to decode taints mask %q: %w\", output, err)", + "\t}", + "", + "\treturn taintsMask, nil", + "}" + ] + }, + { + "name": "GetTainterModules", + "qualifiedName": "NodeTainted.GetTainterModules", + "exported": true, + "receiver": "NodeTainted", + "signature": "func(map[string]bool)(map[string]string, map[int]bool, error)", + "doc": "NodeTainted.GetTainterModules Retrieves non-allowlisted modules that set kernel taint bits\n\nThe method runs a command on the node to list all modules with taint letters,\nthen filters out those present in an allowlist. It returns a map of module\nnames to their taint letter strings and another map indicating which taint\nbits are set across all modules. 
Errors from command execution or parsing are\nwrapped and returned.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:345", + "calls": [ + { + "name": "NodeTainted.getAllTainterModules", + "kind": "function", + "source": [ + "func (nt *NodeTainted) getAllTainterModules() (map[string]string, error) {", + "\tconst (", + "\t\tcommand = \"modules=`ls /sys/module`; for module_name in $modules; do taint_file=/sys/module/$module_name/taint; \" +", + "\t\t\t\"if [ -f $taint_file ]; then taints=`cat $taint_file`; \" +", + "\t\t\t\"if [[ ${#taints} -gt 0 ]]; then echo \\\"$module_name `cat $taint_file`\\\"; fi; fi; done\"", + "", + "\t\tnumFields = 2", + "\t\tposModuleName = 0", + "\t\tposModuleTaints = 1", + "\t)", + "", + "\tcmdOutput, err := runCommand(nt.ctx, command)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to run command: %w\", err)", + "\t}", + "", + "\tlines := strings.Split(cmdOutput, \"\\n\")", + "", + "\t// Parse line by line: \"module_name taints\"", + "\ttainters := map[string]string{}", + "", + "\tfor _, line := range lines {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\telems := strings.Split(line, \" \")", + "\t\tif len(elems) != numFields {", + "\t\t\treturn nil, fmt.Errorf(\"failed to parse line %q (output=%s)\", line, cmdOutput)", + "\t\t}", + "", + "\t\tmoduleName := elems[posModuleName]", + "\t\tmoduleTaints := elems[posModuleTaints]", + "", + "\t\t// Save to the all tainters list.", + "\t\tif taints, exist := tainters[moduleName]; exist {", + "\t\t\treturn nil, fmt.Errorf(\"module %s (taints %s) has already been parsed (taints %s)\",", + "\t\t\t\tmoduleName, moduleTaints, taints)", + "\t\t}", + "", + "\t\ttainters[moduleName] = moduleTaints", + "\t}", + "", + "\treturn tainters, nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "DecodeKernelTaintsFromLetters", + "kind": "function", + "source": [ + "func 
DecodeKernelTaintsFromLetters(letters string) []string {", + "\ttaints := []string{}", + "", + "\tfor _, l := range letters {", + "\t\ttaintLetter := string(l)", + "\t\tfound := false", + "", + "\t\tfor i := range kernelTaints {", + "\t\t\tkernelTaint := kernelTaints[i]", + "\t\t\tif strings.Contains(kernelTaint.Letters, taintLetter) {", + "\t\t\t\ttaints = append(taints, fmt.Sprintf(\"%s (taint letter:%s, bit:%d)\",", + "\t\t\t\t\tkernelTaint.Description, taintLetter, i))", + "\t\t\t\tfound = true", + "\t\t\t\tbreak", + "\t\t\t}", + "\t\t}", + "", + "\t\t// The letter does not belong to any known (yet) taint...", + "\t\tif !found {", + "\t\t\ttaints = append(taints, fmt.Sprintf(\"unknown taint (letter %s)\", taintLetter))", + "\t\t}", + "\t}", + "", + "\treturn taints", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "name": "GetTaintedBitsByModules", + "kind": "function", + "source": [ + "func GetTaintedBitsByModules(tainters map[string]string) (map[int]bool, error) {", + "\ttaintedBits := map[int]bool{}", + "", + "\tfor tainter, letters := range tainters {", + "\t\t// Save taint bits from this module.", + "\t\tfor i := range letters {", + "\t\t\tletter := string(letters[i])", + "\t\t\tbit, err := getBitPosFromLetter(letter)", + "\t\t\tif err != nil {", + "\t\t\t\treturn nil, fmt.Errorf(\"module %s has invalid taint letter %s: %w\", tainter, letter, err)", + "\t\t\t}", + "", + "\t\t\ttaintedBits[bit] = true", + "\t\t}", + "\t}", + "", + "\treturn taintedBits, nil", 
+ "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error) {", + "\t// First, get all the modules that are tainting the kernel in this node.", + "\tallTainters, err := nt.getAllTainterModules()", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get tainter modules: %w\", err)", + "\t}", + "", + "\tfilteredTainters := map[string]string{}", + "\tfor moduleName, moduleTaintsLetters := range allTainters {", + "\t\tmoduleTaints := DecodeKernelTaintsFromLetters(moduleTaintsLetters)", + "\t\tlog.Debug(\"%s: Module %s has taints (%s): %s\", nt.node, moduleName, moduleTaintsLetters, moduleTaints)", + "", + "\t\t// Apply allowlist.", + "\t\tif allowList[moduleName] {", + "\t\t\tlog.Debug(\"%s module %s is tainting the kernel but it has been allowlisted (taints: %v)\",", + "\t\t\t\tnt.node, moduleName, moduleTaints)", + "\t\t} else {", + "\t\t\tfilteredTainters[moduleName] = moduleTaintsLetters", + "\t\t}", + "\t}", + "", + "\t// Finally, get all the bits that all the modules have set.", + "\ttaintBits, err = GetTaintedBitsByModules(allTainters)", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get taint bits by modules: %w\", err)", + "\t}", + "", + "\treturn filteredTainters, taintBits, nil", + "}" + ] + }, + { + "name": "getAllTainterModules", + "qualifiedName": "NodeTainted.getAllTainterModules", + "exported": false, + "receiver": "NodeTainted", + "signature": "func()(map[string]string, error)", + "doc": "NodeTainted.getAllTainterModules Retrieves all kernel modules that are tainting the node\n\nThe function runs a shell command to list every module in /sys/module, reads\neach module's taint file if present, and collects non‑empty taints into a\nmap keyed by module name. 
It returns this mapping or an error if the command\nfails or parsing encounters duplicate entries or malformed lines.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:292", + "calls": [ + { + "name": "runCommand", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "NodeTainted.GetTainterModules", + "kind": "function", + "source": [ + "func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error) {", + "\t// First, get all the modules that are tainting the kernel in this node.", + "\tallTainters, err := nt.getAllTainterModules()", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get tainter modules: %w\", err)", + "\t}", + "", + "\tfilteredTainters := map[string]string{}", + "\tfor moduleName, moduleTaintsLetters := range allTainters {", + "\t\tmoduleTaints := DecodeKernelTaintsFromLetters(moduleTaintsLetters)", + "\t\tlog.Debug(\"%s: Module %s has taints (%s): %s\", nt.node, moduleName, moduleTaintsLetters, moduleTaints)", + "", + "\t\t// Apply allowlist.", + "\t\tif allowList[moduleName] {", + "\t\t\tlog.Debug(\"%s module %s is tainting the kernel but it has been allowlisted (taints: %v)\",", + "\t\t\t\tnt.node, moduleName, moduleTaints)", + "\t\t} else {", + "\t\t\tfilteredTainters[moduleName] = moduleTaintsLetters", + "\t\t}", + "\t}", + "", + "\t// Finally, get all the bits that all the modules have set.", + "\ttaintBits, err = 
GetTaintedBitsByModules(allTainters)", + "\tif err != nil {", + "\t\treturn nil, nil, fmt.Errorf(\"failed to get taint bits by modules: %w\", err)", + "\t}", + "", + "\treturn filteredTainters, taintBits, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func (nt *NodeTainted) getAllTainterModules() (map[string]string, error) {", + "\tconst (", + "\t\tcommand = \"modules=`ls /sys/module`; for module_name in $modules; do taint_file=/sys/module/$module_name/taint; \" +", + "\t\t\t\"if [ -f $taint_file ]; then taints=`cat $taint_file`; \" +", + "\t\t\t\"if [[ ${#taints} -gt 0 ]]; then echo \\\"$module_name `cat $taint_file`\\\"; fi; fi; done\"", + "", + "\t\tnumFields = 2", + "\t\tposModuleName = 0", + "\t\tposModuleTaints = 1", + "\t)", + "", + "\tcmdOutput, err := runCommand(nt.ctx, command)", + "\tif err != nil {", + "\t\treturn nil, fmt.Errorf(\"failed to run command: %w\", err)", + "\t}", + "", + "\tlines := strings.Split(cmdOutput, \"\\n\")", + "", + "\t// Parse line by line: \"module_name taints\"", + "\ttainters := map[string]string{}", + "", + "\tfor _, line := range lines {", + "\t\tif line == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\telems := strings.Split(line, \" \")", + "\t\tif len(elems) != numFields {", + "\t\t\treturn nil, fmt.Errorf(\"failed to parse line %q (output=%s)\", line, cmdOutput)", + "\t\t}", + "", + "\t\tmoduleName := elems[posModuleName]", + "\t\tmoduleTaints := elems[posModuleTaints]", + "", + "\t\t// Save to the all tainters list.", + "\t\tif taints, exist := tainters[moduleName]; exist {", + "\t\t\treturn nil, fmt.Errorf(\"module %s (taints %s) has already been parsed (taints %s)\",", + "\t\t\t\tmoduleName, moduleTaints, taints)", + "\t\t}", + "", + "\t\ttainters[moduleName] = moduleTaints", + "\t}", + "", + "\treturn tainters, nil", + "}" + ] + }, + { + "name": "RemoveAllExceptNumbers", + "qualifiedName": "RemoveAllExceptNumbers", + "exported": true, + "signature": "func(string)(string)", 
+ "doc": "RemoveAllExceptNumbers strips all non-digit characters from a string\n\nThis function takes an input string, compiles a regular expression that\nmatches any non‑digit sequence, and replaces those sequences with nothing.\nThe result is a new string containing only the numeric characters that were\npresent in the original input.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:176", + "calls": [ + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "ReplaceAllString", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testTainted", + "kind": "function", + "source": [ + "func testTainted(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t// errNodes has nodes that failed some operation while checking kernel taints.", + "\terrNodes := []string{}", + "", + "\ttype badModuleTaints struct {", + "\t\tname string", + "\t\ttaints []string", + "\t}", + "", + "\t// badModules maps node names to list of \"bad\"/offending modules.", + "\tbadModules := map[string][]badModuleTaints{}", + "\t// otherTaints maps a node to a list of taint bits that haven't been set by any module.", + "\totherTaints := map[string][]int{}", + "", + "\tcheck.LogInfo(\"Modules allowlist: %+v\", env.Config.AcceptedKernelTaints)", + "\t// helper map to make the checks easier.", + "\tallowListedModules := map[string]bool{}", + "\tfor _, module := range env.Config.AcceptedKernelTaints {", + "\t\tallowListedModules[module.Module] = true", + "\t}", + "", + "\t// Loop through the probe pods that are tied to each node.", + "\tfor _, n := range env.Nodes {", + "\t\tnodeName := n.Data.Name", + "\t\tcheck.LogInfo(\"Testing node %q\", nodeName)", + "", + "\t\t// Ensure we are only testing nodes that have CNF 
workload deployed on them.", + "\t\tif !n.HasWorkloadDeployed(env.Pods) {", + "\t\t\tcheck.LogInfo(\"Node %q has no workload deployed on it. Skipping tainted kernel check.\", nodeName)", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tdp := env.ProbePods[nodeName]", + "", + "\t\tocpContext := clientsholder.NewContext(dp.Namespace, dp.Name, dp.Spec.Containers[0].Name)", + "\t\ttf := nodetainted.NewNodeTaintedTester(\u0026ocpContext, nodeName)", + "", + "\t\t// Get the taints mask from the node kernel", + "\t\ttaintsMask, err := tf.GetKernelTaintsMask()", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Failed to retrieve kernel taint information from node %q, err: %v\", nodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to retrieve kernel taint information from node\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tif taintsMask == 0 {", + "\t\t\tcheck.LogInfo(\"Node %q has no non-approved kernel taints.\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node has no non-approved kernel taints\", true))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tcheck.LogInfo(\"Node %q kernel is tainted. Taints mask=%d - Decoded taints: %v\",", + "\t\t\tnodeName, taintsMask, nodetainted.DecodeKernelTaintsFromBitMask(taintsMask))", + "", + "\t\t// Check the allow list. If empty, mark this node as failed.", + "\t\tif len(allowListedModules) == 0 {", + "\t\t\ttaintsMaskStr := strconv.FormatUint(taintsMask, 10)", + "\t\t\ttaintsStr := strings.Join(nodetainted.DecodeKernelTaintsFromBitMask(taintsMask), \",\")", + "\t\t\tcheck.LogError(\"Node %q contains taints not covered by module allowlist. 
Taints: %q (mask=%q)\", nodeName, taintsStr, taintsMaskStr)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Node contains taints not covered by module allowlist\", false).", + "\t\t\t\tAddField(testhelper.TaintMask, taintsMaskStr).", + "\t\t\t\tAddField(testhelper.Taints, taintsStr))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// allow list check.", + "\t\t// Get the list of modules (tainters) that have set a taint bit.", + "\t\t// 1. Each module should appear in the allow list.", + "\t\t// 2. All kernel taint bits (one bit \u003c-\u003e one letter) should have been set by at least", + "\t\t// one tainter module.", + "\t\ttainters, taintBitsByAllModules, err := tf.GetTainterModules(allowListedModules)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get tainter modules from node %q, err: %v\", nodeName, err)", + "\t\t\terrNodes = append(errNodes, nodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(nodeName, \"Failed to get tainter modules\", false).", + "\t\t\t\tAddField(testhelper.Error, err.Error()))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Keep track of whether or not this node is compliant with module allow list.", + "\t\tcompliantNode := true", + "", + "\t\t// Save modules' names only.", + "\t\tfor moduleName, taintsLetters := range tainters {", + "\t\t\tmoduleTaints := nodetainted.DecodeKernelTaintsFromLetters(taintsLetters)", + "\t\t\tbadModules[nodeName] = append(badModules[nodeName], badModuleTaints{name: moduleName, taints: moduleTaints})", + "", + "\t\t\t// Create non-compliant taint objects for each of the taints", + "\t\t\tfor _, taint := range moduleTaints {", + "\t\t\t\tcheck.LogError(\"Node %q - module %q taints kernel: %q\", nodeName, moduleName, taint)", + "\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(nodetainted.RemoveAllExceptNumbers(taint), nodeName, taint, 
false).AddField(testhelper.ModuleName, moduleName))", + "", + "\t\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\t\tcompliantNode = false", + "\t\t\t}", + "\t\t}", + "", + "\t\t// Lastly, check that all kernel taint bits come from modules.", + "\t\totherKernelTaints := nodetainted.GetOtherTaintedBits(taintsMask, taintBitsByAllModules)", + "\t\tfor _, taintedBit := range otherKernelTaints {", + "\t\t\tcheck.LogError(\"Node %q - taint bit %d is set but it is not caused by any module.\", nodeName, taintedBit)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewTaintReportObject(strconv.Itoa(taintedBit), nodeName, nodetainted.GetTaintMsg(taintedBit), false).", + "\t\t\t\tAddField(testhelper.ModuleName, \"N/A\"))", + "\t\t\totherTaints[nodeName] = append(otherTaints[nodeName], taintedBit)", + "", + "\t\t\t// Set the node as non-compliant for future reporting", + "\t\t\tcompliantNode = false", + "\t\t}", + "", + "\t\tif compliantNode {", + "\t\t\tcheck.LogInfo(\"Node %q passed the tainted kernel check\", nodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(nodeName, \"Passed the tainted kernel check\", true))", + "\t\t}", + "\t}", + "", + "\tif len(errNodes) \u003e 0 {", + "\t\tcheck.LogError(\"Failed to get kernel taints from some nodes: %+v\", errNodes)", + "\t}", + "", + "\tif len(badModules) \u003e 0 || len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Nodes have been found to be tainted. 
Tainted modules: %+v\", badModules)", + "\t}", + "", + "\tif len(otherTaints) \u003e 0 {", + "\t\tcheck.LogError(\"Taints not related to any module: %+v\", otherTaints)", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func RemoveAllExceptNumbers(incomingStr string) string {", + "\t// example string \", bit:10)\"", + "\t// return 10", + "", + "\t// remove all characters except numbers", + "\tre := regexp.MustCompile(`\\D+`)", + "\treturn re.ReplaceAllString(incomingStr, \"\")", + "}" + ] + }, + { + "name": "getBitPosFromLetter", + "qualifiedName": "getBitPosFromLetter", + "exported": false, + "signature": "func(string)(int, error)", + "doc": "getBitPosFromLetter Finds the bit index of a kernel taint letter\n\nThe function accepts a single-character string representing a module taint\nand searches through a predefined list of known kernel taints to determine\nits corresponding bit position. 
It returns that integer index if found,\notherwise it produces an error indicating the letter is invalid or unknown.\nInput validation ensures only one character is processed.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:225", + "calls": [ + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted", + "name": "GetTaintedBitsByModules", + "kind": "function", + "source": [ + "func GetTaintedBitsByModules(tainters map[string]string) (map[int]bool, error) {", + "\ttaintedBits := map[int]bool{}", + "", + "\tfor tainter, letters := range tainters {", + "\t\t// Save taint bits from this module.", + "\t\tfor i := range letters {", + "\t\t\tletter := string(letters[i])", + "\t\t\tbit, err := getBitPosFromLetter(letter)", + "\t\t\tif err != nil {", + "\t\t\t\treturn nil, fmt.Errorf(\"module %s has invalid taint letter %s: %w\", tainter, letter, err)", + "\t\t\t}", + "", + "\t\t\ttaintedBits[bit] = true", + "\t\t}", + "\t}", + "", + "\treturn taintedBits, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getBitPosFromLetter(letter string) (int, error) {", + "\tif letter == \"\" || len(letter) \u003e 1 {", + "\t\treturn 0, fmt.Errorf(\"input string must contain one letter\")", + "\t}", + "", + "\tfor bit, taint := range kernelTaints {", + "\t\tif strings.Contains(taint.Letters, letter) {", + "\t\t\treturn bit, nil", + "\t\t}", + "\t}", + "", + "\treturn 0, fmt.Errorf(\"letter %s does not belong to any known kernel taint\", letter)", + "}" + ] + } + ], + "globals": [ + { + "name": "kernelTaints", + "exported": false, + "type": "", + "position": 
"/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:107" + }, + { + "name": "runCommand", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/platform/nodetainted/nodetainted.go:42" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/operatingsystem", + "name": "operatingsystem", + "files": 1, + "imports": [ + "embed", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GetRHCOSMappedVersions", + "qualifiedName": "GetRHCOSMappedVersions", + "exported": true, + "signature": "func(string)(map[string]string, error)", + "doc": "GetRHCOSMappedVersions Parses a formatted string of RHCOS versions into a mapping\n\nThe function receives a multiline string where each line contains a short\nRHCOS version, a slash, and its long-form counterpart. It splits the input by\nnewline, trims whitespace, ignores empty lines, then separates each pair on\nthe slash to build a map from short to long versions. 
The resulting map is\nreturned along with any error .", + "position": "/Users/deliedit/dev/certsuite/tests/platform/operatingsystem/operatingsystem.go:38", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "TrimSpace", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/operatingsystem", + "name": "GetShortVersionFromLong", + "kind": "function", + "source": [ + "func GetShortVersionFromLong(longVersion string) (string, error) {", + "\tcapturedVersions, err := GetRHCOSMappedVersions(rhcosVersionMap)", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\t// search through all available rhcos versions for a match", + "\tfor s, l := range capturedVersions {", + "\t\tif l == longVersion {", + "\t\t\treturn s, nil", + "\t\t}", + "\t}", + "", + "\t// return \"version-not-found\" if the short version cannot be found", + "\treturn NotFoundStr, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetRHCOSMappedVersions(rhcosVersionMap string) (map[string]string, error) {", + "\tcapturedInfo := make(map[string]string)", + "", + "\t// Example: Translate `Red Hat Enterprise Linux CoreOS 410.84.202205031645-0 (Ootpa)` into a RHCOS version number", + "\t// and long-form counterpart", + "", + "\t/// Example lines from the captured file", + "\t// 4.9.21 / 49.84.202202081504-0", + "\t// 4.9.25 / 49.84.202203112054-0", + "\t// 4.10.14 / 410.84.202205031645-0", + "", + "\tversions := strings.Split(rhcosVersionMap, \"\\n\")", + "\tfor _, v := range versions {", + "\t\t// Skip any empty lines", + "\t\tif 
strings.TrimSpace(v) == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Split on the / and capture the line into the map", + "\t\tsplitVersion := strings.Split(v, \"/\")", + "\t\tcapturedInfo[strings.TrimSpace(splitVersion[0])] = strings.TrimSpace(splitVersion[1])", + "\t}", + "", + "\treturn capturedInfo, nil", + "}" + ] + }, + { + "name": "GetShortVersionFromLong", + "qualifiedName": "GetShortVersionFromLong", + "exported": true, + "signature": "func(string)(string, error)", + "doc": "GetShortVersionFromLong Retrieves a concise RHCOS version string from its full identifier\n\nThe function looks up the provided long-form RHCOS identifier in a preloaded\nmapping of short-to-long versions. If a match is found, it returns the\ncorresponding short version; otherwise, it returns a sentinel value\nindicating that the version was not located.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/operatingsystem/operatingsystem.go:70", + "calls": [ + { + "name": "GetRHCOSMappedVersions", + "kind": "function", + "source": [ + "func GetRHCOSMappedVersions(rhcosVersionMap string) (map[string]string, error) {", + "\tcapturedInfo := make(map[string]string)", + "", + "\t// Example: Translate `Red Hat Enterprise Linux CoreOS 410.84.202205031645-0 (Ootpa)` into a RHCOS version number", + "\t// and long-form counterpart", + "", + "\t/// Example lines from the captured file", + "\t// 4.9.21 / 49.84.202202081504-0", + "\t// 4.9.25 / 49.84.202203112054-0", + "\t// 4.10.14 / 410.84.202205031645-0", + "", + "\tversions := strings.Split(rhcosVersionMap, \"\\n\")", + "\tfor _, v := range versions {", + "\t\t// Skip any empty lines", + "\t\tif strings.TrimSpace(v) == \"\" {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\t// Split on the / and capture the line into the map", + "\t\tsplitVersion := strings.Split(v, \"/\")", + "\t\tcapturedInfo[strings.TrimSpace(splitVersion[0])] = strings.TrimSpace(splitVersion[1])", + "\t}", + "", + "\treturn capturedInfo, nil", + "}" + ] + } 
+ ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "Node.GetRHCOSVersion", + "kind": "function", + "source": [ + "func (node *Node) GetRHCOSVersion() (string, error) {", + "\t// Check if the node is running CoreOS or not", + "\tif !node.IsRHCOS() {", + "\t\treturn \"\", fmt.Errorf(\"invalid OS type: %s\", node.Data.Status.NodeInfo.OSImage)", + "\t}", + "", + "\t// Red Hat Enterprise Linux CoreOS 410.84.202205031645-0 (Ootpa) --\u003e 410.84.202205031645-0", + "\tsplitStr := strings.Split(node.Data.Status.NodeInfo.OSImage, rhcosName)", + "\tlongVersionSplit := strings.Split(strings.TrimSpace(splitStr[1]), \" \")", + "", + "\t// Get the short version string from the long version string", + "\tshortVersion, err := operatingsystem.GetShortVersionFromLong(longVersionSplit[0])", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\treturn shortVersion, nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetShortVersionFromLong(longVersion string) (string, error) {", + "\tcapturedVersions, err := GetRHCOSMappedVersions(rhcosVersionMap)", + "\tif err != nil {", + "\t\treturn \"\", err", + "\t}", + "", + "\t// search through all available rhcos versions for a match", + "\tfor s, l := range capturedVersions {", + "\t\tif l == longVersion {", + "\t\t\treturn s, nil", + "\t\t}", + "\t}", + "", + "\t// return \"version-not-found\" if the short version cannot be found", + "\treturn NotFoundStr, nil", + "}" + ] + } + ], + "globals": [ + { + "name": "rhcosVersionMap", + "exported": false, + "type": "string", + "doc": "go:embed files/rhcos_version_map", + "position": "/Users/deliedit/dev/certsuite/tests/platform/operatingsystem/operatingsystem.go:29" + } + ], + "consts": [ + { + "name": "NotFoundStr", + "exported": true, + "position": "/Users/deliedit/dev/certsuite/tests/platform/operatingsystem/operatingsystem.go:25" + } + ] + }, + { + "path": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/sysctlconfig", + "name": "sysctlconfig", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "regexp", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "GetSysctlSettings", + "qualifiedName": "GetSysctlSettings", + "exported": true, + "signature": "func(*provider.TestEnvironment, string)(map[string]string, error)", + "doc": "GetSysctlSettings Retrieves system configuration values from a node's probe pod\n\nThis function runs the command \"chroot /host sysctl --system\" inside a\ndesignated probe container to collect kernel settings for a specified node.\nIt captures standard output and parses each line into key/value pairs,\nignoring comments or non‑matching lines. The resulting map of setting names\nto values is returned, with an error if the command fails or produces\nunexpected output.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/sysctlconfig/sysctlconfig.go:63", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetClientsHolder", + "kind": "function", + "source": [ + "func GetClientsHolder(filenames ...string) *ClientsHolder {", + "\tif clientsHolder.ready {", + "\t\treturn \u0026clientsHolder", + "\t}", + "\tclientsHolder, err := newClientsHolder(filenames...)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "\treturn clientsHolder", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "NewContext", + "kind": "function", + "source": [ + "func NewContext(namespace, podName, containerName string) Context {", + "\treturn Context{", + "\t\tnamespace: namespace,", + "\t\tpodName: podName,", + "\t\tcontainerName: 
containerName,", + "\t}", + "}" + ] + }, + { + "name": "ExecCommandContainer", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Errorf", + "kind": "function" + }, + { + "name": "parseSysctlSystemOutput", + "kind": "function", + "source": [ + "func parseSysctlSystemOutput(sysctlSystemOutput string) map[string]string {", + "\tretval := make(map[string]string)", + "\tsplitConfig := strings.Split(sysctlSystemOutput, \"\\n\")", + "\tfor _, line := range splitConfig {", + "\t\tif strings.HasPrefix(line, \"*\") {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tkeyValRegexp := regexp.MustCompile(`(\\S+)\\s*=\\s*(\\S+)`) // A line is of the form \"kernel.yama.ptrace_scope = 0\"", + "\t\tif !keyValRegexp.MatchString(line) {", + "\t\t\tcontinue", + "\t\t}", + "\t\tregexResults := keyValRegexp.FindStringSubmatch(line)", + "\t\tkey := regexResults[1]", + "\t\tval := regexResults[2]", + "\t\tretval[key] = val", + "\t}", + "\treturn retval", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform", + "name": "testSysctlConfigs", + "kind": "function", + "source": [ + "func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) {", + "\tvar compliantObjects []*testhelper.ReportObject", + "\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\talreadyCheckedNodes := map[string]bool{}", + "\tfor _, cut := range env.Containers {", + "\t\tcheck.LogInfo(\"Testing Container %q\", cut)", + "\t\tif alreadyCheckedNodes[cut.NodeName] {", + "\t\t\tcontinue", + "\t\t}", + "\t\talreadyCheckedNodes[cut.NodeName] = true", + "\t\tprobePod := env.ProbePods[cut.NodeName]", + "\t\tif probePod == nil {", + "\t\t\tcheck.LogError(\"Probe Pod not found for node %q\", cut.NodeName)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"tnf probe pod not found\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tsysctlSettings, err := 
sysctlconfig.GetSysctlSettings(env, cut.NodeName)", + "\t\tif err != nil {", + "\t\t\tcheck.LogError(\"Could not get sysctl settings for node %q, error: %v\", cut.NodeName, err)", + "\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Could not get sysctl settings\", false))", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tmcKernelArgumentsMap := bootparams.GetMcKernelArguments(env, cut.NodeName)", + "\t\tvalidSettings := true", + "\t\tfor key, sysctlConfigVal := range sysctlSettings {", + "\t\t\tif mcVal, ok := mcKernelArgumentsMap[key]; ok {", + "\t\t\t\tif mcVal != sysctlConfigVal {", + "\t\t\t\t\tcheck.LogError(\"Kernel config mismatch in node %q for %q (sysctl value: %q, machine config value: %q)\",", + "\t\t\t\t\t\tcut.NodeName, key, sysctlConfigVal, mcVal)", + "\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewNodeReportObject(cut.NodeName, fmt.Sprintf(\"Kernel config mismatch for %s\", key), false))", + "\t\t\t\t\tvalidSettings = false", + "\t\t\t\t}", + "\t\t\t}", + "\t\t}", + "\t\tif validSettings {", + "\t\t\tcheck.LogInfo(\"Node %q passed the sysctl config check\", cut.NodeName)", + "\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewNodeReportObject(cut.NodeName, \"Passed the sysctl config check\", true))", + "\t\t}", + "\t}", + "", + "\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetSysctlSettings(env *provider.TestEnvironment, nodeName string) (map[string]string, error) {", + "\tconst (", + "\t\tsysctlCommand = \"chroot /host sysctl --system\"", + "\t)", + "", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, sysctlCommand)", + "\tif err != nil || errStr != \"\" 
{", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s in probe pod %s, err=%s, stderr=%s\", sysctlCommand,", + "\t\t\tenv.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\treturn parseSysctlSystemOutput(outStr), nil", + "}" + ] + }, + { + "name": "parseSysctlSystemOutput", + "qualifiedName": "parseSysctlSystemOutput", + "exported": false, + "signature": "func(string)(map[string]string)", + "doc": "parseSysctlSystemOutput parses sysctl output into a map of key-value pairs\n\nThe function takes the raw text returned by \"sysctl --system\" and splits it\nline by line. It ignores comment lines that start with an asterisk, then uses\na regular expression to extract keys and values from standard assignments\nsuch as \"kernel.yama.ptrace_scope = 0\". Each extracted key and value is\nstored in a map which the function returns.", + "position": "/Users/deliedit/dev/certsuite/tests/platform/sysctlconfig/sysctlconfig.go:35", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Split", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "HasPrefix", + "kind": "function" + }, + { + "pkgPath": "regexp", + "name": "MustCompile", + "kind": "function" + }, + { + "name": "MatchString", + "kind": "function" + }, + { + "name": "FindStringSubmatch", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/sysctlconfig", + "name": "GetSysctlSettings", + "kind": "function", + "source": [ + "func GetSysctlSettings(env *provider.TestEnvironment, nodeName string) (map[string]string, error) {", + "\tconst (", + "\t\tsysctlCommand = \"chroot /host sysctl --system\"", + "\t)", + "", + "\to := clientsholder.GetClientsHolder()", + "\tctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name)", + "", + "\toutStr, errStr, err := o.ExecCommandContainer(ctx, 
sysctlCommand)", + "\tif err != nil || errStr != \"\" {", + "\t\treturn nil, fmt.Errorf(\"failed to execute command %s in probe pod %s, err=%s, stderr=%s\", sysctlCommand,", + "\t\t\tenv.ProbePods[nodeName], err, errStr)", + "\t}", + "", + "\treturn parseSysctlSystemOutput(outStr), nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func parseSysctlSystemOutput(sysctlSystemOutput string) map[string]string {", + "\tretval := make(map[string]string)", + "\tsplitConfig := strings.Split(sysctlSystemOutput, \"\\n\")", + "\tfor _, line := range splitConfig {", + "\t\tif strings.HasPrefix(line, \"*\") {", + "\t\t\tcontinue", + "\t\t}", + "", + "\t\tkeyValRegexp := regexp.MustCompile(`(\\S+)\\s*=\\s*(\\S+)`) // A line is of the form \"kernel.yama.ptrace_scope = 0\"", + "\t\tif !keyValRegexp.MatchString(line) {", + "\t\t\tcontinue", + "\t\t}", + "\t\tregexResults := keyValRegexp.FindStringSubmatch(line)", + "\t\tkey := regexResults[1]", + "\t\tval := regexResults[2]", + "\t\tretval[key] = val", + "\t}", + "\treturn retval", + "}" + ] + } + ], + "globals": null, + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "preflight", + "files": 1, + "imports": [ + "fmt", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "strings" + ], + "structs": null, + "interfaces": null, + "functions": [ + { + "name": "LoadChecks", + "qualifiedName": "LoadChecks", + "exported": true, + "signature": "func()()", + "doc": "LoadChecks Initializes the test 
environment and runs Preflight checks for containers and operators\n\nThe function sets up logging, retrieves the current test environment, and\ncreates a checks group for Preflight tests. It executes container preflight\ntests and conditionally runs operator tests if the cluster is OpenShift.\nResults are recorded in the checks group for later reporting.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:87", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Debug", + "kind": "function", + "source": [ + "func (logger *Logger) Debug(msg string, args ...any) {", + "\tLogf(logger, LevelDebug, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "name": "WithBeforeEachFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewChecksGroup", + "kind": "function", + "source": [ + "func NewChecksGroup(groupName string) *ChecksGroup {", + "\tdbLock.Lock()", + "\tdefer dbLock.Unlock()", + "", + "\tif dbByGroup == nil {", + "\t\tdbByGroup = map[string]*ChecksGroup{}", + "\t}", + "", + "\tgroup, exists := dbByGroup[groupName]", + "\tif exists {", + "\t\treturn group", + "\t}", + "", + "\tgroup = \u0026ChecksGroup{", + "\t\tname: groupName,", + "\t\tchecks: []*Check{},", + "\t\tcurrentRunningCheckIdx: checkIdxNone,", + "\t}", + "\tdbByGroup[groupName] = group", + "", + "\treturn group", + "}" + ] + }, + { + "name": "testPreflightContainers", + "kind": "function", + "source": [ + "func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Using a cache to prevent unnecessary 
processing of images if we already have the results available", + "\tpreflightImageCache := make(map[string]provider.PreflightResultsDB)", + "", + "\t// Loop through all of the containers, run preflight, and set their results into their respective objects", + "\tfor _, cut := range env.Containers {", + "\t\terr := cut.SetPreflightResults(preflightImageCache, env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on image %q, err: %v\", cut.Image, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight container tests for %d containers\", len(env.Containers))", + "", + "\t// Handle Container-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromContainerResults(env.Containers) {", + "\t\tlog.Info(\"Setting Preflight container test results for %q\", testName)", + "\t\tgeneratePreflightContainerCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Containers)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "IsOCPCluster", + "kind": "function", + "source": [ + "func IsOCPCluster() bool {", + "\treturn env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "testPreflightOperators", + "kind": "function", + "source": [ + "func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Loop through all of the operators, run preflight, and set their results into their respective object", + "\tfor _, op := range env.Operators {", + 
"\t\t// Note: We are not using a cache here for the operator bundle images because", + "\t\t// in-general you are only going to have an operator installed once in a cluster.", + "\t\terr := op.SetPreflightResults(env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on operator %q, err: %v\", op.Name, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight operator tests for %d operators\", len(env.Operators))", + "", + "\t// Handle Operator-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromOperatorResults(env.Operators) {", + "\t\tlog.Info(\"Setting Preflight operator test results for %q\", testName)", + "\t\tgeneratePreflightOperatorCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Operators)", + "\t}", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadChecksDB", + "kind": "function", + "source": [ + "func LoadChecksDB(labelsExpr string) {", + "\tLoadInternalChecksDB()", + "", + "\tif preflight.ShouldRun(labelsExpr) {", + "\t\tpreflight.LoadChecks()", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + 
"\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + }, + { + "name": "ShouldRun", + "qualifiedName": "ShouldRun", + "exported": true, + "signature": "func(string)(bool)", + "doc": "ShouldRun Determines whether preflight checks should be executed\n\nThe function evaluates the provided label expression to see if it includes\nany preflight-specific tags, then verifies that a Docker configuration file\nis available. If either condition fails, it returns false or logs a warning\nand marks the environment to skip preflight tests. When both conditions are\nsatisfied, it signals that the preflight suite may run.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:63", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider", + "name": "GetTestEnvironment", + "kind": "function", + "source": [ + "func GetTestEnvironment() TestEnvironment {", + "\tif !loaded {", + "\t\tbuildTestEnvironment()", + "\t\tloaded = true", + "\t}", + "\treturn env", + "}" + ] + }, + { + "name": "labelsAllowTestRun", + "kind": "function", + "source": [ + "func labelsAllowTestRun(labelFilter string, allowedLabels []string) bool {", + "\tfor _, label := range allowedLabels {", + "\t\tif strings.Contains(labelFilter, label) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "name": "GetTestParameters", + "kind": "function", + "source": [ + "func GetTestParameters() *TestParameters {", + "\treturn \u0026parameters", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Warn", + "kind": "function", + "source": [ + "func Warn(msg string, args ...any) {", + "\tLogf(globalLogger, LevelWarn, msg, args...)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadChecksDB", + "kind": "function", + "source": [ + "func LoadChecksDB(labelsExpr string) {", + "\tLoadInternalChecksDB()", + "", + "\tif preflight.ShouldRun(labelsExpr) {", + "\t\tpreflight.LoadChecks()", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func ShouldRun(labelsExpr string) bool {", + "\tenv = provider.GetTestEnvironment()", + "\tpreflightAllowedLabels := []string{common.PreflightTestKey, identifiers.TagPreflight}", + "", + "\tif !labelsAllowTestRun(labelsExpr, preflightAllowedLabels) {", + "\t\treturn false", + "\t}", + "", + "\t// Add safeguard against running the preflight tests if the docker config does not exist.", + "\tpreflightDockerConfigFile := configuration.GetTestParameters().PfltDockerconfig", + "\tif preflightDockerConfigFile == \"\" || preflightDockerConfigFile == \"NA\" {", + "\t\tlog.Warn(\"Skipping the preflight suite because the Docker Config file is not provided.\")", + "\t\tenv.SkipPreflight = true", + "\t}", + "", + "\treturn true", + "}" + ] + }, + { + "name": "generatePreflightContainerCnfCertTest", + "qualifiedName": "generatePreflightContainerCnfCertTest", + "exported": false, + "signature": "func(*checksdb.ChecksGroup, string, string, string, []*provider.Container)()", + "doc": "generatePreflightContainerCnfCertTest Creates a test entry for each Preflight container check\n\nThe function registers a catalog entry using the supplied name, description,\nand remediation, then adds a corresponding check to the checks group. 
For\nevery container passed in, it examines preflight results and records which\ncontainers passed, failed, or errored on that specific test. The outcome is\nstored as compliant or non‑compliant objects within the check's result.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:169", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoContainersUnderTestSkipFn", + "kind": "function", + "source": [ + "func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Containers) == 0 {", + "\t\t\treturn true, \"no containers to check found\"", + "\t\t}", + "", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + 
"\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewContainerReportObject", + "kind": "function", + "source": [ + "func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, ContainerType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(PodName, aPodName)", + "\tout.AddField(ContainerName, aContainerName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "testPreflightContainers", + "kind": "function", + "source": [ + "func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Using a cache to prevent unnecessary processing of images if we already have the results available", + "\tpreflightImageCache := make(map[string]provider.PreflightResultsDB)", + "", + "\t// Loop through all of the containers, run preflight, and set their results into their respective objects", + "\tfor _, cut := range env.Containers {", + "\t\terr := cut.SetPreflightResults(preflightImageCache, env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on image %q, err: %v\", cut.Image, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight container tests for %d containers\", len(env.Containers))", + "", + "\t// Handle Container-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 
'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromContainerResults(env.Containers) {", + "\t\tlog.Info(\"Setting Preflight container test results for %q\", testName)", + "\t\tgeneratePreflightContainerCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Containers)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t\t\tfor _, cut := range containers {", + "\t\t\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Container %q has passed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has passed preflight test \"+testName, true))", + 
"\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has failed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has errored Preflight test %q, err: %v\", cut, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Container has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "generatePreflightOperatorCnfCertTest", + "qualifiedName": "generatePreflightOperatorCnfCertTest", + "exported": false, + "signature": "func(*checksdb.ChecksGroup, string, string, string, []*provider.Operator)()", + "doc": "generatePreflightOperatorCnfCertTest Creates a test case that aggregates preflight results across operators\n\nThe function registers a new test in the catalog, then builds a check that\niterates over all operators to collect passed, failed, or errored preflight\noutcomes for a given test name. It constructs report objects for each\noperator and sets the overall result accordingly. 
The check is skipped if no\noperators are present.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:219", + "calls": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "AddCatalogEntry", + "kind": "function", + "source": [ + "func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) {", + "\t// Default Values (if missing)", + "\tif strings.TrimSpace(exception) == \"\" {", + "\t\texception = NoDocumentedProcess", + "\t}", + "\tif strings.TrimSpace(reference) == \"\" {", + "\t\treference = \"No Reference Document Specified\"", + "\t}", + "\tif len(tags) == 0 {", + "\t\ttags = append(tags, TagCommon)", + "\t}", + "", + "\ttcDescription, aID := claim.BuildTestCaseDescription(testID, suiteName, description, remediation, exception, reference, qe, categoryclassification, tags...)", + "\tCatalog[aID] = tcDescription", + "\tClassification[aID.Id] = categoryclassification", + "", + "\treturn aID", + "}" + ] + }, + { + "name": "Add", + "kind": "function" + }, + { + "name": "WithCheckFn", + "kind": "function" + }, + { + "name": "WithSkipCheckFn", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "NewCheck", + "kind": "function", + "source": [ + "func NewCheck(id string, labels []string) *Check {", + "\tcheck := \u0026Check{", + "\t\tID: id,", + "\t\tLabels: labels,", + "\t\tResult: CheckResultPassed,", + "\t\tlogArchive: \u0026strings.Builder{},", + "\t}", + "", + "\tcheck.logger = log.GetMultiLogger(check.logArchive, cli.CliCheckLogSniffer).With(\"check\", check.ID)", + "", + "\treturn check", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "name": "GetTestIDAndLabels", + "kind": "function", + "source": [ + "func GetTestIDAndLabels(identifier 
claim.Identifier) (testID string, tags []string) {", + "\ttags = strings.Split(identifier.Tags, \",\")", + "\ttags = append(tags, identifier.Id, identifier.Suite)", + "\tTestIDToClaimID[identifier.Id] = identifier", + "\treturn identifier.Id, tags", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "GetNoOperatorsSkipFn", + "kind": "function", + "source": [ + "func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) {", + "\treturn func() (bool, string) {", + "\t\tif len(env.Operators) == 0 {", + "\t\t\treturn true, \"no operators found\"", + "\t\t}", + "\t\treturn false, \"\"", + "\t}", + "}" + ] + }, + { + "name": "LogInfo", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", + "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "name": "LogError", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper", 
+ "name": "NewOperatorReportObject", + "kind": "function", + "source": [ + "func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) {", + "\tout = NewReportObject(aReason, OperatorType, isCompliant)", + "\tout.AddField(Namespace, aNamespace)", + "\tout.AddField(Name, aOperatorName)", + "\treturn out", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "name": "SetResult", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "testPreflightOperators", + "kind": "function", + "source": [ + "func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Loop through all of the operators, run preflight, and set their results into their respective object", + "\tfor _, op := range env.Operators {", + "\t\t// Note: We are not using a cache here for the operator bundle images because", + "\t\t// in-general you are only going to have an operator installed once in a cluster.", + "\t\terr := op.SetPreflightResults(env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on operator %q, err: %v\", op.Name, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight operator tests for %d operators\", len(env.Operators))", + "", + "\t// Handle Operator-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromOperatorResults(env.Operators) {", + "\t\tlog.Info(\"Setting Preflight operator test results for %q\", testName)", + "\t\tgeneratePreflightOperatorCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Operators)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t\t\tfor _, op := range operators {", + "\t\t\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Operator %q has passed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has failed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Errors 
{", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has errored Preflight test %q, err: %v\", op, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, fmt.Sprintf(\"Operator has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + }, + { + "name": "getUniqueTestEntriesFromContainerResults", + "qualifiedName": "getUniqueTestEntriesFromContainerResults", + "exported": false, + "signature": "func([]*provider.Container)(map[string]provider.PreflightTest)", + "doc": "getUniqueTestEntriesFromContainerResults Collects unique preflight test results from multiple containers\n\nThis function iterates over a slice of container objects, extracting all\npassed, failed, and error preflight tests. It aggregates them into a map\nkeyed by test name, ensuring that duplicate entries are overridden with the\nmost recent result. 
The resulting map contains one entry per unique test\nacross all containers.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:270", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "testPreflightContainers", + "kind": "function", + "source": [ + "func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Using a cache to prevent unnecessary processing of images if we already have the results available", + "\tpreflightImageCache := make(map[string]provider.PreflightResultsDB)", + "", + "\t// Loop through all of the containers, run preflight, and set their results into their respective objects", + "\tfor _, cut := range env.Containers {", + "\t\terr := cut.SetPreflightResults(preflightImageCache, env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on image %q, err: %v\", cut.Image, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight container tests for %d containers\", len(env.Containers))", + "", + "\t// Handle Container-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromContainerResults(env.Containers) {", + "\t\tlog.Info(\"Setting Preflight container test results for %q\", testName)", + "\t\tgeneratePreflightContainerCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Containers)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getUniqueTestEntriesFromContainerResults(containers []*provider.Container) map[string]provider.PreflightTest {", + "\t// If containers are sharing the same image, they should \"presumably\" have the same results returned from Preflight.", + "\ttestEntries 
:= make(map[string]provider.PreflightTest)", + "\tfor _, cut := range containers {", + "\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\t// Failed Results have more information than the rest", + "\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t}", + "", + "\treturn testEntries", + "}" + ] + }, + { + "name": "getUniqueTestEntriesFromOperatorResults", + "qualifiedName": "getUniqueTestEntriesFromOperatorResults", + "exported": false, + "signature": "func([]*provider.Operator)(map[string]provider.PreflightTest)", + "doc": "getUniqueTestEntriesFromOperatorResults collects unique preflight test results from all operators\n\nThe function iterates over a slice of operator objects, extracting each\npassed, failed, or errored test result. For every test name it stores the\ncorresponding test entry in a map, ensuring that only one instance per test\nname is kept even if multiple operators report the same test. 
The resulting\nmap associates test names with their detailed preflight test information for\nlater use.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:297", + "calls": [ + { + "name": "make", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "testPreflightOperators", + "kind": "function", + "source": [ + "func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Loop through all of the operators, run preflight, and set their results into their respective object", + "\tfor _, op := range env.Operators {", + "\t\t// Note: We are not using a cache here for the operator bundle images because", + "\t\t// in-general you are only going to have an operator installed once in a cluster.", + "\t\terr := op.SetPreflightResults(env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on operator %q, err: %v\", op.Name, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight operator tests for %d operators\", len(env.Operators))", + "", + "\t// Handle Operator-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromOperatorResults(env.Operators) {", + "\t\tlog.Info(\"Setting Preflight operator test results for %q\", testName)", + "\t\tgeneratePreflightOperatorCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Operators)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func getUniqueTestEntriesFromOperatorResults(operators []*provider.Operator) map[string]provider.PreflightTest {", + "\ttestEntries := make(map[string]provider.PreflightTest)", + "\tfor _, op := range operators {", + "\t\tfor _, r := range op.PreflightResults.Passed {", + 
"\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\t// Failed Results have more information than the rest", + "\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\tfor _, r := range op.PreflightResults.Errors {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t}", + "\treturn testEntries", + "}" + ] + }, + { + "name": "labelsAllowTestRun", + "qualifiedName": "labelsAllowTestRun", + "exported": false, + "signature": "func(string, []string)(bool)", + "doc": "labelsAllowTestRun checks whether a test run is permitted based on labels\n\nThe function receives a string of labels and a list of allowed label\nidentifiers. It scans each allowed identifier to see if it appears within the\nprovided string, returning true upon the first match. If none of the allowed\nlabels are found, it returns false.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:47", + "calls": [ + { + "pkgPath": "strings", + "name": "Contains", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "ShouldRun", + "kind": "function", + "source": [ + "func ShouldRun(labelsExpr string) bool {", + "\tenv = provider.GetTestEnvironment()", + "\tpreflightAllowedLabels := []string{common.PreflightTestKey, identifiers.TagPreflight}", + "", + "\tif !labelsAllowTestRun(labelsExpr, preflightAllowedLabels) {", + "\t\treturn false", + "\t}", + "", + "\t// Add safeguard against running the preflight tests if the docker config does not exist.", + "\tpreflightDockerConfigFile := configuration.GetTestParameters().PfltDockerconfig", + "\tif preflightDockerConfigFile == \"\" || preflightDockerConfigFile == \"NA\" {", + "\t\tlog.Warn(\"Skipping the preflight suite because the Docker Config file is not provided.\")", + "\t\tenv.SkipPreflight = true", + "\t}", + "", + "\treturn true", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + 
"func labelsAllowTestRun(labelFilter string, allowedLabels []string) bool {", + "\tfor _, label := range allowedLabels {", + "\t\tif strings.Contains(labelFilter, label) {", + "\t\t\treturn true", + "\t\t}", + "\t}", + "\treturn false", + "}" + ] + }, + { + "name": "testPreflightContainers", + "qualifiedName": "testPreflightContainers", + "exported": false, + "signature": "func(*checksdb.ChecksGroup, *provider.TestEnvironment)()", + "doc": "testPreflightContainers runs Preflight checks on all containers in the test environment\n\nThe function iterates over each container, executing Preflight diagnostics\nwhile caching results per image to avoid duplicate work. It logs any errors\nencountered during execution and records completion of tests for the entire\nset. After processing, it aggregates unique test entries from container\nresults and generates corresponding checks in the provided group.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:140", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "SetPreflightResults", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "getUniqueTestEntriesFromContainerResults", + "kind": "function", + "source": [ + "func getUniqueTestEntriesFromContainerResults(containers []*provider.Container) map[string]provider.PreflightTest {", + "\t// If 
containers are sharing the same image, they should \"presumably\" have the same results returned from Preflight.", + "\ttestEntries := make(map[string]provider.PreflightTest)", + "\tfor _, cut := range containers {", + "\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\t// Failed Results have more information than the rest", + "\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t}", + "", + "\treturn testEntries", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "generatePreflightContainerCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoContainersUnderTestSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", 
+ "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "\t\t\tfor _, cut := range containers {", + "\t\t\t\tfor _, r := range cut.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Container %q has passed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has failed Preflight test %q\", cut, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, \"Container has failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range cut.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Container %q has errored Preflight test %q, err: %v\", cut, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewContainerReportObject(cut.Namespace, cut.Podname, cut.Name, fmt.Sprintf(\"Container has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + 
"\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Using a cache to prevent unnecessary processing of images if we already have the results available", + "\tpreflightImageCache := make(map[string]provider.PreflightResultsDB)", + "", + "\t// Loop through all of the containers, run preflight, and set their results into their respective objects", + "\tfor _, cut := range env.Containers {", + "\t\terr := cut.SetPreflightResults(preflightImageCache, env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on image %q, err: %v\", cut.Image, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight container tests for %d containers\", len(env.Containers))", + "", + "\t// Handle Container-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromContainerResults(env.Containers) {", + "\t\tlog.Info(\"Setting Preflight container test results for %q\", testName)", + "\t\tgeneratePreflightContainerCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Containers)", + "\t}", + "}" + ] + }, + { + "name": "testPreflightOperators", + "qualifiedName": "testPreflightOperators", + "exported": false, + 
"signature": "func(*checksdb.ChecksGroup, *provider.TestEnvironment)()", + "doc": "testPreflightOperators Runs preflight checks on all operators and records their outcomes\n\nThis function iterates over each operator in the test environment, executing\nits preflight tests and capturing any errors. After collecting results, it\nlogs completion of operator testing. Finally, it creates catalog entries for\nevery unique preflight test found across operators, adding these checks to\nthe provided group so they can be reported.", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:112", + "calls": [ + { + "name": "SetPreflightResults", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "getUniqueTestEntriesFromOperatorResults", + "kind": "function", + "source": [ + "func getUniqueTestEntriesFromOperatorResults(operators []*provider.Operator) map[string]provider.PreflightTest {", + "\ttestEntries := make(map[string]provider.PreflightTest)", + "\tfor _, op := range operators {", + "\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\t// Failed Results have more information than the rest", + "\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t\tfor _, r := range op.PreflightResults.Errors {", + 
"\t\t\ttestEntries[r.Name] = r", + "\t\t}", + "\t}", + "\treturn testEntries", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "generatePreflightOperatorCnfCertTest", + "kind": "function", + "source": [ + "func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {", + "\t// Based on a single test \"name\", we will be passing/failing in our test framework.", + "\t// Brute force-ish type of method.", + "", + "\t// Store the test names into the Catalog map for results to be dynamically printed", + "\taID := identifiers.AddCatalogEntry(testName, common.PreflightTestKey, description, remediation, \"\", \"\", false, map[string]string{", + "\t\tidentifiers.FarEdge: identifiers.Optional,", + "\t\tidentifiers.Telco: identifiers.Optional,", + "\t\tidentifiers.NonTelco: identifiers.Optional,", + "\t\tidentifiers.Extended: identifiers.Optional,", + "\t}, identifiers.TagPreflight)", + "", + "\tchecksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(aID)).", + "\t\tWithSkipCheckFn(testhelper.GetNoOperatorsSkipFn(\u0026env)).", + "\t\tWithCheckFn(func(check *checksdb.Check) error {", + "\t\t\tvar compliantObjects []*testhelper.ReportObject", + "\t\t\tvar nonCompliantObjects []*testhelper.ReportObject", + "", + "\t\t\tfor _, op := range operators {", + "\t\t\t\tfor _, r := range op.PreflightResults.Passed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogInfo(\"Operator %q has passed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tcompliantObjects = append(compliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator passed preflight test \"+testName, true))", + "\t\t\t\t\t}", + "\t\t\t\t}", + 
"\t\t\t\tfor _, r := range op.PreflightResults.Failed {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has failed Preflight test %q\", op, testName)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, \"Operator failed preflight test \"+testName, false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t\tfor _, r := range op.PreflightResults.Errors {", + "\t\t\t\t\tif r.Name == testName {", + "\t\t\t\t\t\tcheck.LogError(\"Operator %q has errored Preflight test %q, err: %v\", op, testName, r.Error)", + "\t\t\t\t\t\tnonCompliantObjects = append(nonCompliantObjects, testhelper.NewOperatorReportObject(op.Namespace, op.Name, fmt.Sprintf(\"Operator has errored preflight test %s, err: %v\", testName, r.Error), false))", + "\t\t\t\t\t}", + "\t\t\t\t}", + "\t\t\t}", + "", + "\t\t\tcheck.SetResult(compliantObjects, nonCompliantObjects)", + "\t\t\treturn nil", + "\t\t}))", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight", + "name": "LoadChecks", + "kind": "function", + "source": [ + "func LoadChecks() {", + "\tlog.Debug(\"Running %s suite checks\", common.PreflightTestKey)", + "", + "\t// As the preflight lib's checks need to run here, we need to get the test environment now.", + "\tenv = provider.GetTestEnvironment()", + "", + "\tchecksGroup := checksdb.NewChecksGroup(common.PreflightTestKey).", + "\t\tWithBeforeEachFn(beforeEachFn)", + "", + "\ttestPreflightContainers(checksGroup, \u0026env)", + "\tif provider.IsOCPCluster() {", + "\t\tlog.Info(\"OCP cluster detected, allowing Preflight operator tests to run\")", + "\t\ttestPreflightOperators(checksGroup, \u0026env)", + "\t} else {", + "\t\tlog.Info(\"Skipping the Preflight operators test because it requires an OCP cluster to run against\")", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func 
testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) {", + "\t// Loop through all of the operators, run preflight, and set their results into their respective object", + "\tfor _, op := range env.Operators {", + "\t\t// Note: We are not using a cache here for the operator bundle images because", + "\t\t// in-general you are only going to have an operator installed once in a cluster.", + "\t\terr := op.SetPreflightResults(env)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed running Preflight on operator %q, err: %v\", op.Name, err)", + "\t\t}", + "\t}", + "", + "\tlog.Info(\"Completed running Preflight operator tests for %d operators\", len(env.Operators))", + "", + "\t// Handle Operator-based preflight tests", + "\t// Note: We only care about the `testEntry` variable below because we need its 'Description' and 'Suggestion' variables.", + "\tfor testName, testEntry := range getUniqueTestEntriesFromOperatorResults(env.Operators) {", + "\t\tlog.Info(\"Setting Preflight operator test results for %q\", testName)", + "\t\tgeneratePreflightOperatorCnfCertTest(checksGroup, testName, testEntry.Description, testEntry.Remediation, env.Operators)", + "\t}", + "}" + ] + } + ], + "globals": [ + { + "name": "beforeEachFn", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:35" + }, + { + "name": "env", + "exported": false, + "type": "provider.TestEnvironment", + "position": "/Users/deliedit/dev/certsuite/tests/preflight/suite.go:33" + } + ], + "consts": null + }, + { + "path": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "webserver", + "files": 1, + "imports": [ + "bufio", + "bytes", + "context", + "embed", + "encoding/json", + "fmt", + "github.com/gorilla/websocket", + "github.com/redhat-best-practices-for-k8s/certsuite-claim/pkg/claim", + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration", + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers", + "github.com/robert-nix/ansihtml", + "gopkg.in/yaml.v3", + "io", + "io/fs", + "net", + "net/http", + "os", + "sort", + "strings", + "time" + ], + "structs": [ + { + "name": "Entry", + "exported": true, + "doc": "Entry Represents a test case entry in the printable catalog\n\nEach instance holds the name of a test and its identifying information,\nincluding URL and version details. The struct is used to build a mapping from\nsuite names to collections of tests when generating a printable catalog.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:571", + "fields": { + "identifier": "claim.Identifier", + "testName": "string" + }, + "methodNames": null, + "source": [ + "type Entry struct {", + "\ttestName string", + "\tidentifier claim.Identifier // {url and version}", + "}" + ] + }, + { + "name": "RequestedData", + "exported": true, + "doc": "RequestedData Holds user‑supplied configuration options for updating a test framework\n\nThis structure aggregates all settings that can be specified in the UI or\ncommand line, such as namespaces, labels, deployment names, and API\ncredentials. Each field is a slice of strings to allow multiple values, with\noptional fields omitted from JSON if empty. 
The data is consumed by updateTnf\nto rebuild the YAML configuration for the test environment.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:111", + "fields": { + "AcceptedKernelTaints": "[]string", + "CollectorAppEndPoint": "[]string", + "CollectorAppPassword": "[]string", + "ConnectAPIBaseURL": "[]string", + "ConnectAPIKey": "[]string", + "ConnectAPIProxyPort": "[]string", + "ConnectAPIProxyURL": "[]string", + "ConnectProjectID": "[]string", + "ExecutedBy": "[]string", + "ManagedDeployments": "[]string", + "ManagedStatefulsets": "[]string", + "OperatorsUnderTestLabels": "[]string", + "PartnerName": "[]string", + "PodsUnderTestLabels": "[]string", + "ProbeDaemonSetNamespace": "[]string", + "SelectedOptions": "[]string", + "Servicesignorelist": "[]string", + "SkipHelmChartList": "[]string", + "SkipScalingTestDeploymentsname": "[]string", + "SkipScalingTestDeploymentsnamespace": "[]string", + "SkipScalingTestStatefulsetsname": "[]string", + "SkipScalingTestStatefulsetsnamespace": "[]string", + "TargetCrdFiltersnameSuffix": "[]string", + "TargetCrdFiltersscalable": "[]string", + "TargetNameSpaces": "[]string", + "ValidProtocolNames": "[]string" + }, + "methodNames": null, + "source": [ + "type RequestedData struct {", + "\tSelectedOptions []string `json:\"selectedOptions\"`", + "\tTargetNameSpaces []string `json:\"targetNameSpaces\"`", + "\tPodsUnderTestLabels []string `json:\"podsUnderTestLabels\"`", + "\tOperatorsUnderTestLabels []string `json:\"operatorsUnderTestLabels\"`", + "\tManagedDeployments []string `json:\"managedDeployments\"`", + "\tManagedStatefulsets []string `json:\"managedStatefulsets\"`", + "\tSkipScalingTestDeploymentsnamespace []string `json:\"skipScalingTestDeploymentsnamespace\"`", + "\tSkipScalingTestDeploymentsname []string `json:\"skipScalingTestDeploymentsname\"`", + "\tSkipScalingTestStatefulsetsnamespace []string `json:\"skipScalingTestStatefulsetsnamespace\"`", + "\tSkipScalingTestStatefulsetsname []string 
`json:\"skipScalingTestStatefulsetsname\"`", + "\tTargetCrdFiltersnameSuffix []string `json:\"targetCrdFiltersnameSuffix\"`", + "\tTargetCrdFiltersscalable []string `json:\"targetCrdFiltersscalable\"`", + "\tAcceptedKernelTaints []string `json:\"acceptedKernelTaints\"`", + "\tSkipHelmChartList []string `json:\"skipHelmChartList\"`", + "\tServicesignorelist []string `json:\"servicesignorelist\"`", + "\tValidProtocolNames []string `json:\"ValidProtocolNames\"`", + "\tProbeDaemonSetNamespace []string `json:\"ProbeDaemonSetNamespace\"`", + "\tCollectorAppEndPoint []string `json:\"CollectorAppEndPoint\"`", + "\tExecutedBy []string `json:\"executedBy\"`", + "\tCollectorAppPassword []string `json:\"CollectorAppPassword\"`", + "\tPartnerName []string `json:\"PartnerName\"`", + "\tConnectAPIKey []string `json:\"key,omitempty\"`", + "\tConnectProjectID []string `json:\"projectID,omitempty\"`", + "\tConnectAPIBaseURL []string `json:\"baseURL,omitempty\"`", + "\tConnectAPIProxyURL []string `json:\"proxyURL,omitempty\"`", + "\tConnectAPIProxyPort []string `json:\"proxyPort,omitempty\"`", + "}" + ] + }, + { + "name": "ResponseData", + "exported": true, + "doc": "ResponseData Holds a response message\n\nThis struct contains a single field that stores a text message to be returned\nin HTTP responses. 
The JSON tag ensures the field is serialized with the key\n\"message\" when the struct is encoded to JSON.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:145", + "fields": { + "Message": "string" + }, + "methodNames": null, + "source": [ + "type ResponseData struct {", + "\tMessage string `json:\"message\"`", + "}" + ] + } + ], + "interfaces": null, + "functions": [ + { + "name": "CreatePrintableCatalogFromIdentifiers", + "qualifiedName": "CreatePrintableCatalogFromIdentifiers", + "exported": true, + "signature": "func([]claim.Identifier)(map[string][]Entry)", + "doc": "CreatePrintableCatalogFromIdentifiers Organizes identifiers into a map keyed by suite names\n\nThe function receives a slice of identifier objects and constructs a mapping\nfrom each identifier's suite to a list of entries containing the test name\nand the full identifier. It initializes an empty map, iterates over the input\nslice, appends a new entry for each identifier, and returns the populated\nmap. 
If no identifiers are provided, it simply returns an empty map.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:583", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string) {", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"classification= {\\n\"", + "\tfor _, suite := range suites {", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tclassificationString := \"\\\"categoryClassification\\\": \"", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"%q: [\\n{\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"\\\"description\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"remediation\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"bestPracticeReference\\\": %q,\\n\", 
strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += classificationString + toJSONString(identifiers.Catalog[k.identifier].CategoryClassification) + \",\\n}\\n]\\n,\"", + "\t\t}", + "\t}", + "\toutString += \"}\"", + "\treturn outString", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry {", + "\tcatalog := make(map[string][]Entry)", + "\t// we need the list of suite's names", + "\tfor _, i := range keys {", + "\t\tcatalog[i.Suite] = append(catalog[i.Suite], Entry{", + "\t\t\ttestName: i.Id,", + "\t\t\tidentifier: i,", + "\t\t})", + "\t}", + "\treturn catalog", + "}" + ] + }, + { + "name": "GetSuitesFromIdentifiers", + "qualifiedName": "GetSuitesFromIdentifiers", + "exported": true, + "signature": "func([]claim.Identifier)([]string)", + "doc": "GetSuitesFromIdentifiers Retrieves unique suite names from a list of identifiers\n\nThe function iterates over each identifier, collects its suite field into a\nslice, then removes duplicates using a helper that returns only distinct\nvalues. 
It returns a string slice containing the unique suite names present\nin the input.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:558", + "calls": [ + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper", + "name": "Unique", + "kind": "function", + "source": [ + "func Unique(slice []string) []string {", + "\t// create a map with all the values as key", + "\tuniqMap := make(map[string]struct{})", + "\tfor _, v := range slice {", + "\t\tuniqMap[v] = struct{}{}", + "\t}", + "", + "\t// turn the map keys into a slice", + "\tuniqSlice := make([]string, 0, len(uniqMap))", + "\tfor v := range uniqMap {", + "\t\tuniqSlice = append(uniqSlice, v)", + "\t}", + "\treturn uniqSlice", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string) {", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"classification= {\\n\"", + "\tfor _, suite := range suites {", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tclassificationString := \"\\\"categoryClassification\\\": \"", + "\t\t\t// Every paragraph starts with a new line.", + "", 
+ "\t\t\toutString += fmt.Sprintf(\"%q: [\\n{\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"\\\"description\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"remediation\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"bestPracticeReference\\\": %q,\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += classificationString + toJSONString(identifiers.Catalog[k.identifier].CategoryClassification) + \",\\n}\\n]\\n,\"", + "\t\t}", + "\t}", + "\toutString += \"}\"", + "\treturn outString", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func GetSuitesFromIdentifiers(keys []claim.Identifier) []string {", + "\tvar suites []string", + "\tfor _, i := range keys {", + "\t\tsuites = append(suites, i.Suite)", + "\t}", + "\treturn arrayhelper.Unique(suites)", + "}" + ] + }, + { + "name": "StartServer", + "qualifiedName": "StartServer", + "exported": true, + "signature": "func(string)()", + "doc": "StartServer Starts an HTTP server that serves test results and static assets\n\nThe function creates a server listening on port 8084, attaches context with\nthe output folder path, registers handlers for static files and runFunction,\nthen begins serving requests. It logs the server address and panics if\nListenAndServe returns an error. 
The server provides endpoints for HTML,\nJavaScript, and log streaming used by the web interface.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:235", + "calls": [ + { + "pkgPath": "context", + "name": "TODO", + "kind": "function" + }, + { + "pkgPath": "context", + "name": "WithValue", + "kind": "function" + }, + { + "name": "installReqHandlers", + "kind": "function", + "source": [ + "func installReqHandlers() {", + "\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"text/html\".", + "\t\tw.Header().Set(\"Content-Type\", \"text/html\")", + "\t\t// Write the embedded HTML content to the response.", + "\t\t_, err := w.Write(indexHTML)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/submit.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(submit)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/logs.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(logs)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/toast.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + 
"\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(toast)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/index.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(index)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "\thttp.HandleFunc(\"/classification.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\tclassification := outputTestCases()", + "", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write([]byte(classification))", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\t// Serve the static HTML file", + "\thttp.HandleFunc(\"/logstream\", logStreamHandler)", + "}" + ] + }, + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "ListenAndServe", + "kind": "function" + }, + { + "name": "panic", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run", + "name": "runTestSuite", + "kind": "function", + "source": [ + "func runTestSuite(cmd *cobra.Command, _ []string) error {", + "\terr := initTestParamsFromFlags(cmd)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to initialize the test parameters, err: %v\", err)", + "\t}", + "", + "\ttestParams := configuration.GetTestParameters()", + "\tif testParams.ServerMode {", + "\t\tlog.Info(\"Running Certification Suite in web server mode\")", + "\t\twebserver.StartServer(testParams.OutputDir)", + "\t} else {", + "\t\tcertsuite.Startup()", + "\t\tdefer certsuite.Shutdown()", + "\t\tlog.Info(\"Running Certification Suite in stand-alone mode\")", + "\t\terr := certsuite.Run(testParams.LabelsFilter, testParams.OutputDir)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to run Certification Suite: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func StartServer(outputFolder string) {", + "\tctx := context.TODO()", + "\tserver := \u0026http.Server{", + "\t\tAddr: \":8084\", // Server address", + "\t\tReadTimeout: readTimeoutSeconds * time.Second, // Maximum duration for reading the entire request", + "\t\tBaseContext: func(l net.Listener) context.Context {", + "\t\t\tctx = context.WithValue(ctx, outputFolderCtxKey, outputFolder)", + "\t\t\treturn ctx", + "\t\t},", + "\t}", + "", + "\tinstallReqHandlers()", + "", + "\thttp.HandleFunc(\"/runFunction\", runHandler)", + "", + "\tlog.Info(\"Server is running on :8084...\")", + "\tif err := server.ListenAndServe(); err != nil {", + "\t\tpanic(err)", + "\t}", + "}" + ] + }, + { + "name": "installReqHandlers", + "qualifiedName": "installReqHandlers", + "exported": false, + "signature": "func()()", + "doc": "installReqHandlers Registers HTTP routes for static content and classification data\n\nThis function sets up several URL 
handlers that serve embedded HTML,\nJavaScript, and classification information. Each handler writes the\nappropriate content type header before sending the precompiled bytes or\ngenerated JSON string. Errors during writing result in a 500 response to the\nclient.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:156", + "calls": [ + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Header", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Header", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Header", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Header", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Header", + "kind": "function" + }, + { + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + }, + { + "name": "outputTestCases", + "kind": "function", + "source": [ + 
"func outputTestCases() (outString string) {", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"classification= {\\n\"", + "\tfor _, suite := range suites {", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tclassificationString := \"\\\"categoryClassification\\\": \"", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"%q: [\\n{\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"\\\"description\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"remediation\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"bestPracticeReference\\\": %q,\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += classificationString + toJSONString(identifiers.Catalog[k.identifier].CategoryClassification) + \",\\n}\\n]\\n,\"", + "\t\t}", + "\t}", + "\toutString += \"}\"", + "\treturn outString", + "}" + ] + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Header", + "kind": "function" + }, + { + "name": "Write", + 
"kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "HandleFunc", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "StartServer", + "kind": "function", + "source": [ + "func StartServer(outputFolder string) {", + "\tctx := context.TODO()", + "\tserver := \u0026http.Server{", + "\t\tAddr: \":8084\", // Server address", + "\t\tReadTimeout: readTimeoutSeconds * time.Second, // Maximum duration for reading the entire request", + "\t\tBaseContext: func(l net.Listener) context.Context {", + "\t\t\tctx = context.WithValue(ctx, outputFolderCtxKey, outputFolder)", + "\t\t\treturn ctx", + "\t\t},", + "\t}", + "", + "\tinstallReqHandlers()", + "", + "\thttp.HandleFunc(\"/runFunction\", runHandler)", + "", + "\tlog.Info(\"Server is running on :8084...\")", + "\tif err := server.ListenAndServe(); err != nil {", + "\t\tpanic(err)", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func installReqHandlers() {", + "\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"text/html\".", + "\t\tw.Header().Set(\"Content-Type\", \"text/html\")", + "\t\t// Write the embedded HTML content to the response.", + "\t\t_, err := w.Write(indexHTML)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/submit.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(submit)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + 
"\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/logs.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(logs)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/toast.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(toast)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/index.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(index)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "\thttp.HandleFunc(\"/classification.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\tclassification := outputTestCases()", + "", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write([]byte(classification))", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + 
"\t\t}", + "\t})", + "", + "\t// Serve the static HTML file", + "\thttp.HandleFunc(\"/logstream\", logStreamHandler)", + "}" + ] + }, + { + "name": "logStreamHandler", + "qualifiedName": "logStreamHandler", + "exported": false, + "signature": "func(http.ResponseWriter, *http.Request)()", + "doc": "logStreamHandler Streams log output to a WebSocket client\n\nWhen called, the function upgrades an HTTP request to a WebSocket connection.\nIt then continuously reads lines from a log source, converts each line to\nHTML-safe format, appends a line break, and sends it over the socket. The\nloop sleeps briefly between messages and logs any errors that occur during\nreading or transmission.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:75", + "calls": [ + { + "name": "Upgrade", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "bufio", + "name": "NewScanner", + "kind": "function" + }, + { + "name": "Scan", + "kind": "function" + }, + { + "name": "Bytes", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "github.com/robert-nix/ansihtml", + "name": "ConvertToHTML", + "kind": "function" + }, + { + "name": "WriteMessage", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "pkgPath": "time", + "name": "Sleep", + "kind": "function" + }, + { + "name": "Err", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger 
*Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func logStreamHandler(w http.ResponseWriter, r *http.Request) {", + "\tconn, err := upgrader.Upgrade(w, r, nil)", + "\tif err != nil {", + "\t\tlog.Info(\"WebSocket upgrade error: %v\", err)", + "\t\treturn", + "\t}", + "\tdefer conn.Close()", + "\t// Create a scanner to read the log file line by line", + "\tfor {", + "\t\tscanner := bufio.NewScanner(buf)", + "\t\tfor scanner.Scan() {", + "\t\t\tline := scanner.Bytes()", + "\t\t\tfmt.Println(string(line))", + "\t\t\tline = append(ansihtml.ConvertToHTML(line), []byte(\"\u003cbr\u003e\")...)", + "", + "\t\t\t// Send each log line to the client", + "\t\t\tif err := conn.WriteMessage(websocket.TextMessage, line); err != nil {", + "\t\t\t\tfmt.Println(err)", + "\t\t\t\treturn", + "\t\t\t}", + "\t\t\ttime.Sleep(logTimeout)", + "\t\t}", + "\t\tif err := scanner.Err(); err != nil {", + "\t\t\tlog.Info(\"Error reading log file: %v\", err)", + "\t\t\treturn", + "\t\t}", + "\t}", + "}" + ] + }, + { + "name": "outputTestCases", + "qualifiedName": "outputTestCases", + "exported": false, + "signature": "func()(string)", + "doc": "outputTestCases Creates a Markdown-formatted classification list for test cases\n\nThe function collects all identifiers from the catalog, sorts them by ID,\ngroups them by suite name, and then builds a string containing each test’s\ndescription, remediation, best practice reference, and category\nclassification in JSON-like format. 
The resulting string is returned for use\nas a JavaScript variable in the web UI.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:497", + "calls": [ + { + "name": "make", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "pkgPath": "sort", + "name": "Slice", + "kind": "function" + }, + { + "name": "CreatePrintableCatalogFromIdentifiers", + "kind": "function", + "source": [ + "func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry {", + "\tcatalog := make(map[string][]Entry)", + "\t// we need the list of suite's names", + "\tfor _, i := range keys {", + "\t\tcatalog[i.Suite] = append(catalog[i.Suite], Entry{", + "\t\t\ttestName: i.Id,", + "\t\t\tidentifier: i,", + "\t\t})", + "\t}", + "\treturn catalog", + "}" + ] + }, + { + "name": "GetSuitesFromIdentifiers", + "kind": "function", + "source": [ + "func GetSuitesFromIdentifiers(keys []claim.Identifier) []string {", + "\tvar suites []string", + "\tfor _, i := range keys {", + "\t\tsuites = append(suites, i.Suite)", + "\t}", + "\treturn arrayhelper.Unique(suites)", + "}" + ] + }, + { + "pkgPath": "sort", + "name": "Strings", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "ReplaceAll", + "kind": "function" + }, + { + "name": "toJSONString", + "kind": "function", + "source": [ + "func 
toJSONString(data map[string]string) string {", + "\t// Convert the map to a JSON-like string", + "\tjsonbytes, err := json.MarshalIndent(data, \"\", \" \")", + "\tif err != nil {", + "\t\treturn \"\"", + "\t}", + "", + "\treturn string(jsonbytes)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "installReqHandlers", + "kind": "function", + "source": [ + "func installReqHandlers() {", + "\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"text/html\".", + "\t\tw.Header().Set(\"Content-Type\", \"text/html\")", + "\t\t// Write the embedded HTML content to the response.", + "\t\t_, err := w.Write(indexHTML)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/submit.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(submit)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/logs.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(logs)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/toast.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to 
\"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(toast)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\thttp.HandleFunc(\"/index.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write(index)", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "\thttp.HandleFunc(\"/classification.js\", func(w http.ResponseWriter, r *http.Request) {", + "\t\tclassification := outputTestCases()", + "", + "\t\t// Set the content type to \"application/javascript\".", + "\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")", + "\t\t// Write the embedded JavaScript content to the response.", + "\t\t_, err := w.Write([]byte(classification))", + "\t\tif err != nil {", + "\t\t\thttp.Error(w, \"Failed to write response\", http.StatusInternalServerError)", + "\t\t\treturn", + "\t\t}", + "\t})", + "", + "\t// Serve the static HTML file", + "\thttp.HandleFunc(\"/logstream\", logStreamHandler)", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func outputTestCases() (outString string) {", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + 
"\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"classification= {\\n\"", + "\tfor _, suite := range suites {", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tclassificationString := \"\\\"categoryClassification\\\": \"", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"%q: [\\n{\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"\\\"description\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"remediation\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"bestPracticeReference\\\": %q,\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += classificationString + toJSONString(identifiers.Catalog[k.identifier].CategoryClassification) + \",\\n}\\n]\\n,\"", + "\t\t}", + "\t}", + "\toutString += \"}\"", + "\treturn outString", + "}" + ] + }, + { + "name": "runHandler", + "qualifiedName": "runHandler", + "exported": false, + "signature": "func(http.ResponseWriter, *http.Request)()", + "doc": "runHandler Triggers Cert Suite tests from an HTTP request\n\nThe handler reads form data containing JSON options and a kubeconfig file,\nwrites the config to a temporary file, updates the test configuration YAML,\nand then runs the Cert Suite with the supplied labels filter. 
It logs\nprogress, handles errors by writing HTTP error responses or logging fatal\nmessages, and finally returns a JSON success message.\n\nnolint:funlen", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:265", + "calls": [ + { + "pkgPath": "bytes", + "name": "NewBufferString", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "SetLogger", + "kind": "function", + "source": [ + "func SetLogger(l *Logger) {", + "\tglobalLogger = l", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "GetMultiLogger", + "kind": "function", + "source": [ + "func GetMultiLogger(writers ...io.Writer) *Logger {", + "\topts := slog.HandlerOptions{", + "\t\tLevel: globalLogLevel,", + "\t\tReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {", + "\t\t\tif a.Key == slog.LevelKey {", + "\t\t\t\tlevel := a.Value.Any().(slog.Level)", + "\t\t\t\tlevelLabel, exists := CustomLevelNames[level]", + "\t\t\t\tif !exists {", + "\t\t\t\t\tlevelLabel = level.String()", + "\t\t\t\t}", + "", + "\t\t\t\ta.Value = slog.StringValue(levelLabel)", + "\t\t\t}", + "", + "\t\t\treturn a", + "\t\t},", + "\t}", + "", + "\tvar handlers []slog.Handler", + "\tif globalLogger != nil {", + "\t\thandlers = []slog.Handler{globalLogger.l.Handler()}", + "\t}", + "", + "\tfor _, writer := range writers {", + "\t\thandlers = append(handlers, NewCustomHandler(writer, \u0026opts))", + "\t}", + "", + "\treturn \u0026Logger{l: slog.New(NewMultiHandler(handlers...))}", + "}" + ] + }, + { + "name": "FormValue", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "fmt", + "name": "Println", + "kind": "function" + }, + { + "name": "FormFile", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "CreateTemp", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Name", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Remove", + "kind": "function" + }, + { + "name": "Name", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "name": "Name", + "kind": "function" + }, + { + "pkgPath": "io", + "name": "Copy", + "kind": "function" + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "name": "Close", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Name", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "os", + "name": "ReadFile", + "kind": "function" + }, + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "updateTnf", + "kind": "function", + "source": [ + "func updateTnf(tnfConfig []byte, data *RequestedData) []byte {", + "\t// Unmarshal the YAML data into a Config struct", + "\tvar config configuration.TestConfiguration", + "", + "\terr := yaml.Unmarshal(tnfConfig, \u0026config)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error unmarshalling YAML: %v\", err)", + "\t}", + "", + "\t// Modify the configuration", + "\tvar namespace []configuration.Namespace", + "\tfor _, tnamespace := range data.TargetNameSpaces {", + "\t\tnamespace = append(namespace, configuration.Namespace{Name: tnamespace})", + "\t}", + "\tconfig.TargetNameSpaces = namespace", + "", + "\tconfig.PodsUnderTestLabels = data.PodsUnderTestLabels", + "", + "\tconfig.OperatorsUnderTestLabels = data.OperatorsUnderTestLabels", + "", + "\tvar managedDeployments []configuration.ManagedDeploymentsStatefulsets", + "\tfor _, val := range data.ManagedDeployments {", + "\t\tmanagedDeployments = append(managedDeployments, configuration.ManagedDeploymentsStatefulsets{Name: val})", + "\t}", + "\tconfig.ManagedDeployments = managedDeployments", + "", + "\tvar managedStatefulsets []configuration.ManagedDeploymentsStatefulsets", + "\tfor _, val := range data.ManagedDeployments {", + "\t\tmanagedStatefulsets = append(managedStatefulsets, configuration.ManagedDeploymentsStatefulsets{Name: val})", + "\t}", + "\tconfig.ManagedStatefulsets = managedStatefulsets", + "", + "\tvar crdFilter []configuration.CrdFilter", + "\tfor i := range data.TargetCrdFiltersnameSuffix {", + "\t\tval := true", + "\t\tif data.TargetCrdFiltersscalable[i] == \"false\" {", + "\t\t\tval = false", + "\t\t}", 
+ "\t\tcrdFilter = append(crdFilter, configuration.CrdFilter{NameSuffix: data.TargetCrdFiltersnameSuffix[i],", + "\t\t\tScalable: val})", + "\t}", + "\tconfig.CrdFilters = crdFilter", + "", + "\tvar acceptedKernelTaints []configuration.AcceptedKernelTaintsInfo", + "\tfor _, val := range data.AcceptedKernelTaints {", + "\t\tacceptedKernelTaints = append(acceptedKernelTaints, configuration.AcceptedKernelTaintsInfo{Module: val})", + "\t}", + "\tconfig.AcceptedKernelTaints = acceptedKernelTaints", + "", + "\tvar skipHelmChartList []configuration.SkipHelmChartList", + "\tfor _, val := range data.SkipHelmChartList {", + "\t\tskipHelmChartList = append(skipHelmChartList, configuration.SkipHelmChartList{Name: val})", + "\t}", + "\tconfig.SkipHelmChartList = skipHelmChartList", + "", + "\tvar skipScalingTestDeployments []configuration.SkipScalingTestDeploymentsInfo", + "\tfor i := range data.SkipScalingTestDeploymentsname {", + "\t\tskipScalingTestDeployments = append(skipScalingTestDeployments, configuration.SkipScalingTestDeploymentsInfo{Name: data.SkipScalingTestDeploymentsname[i],", + "\t\t\tNamespace: data.SkipScalingTestDeploymentsnamespace[i]})", + "\t}", + "\tconfig.SkipScalingTestDeployments = skipScalingTestDeployments", + "", + "\tvar skipScalingTestStatefulSets []configuration.SkipScalingTestStatefulSetsInfo", + "\tfor i := range data.SkipScalingTestStatefulsetsname {", + "\t\tskipScalingTestStatefulSets = append(skipScalingTestStatefulSets, configuration.SkipScalingTestStatefulSetsInfo{Name: data.SkipScalingTestStatefulsetsname[i],", + "\t\t\tNamespace: data.SkipScalingTestStatefulsetsnamespace[i]})", + "\t}", + "\tconfig.SkipScalingTestStatefulSets = skipScalingTestStatefulSets", + "", + "\tconfig.ServicesIgnoreList = data.Servicesignorelist", + "\tconfig.ValidProtocolNames = data.ValidProtocolNames", + "\tif len(data.CollectorAppPassword) \u003e 0 {", + "\t\tconfig.CollectorAppPassword = data.CollectorAppPassword[0]", + "\t}", + "\tif len(data.ExecutedBy) 
\u003e 0 {", + "\t\tconfig.ExecutedBy = data.ExecutedBy[0]", + "\t}", + "\tif len(data.PartnerName) \u003e 0 {", + "\t\tconfig.PartnerName = data.PartnerName[0]", + "\t}", + "\tif len(data.ProbeDaemonSetNamespace) \u003e 0 {", + "\t\tconfig.ProbeDaemonSetNamespace = data.ProbeDaemonSetNamespace[0]", + "\t}", + "\tif len(data.ConnectAPIKey) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.APIKey = data.ConnectAPIKey[0]", + "\t}", + "\tif len(data.ConnectProjectID) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProjectID = data.ConnectProjectID[0]", + "\t}", + "\tif len(data.ConnectAPIBaseURL) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.BaseURL = data.ConnectAPIBaseURL[0]", + "\t}", + "\tif len(data.ConnectAPIProxyURL) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProxyURL = data.ConnectAPIProxyURL[0]", + "\t}", + "\tif len(data.ConnectAPIProxyPort) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProxyPort = data.ConnectAPIProxyPort[0]", + "\t}", + "", + "\t// Serialize the modified config back to YAML format", + "\tnewData, err := yaml.Marshal(\u0026config)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error marshaling YAML: %v\", err)", + "\t}", + "\treturn newData", + "}" + ] + }, + { + "pkgPath": "os", + "name": "WriteFile", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder", + "name": "GetNewClientsHolder", + "kind": "function", + "source": [ + "func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder {", + "\t_, err := newClientsHolder(kubeconfigFile)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Failed to create k8s clients holder, err: %v\", err)", + "\t}", + "", + "\treturn \u0026clientsHolder", + "}" + ] + }, + { + "name": "Name", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "LoadChecksDB", + "kind": "function", + "source": [ + "func LoadChecksDB(labelsExpr string) {", + "\tLoadInternalChecksDB()", + "", + "\tif preflight.ShouldRun(labelsExpr) {", + "\t\tpreflight.LoadChecks()", + "\t}", + "}" + ] + }, + { + "name": "Value", + "kind": "function" + }, + { + "name": "Context", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb", + "name": "InitLabelsExprEvaluator", + "kind": "function", + "source": [ + "func InitLabelsExprEvaluator(labelsFilter string) error {", + "\t// Expand the abstract \"all\" label into actual existing labels", + "\tif labelsFilter == \"all\" {", + "\t\tallTags := []string{identifiers.TagCommon, identifiers.TagExtended,", + "\t\t\tidentifiers.TagFarEdge, identifiers.TagTelco}", + "\t\tlabelsFilter = strings.Join(allTags, \",\")", + "\t}", + "", + "\teval, err := labels.NewLabelsExprEvaluator(labelsFilter)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not create a label evaluator, err: %v\", err)", + "\t}", + "", + "\tlabelsExprEvaluator = eval", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "CreateGlobalLogFile", + "kind": "function", + "source": [ + "func CreateGlobalLogFile(outputDir, logLevel string) error {", + "\tlogFilePath := outputDir + \"/\" + LogFileName", + "\terr := os.Remove(logFilePath)", + "\tif err != nil \u0026\u0026 !os.IsNotExist(err) {", + "\t\treturn fmt.Errorf(\"could not delete old log file, err: %v\", err)", + "\t}", + "", + 
"\tlogFile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE, LogFilePermissions)", + "\tif err != nil {", + "\t\treturn fmt.Errorf(\"could not open a new log file, err: %v\", err)", + "\t}", + "", + "\tSetupLogger(logFile, logLevel)", + "\tglobalLogFile = logFile", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Fprintf", + "kind": "function" + }, + { + "pkgPath": "os", + "name": "Exit", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite", + "name": "Run", + "kind": "function", + "source": [ + "func Run(labelsFilter, outputFolder string) error {", + "\ttestParams := configuration.GetTestParameters()", + "", + "\tfmt.Println(\"Running discovery of CNF target resources...\")", + "\tfmt.Print(\"\\n\")", + "", + "\tenv := provider.GetTestEnvironment()", + "", + "\tlog.Info(\"Running checks matching labels expr %q with timeout %v\", labelsFilter, testParams.Timeout)", + "\tstartTime := time.Now()", + "\tfailedCtr, err := checksdb.RunChecks(testParams.Timeout)", + "\tif err != nil {", + "\t\tlog.Error(\"%v\", err)", + "\t}", + "\tendTime := time.Now()", + "\tlog.Info(\"Finished running checks in %v\", endTime.Sub(startTime))", + "", + "\tclaimOutputFile := filepath.Join(outputFolder, claimFileName)", + "", + "\toc := clientsholder.GetClientsHolder()", + "\t_, allPods := autodiscover.FindPodsByLabels(oc.K8sClient.CoreV1(), autodiscover.CreateLabels(env.Config.PodsUnderTestLabels), env.Namespaces)", + "\tenv.PodStates.AfterExecution = autodiscover.CountPodsByStatus(allPods)", + "\tif env.PodStates.BeforeExecution[\"ready\"] != env.PodStates.AfterExecution[\"ready\"] {", + "\t\tlog.Warn(\"Some pods were not ready during entire 
test execution. See %s podStates section for more details\", claimOutputFile)", + "\t}", + "", + "\tclaimBuilder, err := claimhelper.NewClaimBuilder(\u0026env)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Failed to get claim builder: %v\", err)", + "\t}", + "", + "\tif failedCtr \u003e 0 {", + "\t\tlog.Warn(\"Some checks failed. See %s for details\", claimOutputFile)", + "\t}", + "", + "\t// Marshal the claim and output to file", + "\tclaimBuilder.Build(claimOutputFile)", + "", + "\t// Create JUnit file if required", + "\tif configuration.GetTestParameters().EnableXMLCreation {", + "\t\tjunitOutputFileName := filepath.Join(outputFolder, junitXMLOutputFileName)", + "\t\tlog.Info(\"JUnit XML file creation is enabled. Creating JUnit XML file: %s\", junitOutputFileName)", + "\t\tclaimBuilder.ToJUnitXML(junitOutputFileName, startTime, endTime)", + "\t}", + "", + "\tif configuration.GetTestParameters().SanitizeClaim {", + "\t\tclaimOutputFile, err = claimhelper.SanitizeClaimFile(claimOutputFile, configuration.GetTestParameters().LabelsFilter)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to sanitize claim file: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Send claim file to the collector if specified by env var", + "\tif configuration.GetTestParameters().EnableDataCollection {", + "\t\tif env.CollectorAppEndpoint == \"\" {", + "\t\t\tenv.CollectorAppEndpoint = collectorAppURL", + "\t\t}", + "", + "\t\terr = collector.SendClaimFileToCollector(env.CollectorAppEndpoint, claimOutputFile, env.ExecutedBy, env.PartnerName, env.CollectorAppPassword)", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to send post request to the collector: %v\", err)", + "\t\t}", + "\t}", + "", + "\t// Create HTML artifacts for the web results viewer/parser.", + "\tresultsOutputDir := outputFolder", + "\twebFilePaths, err := results.CreateResultsWebFiles(resultsOutputDir, claimFileName)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to create results web files: %v\", err)", + 
"\t}", + "", + "\tallArtifactsFilePaths := []string{filepath.Join(outputFolder, claimFileName)}", + "", + "\t// Add all the web artifacts file paths.", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, webFilePaths...)", + "", + "\t// Add the log file path", + "\tallArtifactsFilePaths = append(allArtifactsFilePaths, filepath.Join(outputFolder, log.LogFileName))", + "", + "\t// Override the env vars if they are not set.", + "\tif env.ConnectAPIKey == \"\" {", + "\t\tenv.ConnectAPIKey = configuration.GetTestParameters().ConnectAPIKey", + "\t}", + "", + "\tif env.ConnectProjectID == \"\" {", + "\t\tenv.ConnectProjectID = configuration.GetTestParameters().ConnectProjectID", + "\t}", + "", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = configuration.GetTestParameters().ConnectAPIBaseURL", + "\t}", + "", + "\t// Default the base URL to the Red Hat Connect API if not set.", + "\t// This is if the config file does not have the base URL set and the cmd line", + "\t// does not have the base URL set.", + "\tif env.ConnectAPIBaseURL == \"\" {", + "\t\tenv.ConnectAPIBaseURL = \"https://access.redhat.com/hydra/cwe/rest/v1.0\"", + "\t}", + "", + "\tif env.ConnectAPIProxyURL == \"\" {", + "\t\tenv.ConnectAPIProxyURL = configuration.GetTestParameters().ConnectAPIProxyURL", + "\t}", + "", + "\tif env.ConnectAPIProxyPort == \"\" {", + "\t\tenv.ConnectAPIProxyPort = configuration.GetTestParameters().ConnectAPIProxyPort", + "\t}", + "", + "\t// Red Hat Connect API key and project ID are required to send the tar.gz to Red Hat Connect.", + "\tsendToConnectAPI := false", + "\tif env.ConnectAPIKey != \"\" \u0026\u0026 env.ConnectProjectID != \"\" {", + "\t\tlog.Info(\"Sending results to Red Hat Connect API for project ID %s\", env.ConnectProjectID)", + "\t\tsendToConnectAPI = true", + "\t} else {", + "\t\tlog.Info(\"Red Hat Connect API key and project ID are not set. 
Results will not be sent to Red Hat Connect.\")", + "\t}", + "", + "\tvar zipFile string", + "", + "\t// tar.gz file creation with results and html artifacts, unless omitted by env var.", + "\tif !configuration.GetTestParameters().OmitArtifactsZipFile || sendToConnectAPI {", + "\t\tzipFile, err = results.CompressResultsArtifacts(resultsOutputDir, allArtifactsFilePaths)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to compress results artifacts: %v\", err)", + "\t\t}", + "", + "\t\tif sendToConnectAPI {", + "\t\t\tlog.Debug(\"Get CertificationID from the Red Hat Connect API\")", + "\t\t\tcertificationID, err := results.GetCertIDFromConnectAPI(", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectProjectID,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tif certificationID == \"\" {", + "\t\t\t\tlog.Fatal(\"Failed to get CertificationID from Red Hat Connect\")", + "\t\t\t}", + "", + "\t\t\tlog.Debug(\"Sending ZIP file %s to Red Hat Connect\", zipFile)", + "\t\t\terr = results.SendResultsToConnectAPI(zipFile,", + "\t\t\t\tenv.ConnectAPIKey,", + "\t\t\t\tenv.ConnectAPIBaseURL,", + "\t\t\t\tcertificationID,", + "\t\t\t\tenv.ConnectAPIProxyURL,", + "\t\t\t\tenv.ConnectAPIProxyPort)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to send results to Red Hat Connect: %v\", err)", + "\t\t\t}", + "", + "\t\t\tlog.Info(\"Results successfully sent to Red Hat Connect with CertificationID %s\", certificationID)", + "\t\t}", + "\t}", + "", + "\tif configuration.GetTestParameters().OmitArtifactsZipFile \u0026\u0026 zipFile != \"\" {", + "\t\t// delete the zip as the user does not want it.", + "\t\terr = os.Remove(zipFile)", + "\t\tif err != nil {", + "\t\t\tlog.Fatal(\"Failed to remove zip file %s: %v\", zipFile, err)", + "\t\t}", + "\t}", + "", + "\t// 
Remove web artifacts if user does not want them.", + "\tif !configuration.GetTestParameters().IncludeWebFilesInOutputFolder {", + "\t\tfor _, file := range webFilePaths {", + "\t\t\terr := os.Remove(file)", + "\t\t\tif err != nil {", + "\t\t\t\tlog.Fatal(\"Failed to remove web file %s: %v\", file, err)", + "\t\t\t}", + "\t\t}", + "\t}", + "", + "\treturn nil", + "}" + ] + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "fmt", + "name": "Sprintf", + "kind": "function" + }, + { + "pkgPath": "strings", + "name": "Join", + "kind": "function" + }, + { + "pkgPath": "encoding/json", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": "function" + }, + { + "name": "Error", + "kind": "function" + }, + { + "name": "Set", + "kind": "function" + }, + { + "name": "Header", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Info", + "kind": "function", + "source": [ + "func (logger *Logger) Info(msg string, args ...any) {", + "\tLogf(logger, LevelInfo, msg, args...)", + "}" + ] + }, + { + "name": "Write", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Error", + "kind": "function", + "source": [ + "func (logger *Logger) Error(msg string, args ...any) {", + "\tLogf(logger, LevelError, msg, args...)", + "}" + ] + }, + { + "pkgPath": "net/http", + "name": "Error", + "kind": 
"function" + }, + { + "name": "Error", + "kind": "function" + } + ], + "calledBy": null, + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + 
"\tlog.Info(\"Web Server Labels filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). 
Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + }, + { + "name": "toJSONString", + "qualifiedName": "toJSONString", + "exported": false, + "signature": "func(map[string]string)(string)", + "doc": "toJSONString Formats a map into an indented JSON string\n\nThe function takes a key/value map of strings, marshals it with indentation\nto produce readable JSON, and returns the result as a string. 
If marshalling\nfails, it simply returns an empty string.", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:542", + "calls": [ + { + "pkgPath": "encoding/json", + "name": "MarshalIndent", + "kind": "function" + }, + { + "name": "string", + "kind": "function" + } + ], + "calledBy": [ + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "outputTestCases", + "kind": "function", + "source": [ + "func outputTestCases() (outString string) {", + "\t// Building a separate data structure to store the key order for the map", + "\tkeys := make([]claim.Identifier, 0, len(identifiers.Catalog))", + "\tfor k := range identifiers.Catalog {", + "\t\tkeys = append(keys, k)", + "\t}", + "", + "\t// Sorting the map by identifier ID", + "\tsort.Slice(keys, func(i, j int) bool {", + "\t\treturn keys[i].Id \u003c keys[j].Id", + "\t})", + "", + "\tcatalog := CreatePrintableCatalogFromIdentifiers(keys)", + "\tif catalog == nil {", + "\t\treturn", + "\t}", + "\t// we need the list of suite's names", + "\tsuites := GetSuitesFromIdentifiers(keys)", + "", + "\t// Sort the list of suite names", + "\tsort.Strings(suites)", + "", + "\t// Iterating the map by test and suite names", + "\toutString = \"classification= {\\n\"", + "\tfor _, suite := range suites {", + "\t\tfor _, k := range catalog[suite] {", + "\t\t\tclassificationString := \"\\\"categoryClassification\\\": \"", + "\t\t\t// Every paragraph starts with a new line.", + "", + "\t\t\toutString += fmt.Sprintf(\"%q: [\\n{\\n\", k.identifier.Id)", + "\t\t\toutString += fmt.Sprintf(\"\\\"description\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Description, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"remediation\\\": %q,\\n\", strings.ReplaceAll(strings.ReplaceAll(identifiers.Catalog[k.identifier].Remediation, \"\\n\", \" \"), \"\\\"\", \" \"))", + "\t\t\toutString += fmt.Sprintf(\"\\\"bestPracticeReference\\\": 
%q,\\n\", strings.ReplaceAll(identifiers.Catalog[k.identifier].BestPracticeReference, \"\\n\", \" \"))", + "\t\t\toutString += classificationString + toJSONString(identifiers.Catalog[k.identifier].CategoryClassification) + \",\\n}\\n]\\n,\"", + "\t\t}", + "\t}", + "\toutString += \"}\"", + "\treturn outString", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func toJSONString(data map[string]string) string {", + "\t// Convert the map to a JSON-like string", + "\tjsonbytes, err := json.MarshalIndent(data, \"\", \" \")", + "\tif err != nil {", + "\t\treturn \"\"", + "\t}", + "", + "\treturn string(jsonbytes)", + "}" + ] + }, + { + "name": "updateTnf", + "qualifiedName": "updateTnf", + "exported": false, + "signature": "func([]byte, *RequestedData)([]byte)", + "doc": "updateTnf Updates a YAML configuration with user-provided data\n\nThis function parses an existing YAML configuration into a struct, then\noverwrites numerous fields such as namespaces, labels, deployment lists,\nfilters, and connection settings based on the supplied RequestedData. After\nall updates are applied, it serializes the struct back to YAML and returns\nthe byte slice. 
Errors during unmarshalling or marshalling cause fatal log\nentries that terminate the program.\n\nnolint:funlen,gocyclo", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:383", + "calls": [ + { + "pkgPath": "gopkg.in/yaml.v3", + "name": "Unmarshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "append", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "name": "len", + "kind": "function" + }, + { + "pkgPath": "gopkg.in/yaml.v3", + "name": "Marshal", + "kind": "function" + }, + { + "pkgPath": "github.com/redhat-best-practices-for-k8s/certsuite/internal/log", + "name": "Logger.Fatal", + "kind": "function", + "source": [ + "func (logger *Logger) Fatal(msg string, args ...any) {", + "\tLogf(logger, LevelFatal, msg, args...)", + "\tfmt.Fprintf(os.Stderr, \"\\nFATAL: \"+msg+\"\\n\", args...)", + "\tos.Exit(1)", + "}" + ] + } + ], + "calledBy": [ + { + "pkgPath": 
"github.com/redhat-best-practices-for-k8s/certsuite/webserver", + "name": "runHandler", + "kind": "function", + "source": [ + "func runHandler(w http.ResponseWriter, r *http.Request) {", + "\tbuf = bytes.NewBufferString(\"\")", + "\t// The log output will be written to the log file and to this buffer buf", + "\tlog.SetLogger(log.GetMultiLogger(buf))", + "", + "\tjsonData := r.FormValue(\"jsonData\") // \"jsonData\" is the name of the JSON input field", + "\tvar data RequestedData", + "\tif err := json.Unmarshal([]byte(jsonData), \u0026data); err != nil {", + "\t\tfmt.Println(\"Error:\", err)", + "\t}", + "\tflattenedOptions := data.SelectedOptions", + "", + "\t// Get the file from the request", + "\tfile, fileHeader, err := r.FormFile(\"kubeConfigPath\") // \"fileInput\" is the name of the file input field", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to retrieve file from form\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "\tdefer file.Close()", + "", + "\tlog.Info(\"Kubeconfig file name received: %s\", fileHeader.Filename)", + "\tkubeconfigTempFile, err := os.CreateTemp(\"\", \"webserver-kubeconfig-*\")", + "\tif err != nil {", + "\t\thttp.Error(w, \"Failed to create temp file to store the kubeconfig content.\", http.StatusBadRequest)", + "\t\treturn", + "\t}", + "", + "\tdefer func() {", + "\t\tlog.Info(\"Removing temporary kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\terr = os.Remove(kubeconfigTempFile.Name())", + "\t\tif err != nil {", + "\t\t\tlog.Error(\"Failed to remove temp kubeconfig file %s\", kubeconfigTempFile.Name())", + "\t\t}", + "\t}()", + "", + "\t_, err = io.Copy(kubeconfigTempFile, file)", + "\tif err != nil {", + "\t\thttp.Error(w, \"Unable to copy file\", http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t_ = kubeconfigTempFile.Close()", + "", + "\tlog.Info(\"Web Server kubeconfig file : %v (copied into %v)\", fileHeader.Filename, kubeconfigTempFile.Name())", + "\tlog.Info(\"Web Server Labels 
filter : %v\", flattenedOptions)", + "", + "\ttnfConfig, err := os.ReadFile(\"config/certsuite_config.yml\")", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error reading YAML file: %v\", err) //nolint:gocritic // exitAfterDefer", + "\t}", + "", + "\tnewData := updateTnf(tnfConfig, \u0026data)", + "", + "\t// Write the modified YAML data back to the file", + "\tvar filePerm fs.FileMode = 0o644 // owner can read/write, group and others can only read", + "\terr = os.WriteFile(\"config/certsuite_config.yml\", newData, filePerm)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error writing YAML file: %v\", err)", + "\t}", + "\tlabelsFilter := strings.Join(flattenedOptions, \",\")", + "", + "\t_ = clientsholder.GetNewClientsHolder(kubeconfigTempFile.Name())", + "\tcertsuite.LoadChecksDB(labelsFilter)", + "", + "\toutputFolder := r.Context().Value(outputFolderCtxKey).(string)", + "", + "\tif err := checksdb.InitLabelsExprEvaluator(labelsFilter); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Failed to initialize a test case label evaluator, err: %v\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tif err := log.CreateGlobalLogFile(outputFolder, \"debug\"); err != nil {", + "\t\tfmt.Fprintf(os.Stderr, \"Could not create the log file, err: %v\\n\", err)", + "\t\tos.Exit(1)", + "\t}", + "", + "\tlog.Info(\"Running CNF Cert Suite (web-mode). 
Labels filter: %s, outputFolder: %s\", labelsFilter, outputFolder)", + "\terr = certsuite.Run(labelsFilter, outputFolder)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to run CNF Cert Suite: %v\", err)", + "\t}", + "", + "\t// Return the result as JSON", + "\tresponse := struct {", + "\t\tMessage string `json:\"Message\"`", + "\t}{", + "\t\tMessage: fmt.Sprintf(\"Succeeded to run %s\", strings.Join(flattenedOptions, \" \")),", + "\t}", + "\t// Serialize the response data to JSON", + "\tjsonResponse, err := json.Marshal(response)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to marshal jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "", + "\t// Set the Content-Type header to specify that the response is JSON", + "\tw.Header().Set(\"Content-Type\", \"application/json\")", + "\t// Write the JSON response to the client", + "\tlog.Info(\"Sending web response: %v\", response)", + "\t_, err = w.Write(jsonResponse)", + "\tif err != nil {", + "\t\tlog.Error(\"Failed to write jsonResponse: %v\", err)", + "\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)", + "\t\treturn", + "\t}", + "}" + ] + } + ], + "usesTypes": null, + "usesGlobals": null, + "source": [ + "func updateTnf(tnfConfig []byte, data *RequestedData) []byte {", + "\t// Unmarshal the YAML data into a Config struct", + "\tvar config configuration.TestConfiguration", + "", + "\terr := yaml.Unmarshal(tnfConfig, \u0026config)", + "\tif err != nil {", + "\t\tlog.Fatal(\"Error unmarshalling YAML: %v\", err)", + "\t}", + "", + "\t// Modify the configuration", + "\tvar namespace []configuration.Namespace", + "\tfor _, tnamespace := range data.TargetNameSpaces {", + "\t\tnamespace = append(namespace, configuration.Namespace{Name: tnamespace})", + "\t}", + "\tconfig.TargetNameSpaces = namespace", + "", + "\tconfig.PodsUnderTestLabels = data.PodsUnderTestLabels", + "", + "\tconfig.OperatorsUnderTestLabels = 
data.OperatorsUnderTestLabels", + "", + "\tvar managedDeployments []configuration.ManagedDeploymentsStatefulsets", + "\tfor _, val := range data.ManagedDeployments {", + "\t\tmanagedDeployments = append(managedDeployments, configuration.ManagedDeploymentsStatefulsets{Name: val})", + "\t}", + "\tconfig.ManagedDeployments = managedDeployments", + "", + "\tvar managedStatefulsets []configuration.ManagedDeploymentsStatefulsets", + "\tfor _, val := range data.ManagedDeployments {", + "\t\tmanagedStatefulsets = append(managedStatefulsets, configuration.ManagedDeploymentsStatefulsets{Name: val})", + "\t}", + "\tconfig.ManagedStatefulsets = managedStatefulsets", + "", + "\tvar crdFilter []configuration.CrdFilter", + "\tfor i := range data.TargetCrdFiltersnameSuffix {", + "\t\tval := true", + "\t\tif data.TargetCrdFiltersscalable[i] == \"false\" {", + "\t\t\tval = false", + "\t\t}", + "\t\tcrdFilter = append(crdFilter, configuration.CrdFilter{NameSuffix: data.TargetCrdFiltersnameSuffix[i],", + "\t\t\tScalable: val})", + "\t}", + "\tconfig.CrdFilters = crdFilter", + "", + "\tvar acceptedKernelTaints []configuration.AcceptedKernelTaintsInfo", + "\tfor _, val := range data.AcceptedKernelTaints {", + "\t\tacceptedKernelTaints = append(acceptedKernelTaints, configuration.AcceptedKernelTaintsInfo{Module: val})", + "\t}", + "\tconfig.AcceptedKernelTaints = acceptedKernelTaints", + "", + "\tvar skipHelmChartList []configuration.SkipHelmChartList", + "\tfor _, val := range data.SkipHelmChartList {", + "\t\tskipHelmChartList = append(skipHelmChartList, configuration.SkipHelmChartList{Name: val})", + "\t}", + "\tconfig.SkipHelmChartList = skipHelmChartList", + "", + "\tvar skipScalingTestDeployments []configuration.SkipScalingTestDeploymentsInfo", + "\tfor i := range data.SkipScalingTestDeploymentsname {", + "\t\tskipScalingTestDeployments = append(skipScalingTestDeployments, configuration.SkipScalingTestDeploymentsInfo{Name: data.SkipScalingTestDeploymentsname[i],", + 
"\t\t\tNamespace: data.SkipScalingTestDeploymentsnamespace[i]})", + "\t}", + "\tconfig.SkipScalingTestDeployments = skipScalingTestDeployments", + "", + "\tvar skipScalingTestStatefulSets []configuration.SkipScalingTestStatefulSetsInfo", + "\tfor i := range data.SkipScalingTestStatefulsetsname {", + "\t\tskipScalingTestStatefulSets = append(skipScalingTestStatefulSets, configuration.SkipScalingTestStatefulSetsInfo{Name: data.SkipScalingTestStatefulsetsname[i],", + "\t\t\tNamespace: data.SkipScalingTestStatefulsetsnamespace[i]})", + "\t}", + "\tconfig.SkipScalingTestStatefulSets = skipScalingTestStatefulSets", + "", + "\tconfig.ServicesIgnoreList = data.Servicesignorelist", + "\tconfig.ValidProtocolNames = data.ValidProtocolNames", + "\tif len(data.CollectorAppPassword) \u003e 0 {", + "\t\tconfig.CollectorAppPassword = data.CollectorAppPassword[0]", + "\t}", + "\tif len(data.ExecutedBy) \u003e 0 {", + "\t\tconfig.ExecutedBy = data.ExecutedBy[0]", + "\t}", + "\tif len(data.PartnerName) \u003e 0 {", + "\t\tconfig.PartnerName = data.PartnerName[0]", + "\t}", + "\tif len(data.ProbeDaemonSetNamespace) \u003e 0 {", + "\t\tconfig.ProbeDaemonSetNamespace = data.ProbeDaemonSetNamespace[0]", + "\t}", + "\tif len(data.ConnectAPIKey) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.APIKey = data.ConnectAPIKey[0]", + "\t}", + "\tif len(data.ConnectProjectID) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProjectID = data.ConnectProjectID[0]", + "\t}", + "\tif len(data.ConnectAPIBaseURL) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.BaseURL = data.ConnectAPIBaseURL[0]", + "\t}", + "\tif len(data.ConnectAPIProxyURL) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProxyURL = data.ConnectAPIProxyURL[0]", + "\t}", + "\tif len(data.ConnectAPIProxyPort) \u003e 0 {", + "\t\tconfig.ConnectAPIConfig.ProxyPort = data.ConnectAPIProxyPort[0]", + "\t}", + "", + "\t// Serialize the modified config back to YAML format", + "\tnewData, err := yaml.Marshal(\u0026config)", + "\tif err != nil {", + 
"\t\tlog.Fatal(\"Error marshaling YAML: %v\", err)", + "\t}", + "\treturn newData", + "}" + ] + } + ], + "globals": [ + { + "name": "buf", + "exported": false, + "type": "*bytes.Buffer", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:60" + }, + { + "name": "index", + "exported": false, + "type": "[]byte", + "doc": "go:embed index.js", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:58" + }, + { + "name": "indexHTML", + "exported": false, + "type": "[]byte", + "doc": "go:embed index.html", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:46" + }, + { + "name": "logs", + "exported": false, + "type": "[]byte", + "doc": "go:embed logs.js", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:52" + }, + { + "name": "outputFolderCtxKey", + "exported": false, + "type": "webServerContextKey", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:42" + }, + { + "name": "submit", + "exported": false, + "type": "[]byte", + "doc": "go:embed submit.js", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:49" + }, + { + "name": "toast", + "exported": false, + "type": "[]byte", + "doc": "go:embed toast.js", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:55" + }, + { + "name": "upgrader", + "exported": false, + "type": "", + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:62" + } + ], + "consts": [ + { + "name": "logTimeout", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:36" + }, + { + "name": "readTimeoutSeconds", + "exported": false, + "position": "/Users/deliedit/dev/certsuite/webserver/webserver.go:38" + } + ] + } +] diff --git a/docs/cmd/certsuite/check/check.md b/docs/cmd/certsuite/check/check.md new file mode 100644 index 000000000..2934625cf --- /dev/null +++ b/docs/cmd/certsuite/check/check.md @@ -0,0 +1,102 @@ +# Package check + +**Path**: `cmd/certsuite/check` + +## Table of Contents + 
+- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) + +## Overview + +The `check` package provides the top‑level *check* command for the Certsuite CLI, exposing subcommands that perform image certificate status checks and display results. It is used when a user wants to run certification checks from the command line. + +### Key Features + +- Instantiates a Cobra command hierarchy with child commands for image status and result reporting +- Encapsulates command registration logic in a single `NewCommand` function +- Keeps CLI command definitions modular by importing subcommand packages + +### Design Notes + +- Uses Cobra to manage command parsing and execution, keeping the package focused on orchestration rather than business logic +- The global `checkCmd` variable is unexported to prevent external modification of the command state +- Best practice: call `NewCommand()` during application initialization and add it to the root command with `AddCommand` + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Instantiates the top‑level *check* command for the Certsuite CLI and registers its child commands. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Instantiates the top‑level *check* command for the Certsuite CLI and registers its child commands. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates the top‑level *check* command for the Certsuite CLI and registers its child commands. | +| **Parameters** | None | +| **Return value** | A pointer to a `cobra.Command` representing the *check* command, ready for inclusion in the root command tree. | +| **Key dependencies** | • `github.com/spf13/cobra` – command construction and registration.
• Calls `imagecert.NewCommand()` and `results.NewCommand()` from subpackages `image_cert_status` and `results`. | +| **Side effects** | Modifies the internal state of the returned `cobra.Command` by adding child commands; no external I/O or concurrency. | +| **How it fits the package** | Provides the central entry point for all “check”‑related functionality, grouping image certification checks and result handling under a single command in the Certsuite CLI. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + check.NewCommand --> cobra.Command{"Create"} + cobra.Command{"Create"} --> AddChild(imagecert.NewCommand) + cobra.Command{"Create"} --> AddChild(results.NewCommand) +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_imagecert.NewCommand + func_NewCommand --> func_results.NewCommand +``` + +#### Functions calling `check.NewCommand` (Mermaid) + +```mermaid +graph TD + newRootCmd --> check.NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking check.NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check" + "github.com/spf13/cobra" +) + +func main() { + root := &cobra.Command{Use: "certsuite"} + root.AddCommand(check.NewCommand()) + if err := root.Execute(); err != nil { + panic(err) + } +} +``` + +--- + +--- diff --git a/docs/cmd/certsuite/check/image_cert_status/imagecert.md b/docs/cmd/certsuite/check/image_cert_status/imagecert.md new file mode 100644 index 000000000..b514995f9 --- /dev/null +++ b/docs/cmd/certsuite/check/image_cert_status/imagecert.md @@ -0,0 +1,222 @@ +# Package imagecert + +**Path**: `cmd/certsuite/check/image_cert_status` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [checkImageCertStatus](#checkimagecertstatus) + +## Overview + +The imagecert package provides a Cobra command that 
registers persistent flags for specifying container images and validates those inputs against the certification database before reporting whether an image is certified. + +### Key Features + +- Registers and enforces flag constraints (required together, mutually exclusive) for image name, registry, tag, digest, and offline DB path using Cobra. +- Retrieves and validates user input, then queries the certification database to determine if the specified image is certified. +- Formats console output with colored status indicators via fatih/color for clear visual feedback. + +### Design Notes + +- Uses Cobra’s PersistentFlags to apply flags across subcommands, ensuring consistency. +- Validates that only one of name/tag/digest can be used at a time, preventing ambiguous queries. +- Outputs human‑readable status; errors are returned to the caller for proper command exit handling. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Registers persistent flags for image name, registry, tag, digest, and offline DB path; sets flag constraints; returns the configured command. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func checkImageCertStatus(cmd *cobra.Command, _ []string) error](#checkimagecertstatus) | Determines if a specified image (by name or digest) is certified, then outputs formatted status. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Registers persistent flags for image name, registry, tag, digest, and offline DB path; sets flag constraints; returns the configured command. + +Creates and configures the Cobra command for checking an image’s certificate status, returning a fully prepared `*cobra.Command`. 
+ +--- + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Registers persistent flags for image name, registry, tag, digest, and offline DB path; sets flag constraints; returns the configured command. | +| **Parameters** | none | +| **Return value** | `*cobra.Command` – the initialized command ready to be added to a parent command. | +| **Key dependencies** | • `PersistentFlags().String()`
• `MarkFlagsRequiredTogether()`
• `MarkFlagsMutuallyExclusive()` | +| **Side effects** | Mutates the global flag set of the returned command; no I/O or concurrency. | +| **How it fits the package** | Provides the CLI entry point for the image‑certificate‑status subcommand within the `imagecert` package, which is later added to the top‑level check command. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Define flags"} + B --> C["Name flag"] + B --> D["Registry flag"] + B --> E["Tag flag"] + B --> F["Digest flag"] + B --> G["Offline‑DB flag"] + C --> H["Set required together: name, registry"] + D --> H + E --> I["Set mutually exclusive: name, digest"] + F --> I + H --> J["Return command"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_PersistentFlags + func_NewCommand --> func_String + func_NewCommand --> func_MarkFlagsRequiredTogether + func_NewCommand --> func_MarkFlagsMutuallyExclusive +``` + +--- + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_checkCmd_AddCommand --> func_NewCommand +``` + +(Only the parent check command adds this subcommand.) + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/image_cert_status" + "github.com/spf13/cobra" +) + +func main() { + rootCmd := &cobra.Command{Use: "certsuite"} + // Add the image‑certificate‑status subcommand + rootCmd.AddCommand(imagecert.NewCommand()) + + if err := rootCmd.Execute(); err != nil { + panic(err) + } +} +``` + +--- + +--- + +## Local Functions + +### checkImageCertStatus + +**checkImageCertStatus** - Determines if a specified image (by name or digest) is certified, then outputs formatted status. + +Checks whether a container image is certified by querying a validator database and prints the result. 
+ +#### Signature (Go) + +```go +func checkImageCertStatus(cmd *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a specified image (by name or digest) is certified, then outputs formatted status. | +| **Parameters** | `cmd *cobra.Command` – command context; `_ []string` – unused arguments placeholder. | +| **Return value** | `error` – non‑nil when missing required input or validator retrieval fails. | +| **Key dependencies** | • `cmd.Flags().GetString` (retrieves flag values)
• `certdb.GetValidator` (loads validation DB)
• `validator.IsContainerCertified` (checks certification)
• `fmt.Printf/Println` (output formatting)
• `color.GreenString` / `color.RedString` (colored status) |
+| **Side effects** | Writes to standard output; may return an error but does not modify global state. |
+| **How it fits the package** | Implements the `image-cert-status` sub‑command for the CertSuite CLI, providing a user‑facing check of image certification status. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Get flag values"] --> B["Load validator"]
+    B -->|"error"| C{"Return error"}
+    B --> D["Determine display context"]
+    D --> E["Print header info"]
+    E --> F["Check certification"]
+    F --> G{"Certified?"}
+    G -- Yes --> H["Print green “Image certified”"]
+    G -- No --> I["Print red “Image not certified”"]
+    H & I --> J["Return nil"]
+```
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_checkImageCertStatus --> certdb.GetValidator
+    func_checkImageCertStatus --> validator.IsContainerCertified
+    func_checkImageCertStatus --> fmt.Printf
+    func_checkImageCertStatus --> fmt.Println
+    func_checkImageCertStatus --> color.GreenString
+    func_checkImageCertStatus --> color.RedString
+```
+
+#### Functions calling `checkImageCertStatus` (Mermaid)
+
+No functions call it directly; it is registered as the `RunE` handler of `checkImageCertStatusCmd`, so Cobra invokes it when the `image-cert-status` sub‑command runs. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking checkImageCertStatus +package main + +import ( + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{} + // Simulate flag setup + _ = cmd.Flags().String("name", "", "image name") + _ = cmd.Flags().String("registry", "", "image registry") + _ = cmd.Flags().String("tag", "", "image tag") + _ = cmd.Flags().String("digest", "", "image digest") + _ = cmd.Flags().String("offline-db", "", "path to offline DB") + + // Invoke the function + if err := checkImageCertStatus(cmd, nil); err != nil { + fmt.Println("Error:", err) + } +} +``` + +--- diff --git a/docs/cmd/certsuite/check/results/results.md b/docs/cmd/certsuite/check/results/results.md new file mode 100644 index 000000000..de75feab2 --- /dev/null +++ b/docs/cmd/certsuite/check/results/results.md @@ -0,0 +1,576 @@ +# Package results + +**Path**: `cmd/certsuite/check/results` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [TestCaseList](#testcaselist) + - [TestResults](#testresults) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [checkResults](#checkresults) + - [generateTemplateFile](#generatetemplatefile) + - [getExpectedTestResults](#getexpectedtestresults) + - [getTestResultsDB](#gettestresultsdb) + - [printTestResultsMismatch](#printtestresultsmismatch) + +## Overview + +The results package provides utilities for managing and validating test result data in the Certsuite CLI, enabling users to generate reference templates, compare actual log outputs against expected outcomes, and report mismatches. 
+
+### Key Features
+
+- Command construction with Cobra that exposes flags for specifying input files and template generation
+- Parsing of log files into a map of test case names to results (pass/skip/fail)
+- YAML template creation and validation against actual results
+
+### Design Notes
+
+- Result status values are hard‑coded constants: pass, skip, fail, miss for missing entries
+- Mismatches trigger an exit with status 1; the CLI prints a formatted table of discrepancies
+- Flags are marked mutually exclusive to prevent conflicting operations (e.g., generating and checking simultaneously)
+
+### Structs Summary
+
+| Name | Purpose |
+|------|----------|
+| [**TestCaseList**](#testcaselist) | Groups test case names by outcome (pass, fail, skip) |
+| [**TestResults**](#testresults) | Holds the collection of individual test case outcomes |
+
+### Exported Functions Summary
+
+| Name | Purpose |
+|------|----------|
+| [func NewCommand() *cobra.Command](#newcommand) | Builds and returns a Cobra command that manages result‑related flags for the Certsuite CLI. |
+
+### Local Functions Summary
+
+| Name | Purpose |
+|------|----------|
+| [func checkResults(cmd *cobra.Command, _ []string) error](#checkresults) | Reads actual test outcomes from a log file, optionally generates a YAML reference template, and verifies that the recorded results match an expected set defined in a template. Exits with status 1 on mismatch. |
+| [func generateTemplateFile(resultsDB map[string]string) error](#generatetemplatefile) | Builds a `TestResults` struct from the supplied result database, serializes it to YAML, and writes the output to the designated template file. |
+| [func getExpectedTestResults(templateFileName string) (map[string]string, error)](#getexpectedtestresults) | Reads a YAML template file and returns a map of test case names to their expected result (`pass`, `skip`, or `fail`). 
| +| [func getTestResultsDB(logFileName string) (map[string]string, error)](#gettestresultsdb) | Reads a log file and builds a map of test‑case names to their recorded results. | +| [func([]string, map[string]string, map[string]string)()](#printtestresultsmismatch) | Displays each mismatched test case in a human‑readable table with columns for the test name, expected result, and actual result. | + +## Structs + +### TestCaseList + +A container for grouping test case identifiers by their execution outcome. + +#### Fields + +| Field | Type | Description | +|-------|--------|-------------| +| Pass | []string | Names or IDs of test cases that completed successfully. | +| Fail | []string | Names or IDs of test cases that failed during execution. | +| Skip | []string | Names or IDs of test cases that were intentionally skipped. | + +#### Purpose + +`TestCaseList` aggregates the results of a test run, separating each case into one of three categories: passed, failed, or skipped. The struct is primarily used to marshal/unmarshal result data in YAML format for reporting and further analysis. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| *None* | | + +--- + +--- + +### TestResults + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| TestCaseList | `TestCaseList` (embedded) | A slice of `TestCase` structures representing each executed test. The embedded field is serialized under the YAML key **testCases**. | + +#### Purpose + +`TestResults` aggregates all results from a suite of tests into a single structure. It serves as the primary return value for test execution routines, allowing consumers to access the list of individual `TestCase` objects via the embedded `TestCaseList`. The struct is serialized/deserialized with YAML using the key `"testCases"`. 
+
+#### Related functions
+
+| Function | Purpose |
+|----------|---------|
+| *none* | No functions directly reference or manipulate this struct in the current codebase. |
+
+---
+
+---
+
+## Exported Functions
+
+### NewCommand
+
+**NewCommand** - Builds and returns a Cobra command that manages result‑related flags for the Certsuite CLI.
+
+#### Signature (Go)
+
+```go
+func NewCommand() *cobra.Command
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Builds and returns a Cobra command that manages result‑related flags for the Certsuite CLI. |
+| **Parameters** | None |
+| **Return value** | `*cobra.Command` – the configured results sub‑command. |
+| **Key dependencies** | • `checkResultsCmd.PersistentFlags().String()`<br/>
• `checkResultsCmd.PersistentFlags().Bool()`
• `checkResultsCmd.MarkFlagsMutuallyExclusive()` | +| **Side effects** | Registers three persistent flags (`template`, `log-file`, `generate-template`) on the command and enforces mutual exclusivity between `template` and `generate-template`. No external I/O or state changes occur. | +| **How it fits the package** | Provides a reusable sub‑command that is added to the top‑level `check` command in `cmd/certsuite/check`. It encapsulates all flag configuration for result handling. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Define persistent flags"} + B --> C["Flag: template"] + B --> D["Flag: log-file"] + B --> E["Flag: generate-template"] + E --> F["MarkFlagsMutuallyExclusive(template, generate-template)"] + F --> G["Return checkResultsCmd"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_String + func_NewCommand --> func_PersistentFlags + func_NewCommand --> func_Bool + func_NewCommand --> func_MarkFlagsMutuallyExclusive +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_CheckCmd.NewCommand --> func_NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results" + "github.com/spf13/cobra" +) + +func main() { + // Create the results sub‑command + cmd := results.NewCommand() + + // The returned command can now be added to a parent command + var root = &cobra.Command{Use: "certsuite"} + root.AddCommand(cmd) + + // Execute the CLI (error handling omitted for brevity) + _ = root.Execute() +} +``` + +--- + +## Local Functions + +### checkResults + +**checkResults** - Reads actual test outcomes from a log file, optionally generates a YAML reference template, and verifies that the recorded results match an expected set defined in a template. Exits with status 1 on mismatch. 
+ +#### Signature (Go) + +```go +func checkResults(cmd *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads actual test outcomes from a log file, optionally generates a YAML reference template, and verifies that the recorded results match an expected set defined in a template. Exits with status 1 on mismatch. | +| **Parameters** | `cmd *cobra.Command` – command instance providing flag values.
`_ []string` – unused argument slice (required by Cobra). | +| **Return value** | `error` – non‑nil if file I/O or parsing fails; otherwise nil after successful validation. | +| **Key dependencies** | • `GetString`, `GetBool`, `Flags` from `cobra.Command`
• `getTestResultsDB` (parses log)
• `generateTemplateFile` (writes YAML)
• `getExpectedTestResults` (reads YAML)
• `printTestResultsMismatch` (formats mismatches)
• `os.Exit`, `fmt.Println` | +| **Side effects** | • Reads from disk (`log-file`, template file).
• May write a generated template to disk.
• Prints diagnostic tables.
• Calls `os.Exit(1)` on mismatch. | +| **How it fits the package** | Central validation routine for the *results* sub‑command; orchestrates log parsing, optional template generation, and result comparison. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get flags"] --> B{"Generate template?"} + B -- yes --> C["generateTemplateFile"] + B -- no --> D["getExpectedTestResults"] + C --> E["Exit with status 0"] + D --> F["Compare actual vs expected"] + F --> G{"Mismatches exist?"} + G -- yes --> H["printTestResultsMismatch"] + H --> I["os.Exit(1)"] + G -- no --> J["Print success"] + J --> K["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_checkResults --> func_GetString + func_checkResults --> func_Flags + func_checkResults --> func_GetBool + func_checkResults --> func_getTestResultsDB + func_checkResults --> func_generateTemplateFile + func_checkResults --> func_getExpectedTestResults + func_checkResults --> func_printTestResultsMismatch + func_checkResults --> fmt_Errorf + func_checkResults --> os_Exit + func_checkResults --> fmt_Println +``` + +#### Functions calling `checkResults` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking checkResults +import ( + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{ + RunE: checkResults, + } + // Define flags expected by checkResults + cmd.Flags().StringP("template", "t", "", "YAML template with expected results") + cmd.Flags().BoolP("generate-template", "g", false, "Generate a reference YAML from the log file") + cmd.Flags().StringP("log-file", "l", "", "Path to the test log file") + + if err := cmd.Execute(); err != nil { + panic(err) + } +} +``` + +--- + +### generateTemplateFile + +**generateTemplateFile** - Builds a `TestResults` struct from the supplied result database, serializes it to YAML, and writes the output to the designated template file. + +#### Signature (Go) + +```go +func generateTemplateFile(resultsDB map[string]string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `TestResults` struct from the supplied result database, serializes it to YAML, and writes the output to the designated template file. | +| **Parameters** | `resultsDB map[string]string – mapping of test case names to their result status (`resultPass`,`resultSkip`, or`resultFail`). | +| **Return value** | `error` – non‑nil if an unknown result is encountered, YAML encoding fails, or the file cannot be written. | +| **Key dependencies** | • `append` (built‑in)
• `fmt.Errorf` (pkg: fmt)
• `yaml.NewEncoder`, `SetIndent`, `Encode` (gopkg.in/yaml.v2)
• `os.WriteFile` (pkg: os) | +| **Side effects** | Writes a file named by `TestResultsTemplateFileName`; may log errors via returned error. No global state changes. | +| **How it fits the package** | Used by the command‑line interface to generate a reference YAML template that represents expected test outcomes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate resultsDB"} + B -->|"Pass"| C["Append to Pass list"] + B -->|"Skip"| D["Append to Skip list"] + B -->|"Fail"| E["Append to Fail list"] + B -->|"Unknown"| F["Return error “unknown test case result”"] + C --> G["Continue loop"] + D --> G + E --> G + G --> H["Encode resultsTemplate to YAML"] + H --> I{"Encoding OK?"} + I -->|"No"| J["Return encoding error"] + I -->|"Yes"| K["Write YAML bytes to file"] + K --> L{"Write OK?"} + L -->|"No"| M["Return write error"] + L -->|"Yes"| N["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_generateTemplateFile --> fmt.Errorf + func_generateTemplateFile --> yaml.NewEncoder + func_generateTemplateFile --> yaml.SetIndent + func_generateTemplateFile --> yaml.Encode + func_generateTemplateFile --> os.WriteFile + func_generateTemplateFile --> bytes.Buffer +``` + +#### Functions calling `generateTemplateFile` (Mermaid) + +```mermaid +graph TD + func_checkResults --> func_generateTemplateFile +``` + +#### Usage example (Go) + +```go +// Minimal example invoking generateTemplateFile +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results" +) + +func main() { + db := map[string]string{ + "testA": "pass", + "testB": "skip", + "testC": "fail", + } + if err := results.generateTemplateFile(db); err != nil { + fmt.Printf("Error generating template: %v\n", err) + } +} +``` + +--- + +### getExpectedTestResults + +**getExpectedTestResults** - Reads a YAML template file and returns a map of test case names to their expected result (`pass`, `skip`, or 
`fail`). + +#### Signature (Go) + +```go +func getExpectedTestResults(templateFileName string) (map[string]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a YAML template file and returns a map of test case names to their expected result (`pass`, `skip`, or `fail`). | +| **Parameters** | `templateFileName string` – path to the YAML template. | +| **Return value** | `map[string]string, error` – mapping from test case identifiers to expected results; an error if file read or YAML parsing fails. | +| **Key dependencies** | • `os.ReadFile` (file I/O)
• `fmt.Errorf` (error formatting)
• `yaml.Unmarshal` (YAML decoding)
• Built‑in `make` for map creation | +| **Side effects** | Reads the specified file; does not modify global state or perform network I/O. | +| **How it fits the package** | Provides the reference results used by the `checkResults` command to validate actual test outcomes against expectations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Open template file"} + B -- Success --> C["Parse YAML into TestResults"] + B -- Failure --> D["Return error via fmt.Errorf"] + C --> E["Create empty map expectedTestResults"] + E --> F{"Iterate Pass list"} + F --> G["Set resultPass for each testCase"] + G --> H{"Iterate Skip list"} + H --> I["Set resultSkip for each testCase"] + I --> J{"Iterate Fail list"} + J --> K["Set resultFail for each testCase"] + K --> L["Return expectedTestResults, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getExpectedTestResults --> os_ReadFile + func_getExpectedTestResults --> fmt_Errorf + func_getExpectedTestResults --> yaml_Unmarshal + func_getExpectedTestResults --> make +``` + +#### Functions calling `getExpectedTestResults` (Mermaid) + +```mermaid +graph TD + func_checkResults --> func_getExpectedTestResults +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getExpectedTestResults +package main + +import ( + "fmt" + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/check/results" +) + +func main() { + templatePath := "expected.yaml" // Path to your YAML template + expected, err := results.getExpectedTestResults(templatePath) + if err != nil { + log.Fatalf("Failed to load expected results: %v", err) + } + fmt.Printf("Loaded %d expected test cases\n", len(expected)) +} +``` + +--- + +### getTestResultsDB + +**getTestResultsDB** - Reads a log file and builds a map of test‑case names to their recorded results. 
+ +#### Signature (Go) + +```go +func getTestResultsDB(logFileName string) (map[string]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a log file and builds a map of test‑case names to their recorded results. | +| **Parameters** | `logFileName` – path to the log file to parse. | +| **Return value** | A `map[string]string` where keys are test case identifiers and values are result strings (e.g., `"PASS"`, `"FAIL"`). Returns an error if file access or scanning fails. | +| **Key dependencies** | • `os.Open` – open the log file.
• `regexp.MustCompile` – compile the regex used to extract data.
• `bufio.NewScanner` & `Scanner.Buffer` – read file line by line with a larger buffer.
• `re.FindStringSubmatch` – match each line against the pattern. | +| **Side effects** | • Opens and closes the specified file.
• Reads the entire file sequentially; no concurrent access. | +| **How it fits the package** | Provides the core data source for result comparison logic in the `check/results` command. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Open log file"] --> B["Compile regex"] + B --> C["Create scanner with large buffer"] + C --> D{"Scan each line"} + D -->|"Match found"| E["Extract test case & result"] + E --> F["Store in resultsDB map"] + D -->|"No match"| G["Ignore line"] + D --> H{"Scanner finished?"} + H -->|"Yes, error"| I["Return error"] + H -->|"No, end"| J["Close file & return map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getTestResultsDB --> os_Open + func_getTestResultsDB --> regexp_MustCompile + func_getTestResultsDB --> bufio_NewScanner + func_getTestResultsDB --> bufio_Scanner_Buffer + func_getTestResultsDB --> re_FindStringSubmatch + func_getTestResultsDB --> fmt_Errorf +``` + +#### Functions calling `getTestResultsDB` (Mermaid) + +```mermaid +graph TD + checkResults --> getTestResultsDB +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getTestResultsDB +results, err := getTestResultsDB("test.log") +if err != nil { + fmt.Printf("Failed to load results: %v\n", err) + return +} +for testCase, result := range results { + fmt.Printf("%s -> %s\n", testCase, result) +} +``` + +--- + +### printTestResultsMismatch + +**printTestResultsMismatch** - Displays each mismatched test case in a human‑readable table with columns for the test name, expected result, and actual result. + +Prints a formatted table of test cases whose expected and actual results differ. + +#### Signature (Go) + +```go +func([]string, map[string]string, map[string]string)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Displays each mismatched test case in a human‑readable table with columns for the test name, expected result, and actual result. 
| +| **Parameters** | `mismatchedTestCases []string` – list of test case identifiers that differ.
`actualResults map[string]string` – map from test case to its actual outcome.
`expectedResults map[string]string` – map from test case to its expected outcome. | +| **Return value** | None (the function performs I/O only). | +| **Key dependencies** | • `fmt.Printf`, `fmt.Println`
• `strings.Repeat` | +| **Side effects** | Writes to standard output; no state mutation or concurrency. | +| **How it fits the package** | Used by the command‑line tool to report discrepancies between a reference YAML template and a log file containing test results. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate mismatchedTestCases"} + B -->|"for each case"| C["Retrieve expectedResult"] + C --> D["If missing, use resultMiss"] + D --> E["Retrieve actualResult"] + E --> F["If missing, use resultMiss"] + F --> G["Print formatted row"] + G --> H["Print separator line"] + H --> I{"End loop"} +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printTestResultsMismatch --> fmt_Printf + func_printTestResultsMismatch --> fmt_Println + func_printTestResultsMismatch --> strings_Repeat +``` + +#### Functions calling `printTestResultsMismatch` (Mermaid) + +```mermaid +graph TD + func_checkResults --> func_printTestResultsMismatch +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printTestResultsMismatch +mismatched := []string{"testA", "testB"} +actual := map[string]string{ + "testA": "FAIL", + "testB": "PASS", +} +expected := map[string]string{ + "testA": "PASS", + "testB": "PASS", +} +printTestResultsMismatch(mismatched, actual, expected) +``` + +--- diff --git a/docs/cmd/certsuite/claim/claim.md b/docs/cmd/certsuite/claim/claim.md new file mode 100644 index 000000000..a793bf069 --- /dev/null +++ b/docs/cmd/certsuite/claim/claim.md @@ -0,0 +1,92 @@ +# Package claim + +**Path**: `cmd/certsuite/claim` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) + +## Overview + +The `claim` package builds the Cobra command hierarchy for the top‑level `claim` subcommand of certsuite, wiring together its comparison and display subcommands. 
+ +### Key Features + +- Provides a single entry point (`NewCommand`) that returns a fully configured cobra.Command with nested children + +### Design Notes + +- Relies on the Cobra library to manage CLI flags and execution flow +- Keeps command construction isolated from business logic by delegating to subpackages + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Builds a Cobra command tree for the `claim` sub‑command, adding both comparison and display sub‑commands. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Builds a Cobra command tree for the `claim` sub‑command, adding both comparison and display sub‑commands. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a Cobra command tree for the `claim` sub‑command, adding both comparison and display sub‑commands. | +| **Parameters** | None | +| **Return value** | A pointer to a configured `*cobra.Command` ready to be added to the application root. | +| **Key dependencies** | • `github.com/spf13/cobra` for command construction
• Calls to `compare.NewCommand()` and `show.NewCommand()` from sub‑packages | +| **Side effects** | Registers two child commands (`compare` and `show`) on the returned command; no global state changes. | +| **How it fits the package** | Serves as the entry point for the *claim* feature, exposing comparison and display functionality under the CLI hierarchy. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + claim.NewCommand --> compare.NewCommand + claim.NewCommand --> show.NewCommand +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + claim.NewCommand --> compare.NewCommand + claim.NewCommand --> show.NewCommand +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + certsuite.newRootCmd --> claim.NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim" +) + +func main() { + cmd := claim.NewCommand() + // Normally this command would be added to the root CLI and executed. +} +``` + +--- diff --git a/docs/cmd/certsuite/claim/compare/compare.md b/docs/cmd/certsuite/claim/compare/compare.md new file mode 100644 index 000000000..88e831fbc --- /dev/null +++ b/docs/cmd/certsuite/claim/compare/compare.md @@ -0,0 +1,349 @@ +# Package compare + +**Path**: `cmd/certsuite/claim/compare` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [claimCompare](#claimcompare) + - [claimCompareFilesfunc](#claimcomparefilesfunc) + - [unmarshalClaimFile](#unmarshalclaimfile) + +## Overview + +Provides a CLI command that compares two claim files, reporting differences in versioning, test cases, configurations and nodes. 
+ +### Key Features + +- Builds a cobra.Command accepting two required file path flags +- Parses claim JSON/YAML into structured Schema objects +- Compares each major component (versions, test‑case results, configuration, node data) and prints a formatted diff report + +### Design Notes + +- Flags are defined as global variables for easy access by the command handler +- Comparison logic is encapsulated in a helper that returns errors to allow graceful logging +- The package focuses on read‑only comparison; it does not modify input files + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Builds a `*cobra.Command` that accepts two file paths (`claim1`, `claim2`) as required flags, then returns it for use in the CLI. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func claimCompare(_ *cobra.Command, _ []string) error](#claimcompare) | Invokes the file‑comparison routine for two claim JSON/YAML files specified by global flags and handles errors via logging. | +| [func claimCompareFilesfunc(claim1, claim2 string) error](#claimcomparefilesfunc) | Reads two JSON claim files, unmarshals them into `claim.Schema`, compares their versions, test‑case results, configuration and node data, printing a structured diff report for each category. | +| [func([]byte)(claim.Schema, error)](#unmarshalclaimfile) | Parses raw JSON data from a claim file into the `claim.Schema` structure. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Builds a `*cobra.Command` that accepts two file paths (`claim1`, `claim2`) as required flags, then returns it for use in the CLI. + +Creates and configures the Cobra command used to compare two claim files. 
+ +```go +func NewCommand() *cobra.Command +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `*cobra.Command` that accepts two file paths (`claim1`, `claim2`) as required flags, then returns it for use in the CLI. | +| **Parameters** | None | +| **Return value** | A pointer to a configured `cobra.Command`. If flag validation fails, it logs an error and returns `nil`. | +| **Key dependencies** | *`StringVarP` – defines string flags.
* `Flags` – retrieves the command’s flag set.
* `MarkFlagRequired` – enforces required flags.<br/>
* `log.Error` – reports failures. | +| **Side effects** | Adds two persistent flags to the command; marks them as required; logs errors if marking fails. No external I/O is performed here. | +| **How it fits the package** | This function supplies the top‑level “compare” subcommand for the `claim` command hierarchy, enabling users to run `certsuite claim compare …`. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["StringVarP: claim1 flag"] + B --> C["StringVarP: claim2 flag"] + C --> D["MarkFlagRequired: claim1"] + D --> E{"Error?"} + E -- Yes --> F["log.Error & return nil"] + E -- No --> G["MarkFlagRequired: claim2"] + G --> H{"Error?"} + H -- Yes --> I["log.Error & return nil"] + H -- No --> J["Return command"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewCommand --> func_StringVarP + func_NewCommand --> func_Flags + func_NewCommand --> func_MarkFlagRequired + func_NewCommand --> log_Error +``` + +#### Functions calling `NewCommand` + +```mermaid +graph TD + claim.NewCommand --> func_NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare" + "github.com/spf13/cobra" +) + +func main() { + root := &cobra.Command{Use: "certsuite"} + // Register the compare subcommand + root.AddCommand(compare.NewCommand()) + + // Execute the command line interface + if err := root.Execute(); err != nil { + panic(err) + } +} +``` + +--- + +## Local Functions + +### claimCompare + +**claimCompare** - Invokes the file‑comparison routine for two claim JSON/YAML files specified by global flags and handles errors via logging. 
+ +#### Signature (Go) + +```go +func claimCompare(_ *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Invokes the file‑comparison routine for two claim JSON/YAML files specified by global flags and handles errors via logging. | +| **Parameters** | `_ *cobra.Command` – command context (unused).
`_ []string` – arguments list (unused). | +| **Return value** | `error` – always `nil`; any error is logged as fatal and terminates the program. | +| **Key dependencies** | • `claimCompareFilesfunc` – performs file I/O, unmarshalling, and diff generation.
• `log.Fatal` from the internal logging package – outputs errors and exits. | +| **Side effects** | Reads two claim files, prints diffs to stdout, logs fatal error on failure, and terminates the process if an error occurs. | +| **How it fits the package** | Entry point for the `compare` sub‑command of the certsuite CLI; orchestrates the comparison workflow without exposing details to the user. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + claimCompare --> claimCompareFilesfunc + claimCompare --> log.Fatal +``` + +#### Function dependencies + +```mermaid +graph TD + func_claimCompare --> func_claimCompareFilesfunc + func_claimCompare --> func_log.Fatal +``` + +#### Functions calling `claimCompare` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking claimCompare +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare" + cobra "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{Use: "compare"} + // The command expects no arguments; flags Claim1FilePathFlag and Claim2FilePathFlag are set elsewhere. + err := compare.claimCompare(cmd, []string{}) + if err != nil { + // handle error (though claimCompare logs fatal on failure) + fmt.Println("Unexpected error:", err) + } +} +``` + +--- + +### claimCompareFilesfunc + +**claimCompareFilesfunc** - Reads two JSON claim files, unmarshals them into `claim.Schema`, compares their versions, test‑case results, configuration and node data, printing a structured diff report for each category. + +#### Signature (Go) + +```go +func claimCompareFilesfunc(claim1, claim2 string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads two JSON claim files, unmarshals them into `claim.Schema`, compares their versions, test‑case results, configuration and node data, printing a structured diff report for each category. 
| +| **Parameters** | `claim1 string` – path to first claim file<br/>
`claim2 string` – path to second claim file | +| **Return value** | `error` – non‑nil if any I/O or unmarshalling error occurs; otherwise `nil`. | +| **Key dependencies** | • `os.ReadFile` (twice)<br/>
• `fmt.Errorf`, `fmt.Println`, `fmt.Print`
• `unmarshalClaimFile` (internal helper)
• `versions.Compare`
• `testcases.GetDiffReport`
• `configurations.GetDiffReport`
• `nodes.GetDiffReport` | +| **Side effects** | Reads files from disk; writes formatted diff reports to standard output. No global state is mutated. | +| **How it fits the package** | Internal implementation of the CLI command that performs a comprehensive comparison between two claim snapshots, producing human‑readable diagnostics for developers and operators. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Read claim1 file"] --> B{"Success?"} + B -- No --> C["Return error"] + B -- Yes --> D["Read claim2 file"] + D --> E{"Success?"} + E -- No --> F["Return error"] + E -- Yes --> G["Unmarshal claim1 data"] + G --> H{"Success?"} + H -- No --> I["Return error"] + H -- Yes --> J["Unmarshal claim2 data"] + J --> K{"Success?"} + K -- No --> L["Return error"] + K -- Yes --> M["Compare versions"] + M --> N["Print versions diff"] + N --> O["Generate test‑case diff report"] + O --> P["Print test‑case diff"] + P --> Q["Generate configuration diff report"] + Q --> R["Print configuration diff"] + R --> S["Generate node diff report"] + S --> T["Print node diff"] + T --> U["Return nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_claimCompareFilesfunc --> os_ReadFile + func_claimCompareFilesfunc --> fmt_Errorf + func_claimCompareFilesfunc --> unmarshalClaimFile + func_claimCompareFilesfunc --> versions_Compare + func_claimCompareFilesfunc --> testcases_GetDiffReport + func_claimCompareFilesfunc --> configurations_GetDiffReport + func_claimCompareFilesfunc --> nodes_GetDiffReport + func_claimCompareFilesfunc --> fmt_Println + func_claimCompareFilesfunc --> fmt_Print +``` + +#### Functions calling `claimCompareFilesfunc` + +```mermaid +graph TD + claimCompare --> claimCompareFilesfunc +``` + +#### Usage example (Go) + +```go +// Minimal example invoking claimCompareFilesfunc +package main + +import ( + "log" +) + +func main() { + if err := claimCompareFilesfunc("path/to/claim1.json", "path/to/claim2.json"); err != nil { + log.Fatalf("Comparison 
failed: %v", err) + } +} +``` + +--- + +--- + +### unmarshalClaimFile + +**unmarshalClaimFile** - Parses raw JSON data from a claim file into the `claim.Schema` structure. + +#### 1) Signature (Go) + +```go +func([]byte)(claim.Schema, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses raw JSON data from a claim file into the `claim.Schema` structure. | +| **Parameters** | `claimdata []byte` – Byte slice containing the JSON representation of a claim. | +| **Return value** | `claim.Schema, error` – The unmarshaled schema object and an error if parsing fails. | +| **Key dependencies** | • `encoding/json.Unmarshal` – Deserialises the byte slice into the struct. | +| **Side effects** | None. The function is pure; it does not modify global state or perform I/O. | +| **How it fits the package** | Used by `claimCompareFilesfunc` to read and interpret claim files before comparison operations. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Read byte slice"} + B --> C["Call json.Unmarshal"] + C --> D{"Error?"} + D -- Yes --> E["Return empty Schema, error"] + D -- No --> F["Return populated Schema, nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_unmarshalClaimFile --> func_EncodingJSON_Unmarshal +``` + +#### 5) Functions calling `unmarshalClaimFile` (Mermaid) + +```mermaid +graph TD + func_claimCompareFilesfunc --> func_unmarshalClaimFile +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking unmarshalClaimFile +import ( + "fmt" + "io/ioutil" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim" +) + +// Assume `pathToClaim` is a string containing the file path. 
+data, err := ioutil.ReadFile(pathToClaim) +if err != nil { + log.Fatalf("failed to read claim file: %v", err) +} + +schema, err := unmarshalClaimFile(data) +if err != nil { + log.Fatalf("failed to parse claim: %v", err) +} + +fmt.Printf("Parsed claim version: %s\n", schema.Claim.Versions[0]) +``` + +--- diff --git a/docs/cmd/certsuite/claim/compare/configurations/configurations.md b/docs/cmd/certsuite/claim/compare/configurations/configurations.md new file mode 100644 index 000000000..a4db1a930 --- /dev/null +++ b/docs/cmd/certsuite/claim/compare/configurations/configurations.md @@ -0,0 +1,275 @@ +# Package configurations + +**Path**: `cmd/certsuite/claim/compare/configurations` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [AbnormalEventsCount](#abnormaleventscount) + - [DiffReport](#diffreport) +- [Exported Functions](#exported-functions) + - [AbnormalEventsCount.String](#abnormaleventscount.string) + - [DiffReport.String](#diffreport.string) + - [GetDiffReport](#getdiffreport) + +## Overview + +Provides utilities to compare two claim configurations, summarizing configuration differences and abnormal event counts. 
+ +### Key Features + +- Generates a DiffReport struct containing diff.Diffs and abnormal event statistics +- String methods produce human‑readable tables for reports +- GetDiffReport orchestrates comparison of two claim.Configurations + +### Design Notes + +- Relies on external diff package to compute configuration differences +- Assumes Claim1/Claim2 fields represent counts from separate contexts +- Best practice: use GetDiffReport before displaying or storing the report + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**AbnormalEventsCount**](#abnormaleventscount) | Struct definition | +| [**DiffReport**](#diffreport) | Summary of configuration differences and abnormal event counts | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func (c *AbnormalEventsCount) String() string](#abnormaleventscount.string) | Builds a human‑readable table that lists the number of abnormal events detected in two separate claim contexts. | +| [func (d *DiffReport) String() string](#diffreport.string) | Builds and returns a formatted text block that lists configuration differences and any abnormal events. | +| [func GetDiffReport(claim1Configurations, claim2Configurations *claim.Configurations) *DiffReport](#getdiffreport) | Compares two `claim.Configurations` objects and returns a summary of differences in configuration settings and abnormal event counts. | + +## Structs + +### AbnormalEventsCount + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Claim1` | `int` | Field documentation | +| `Claim2` | `int` | Field documentation | + +--- + +### DiffReport + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Config` | `*diff.Diffs` | Holds the detailed differences between two Cert Suite configuration objects, produced by the diffing library. 
| +| `AbnormalEvents` | `AbnormalEventsCount` | Stores the number of abnormal events observed in each claim (`Claim1` and `Claim2`). | + +#### Purpose + +`DiffReport` encapsulates a comparison result for two Cert Suite claims. It contains both the structural differences of their configuration sections and the counts of any abnormal events recorded during execution. The struct is used to generate human‑readable summaries (via its `String()` method) and can be serialized as JSON for reporting or further analysis. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetDiffReport` | Creates a new `DiffReport` by comparing the configurations of two claims and counting their abnormal events. | +| `DiffReport.String` | Returns a formatted string that lists the configuration differences followed by the abnormal event counts for display or logging. | + +--- + +--- + +## Exported Functions + +### AbnormalEventsCount.String + +**String** - Builds a human‑readable table that lists the number of abnormal events detected in two separate claim contexts. + +Displays a formatted string summarizing abnormal event counts for two claims. + +```go +func (c *AbnormalEventsCount) String() string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a human‑readable table that lists the number of abnormal events detected in two separate claim contexts. | +| **Parameters** | `c` – pointer to an `AbnormalEventsCount` struct containing integer fields `Claim1` and `Claim2`. | +| **Return value** | A string formatted as:
``Cluster abnormal events count\nCLAIM 1 CLAIM 2\n\n``` | +| **Key dependencies** | • `fmt.Sprintf` (two calls)
• Standard library only | +| **Side effects** | None – purely functional; no mutation or I/O. | +| **How it fits the package** | Provides a convenient textual representation for reporting or logging within the `configurations` comparison tooling. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Define row formats"] + B --> C["Initialize header string"] + C --> D["Append formatted headers using fmt.Sprintf"] + D --> E["Append formatted data rows using fmt.Sprintf"] + E --> F["Return concatenated string"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_AbnormalEventsCount.String --> func_fmt.Sprintf +``` + +#### Functions calling `AbnormalEventsCount.String` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking AbnormalEventsCount.String +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/configurations" +) + +func main() { + c := configurations.AbnormalEventsCount{Claim1: 3, Claim2: 5} + fmt.Print(c.String()) +} +``` + +--- + +### DiffReport.String + +**String** - Builds and returns a formatted text block that lists configuration differences and any abnormal events. + +Converts a `DiffReport` into a human‑readable string representation. + +#### Signature (Go) + +```go +func (d *DiffReport) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and returns a formatted text block that lists configuration differences and any abnormal events. | +| **Parameters** | `d *DiffReport` – the report instance to be rendered. | +| **Return value** | `string` – concatenated textual representation of the report. | +| **Key dependencies** | • Calls `d.Config.String()`
• Calls `d.AbnormalEvents.String()` | +| **Side effects** | None; purely functional, no mutation or I/O. | +| **How it fits the package** | Provides a convenient way to display comparison results in logs or user interfaces within the `configurations` comparison module. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + start("Start") --> init["Initialize string with header"] + init --> addConfig["Append d.Config.String()"] + addConfig --> newline["Add newline"] + newline --> addEvents["Append d.AbnormalEvents.String()"] + addEvents --> finish["Return final string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_DiffReport_String --> func_Config_String + func_DiffReport_String --> func_AbnormalEvents_String +``` + +#### Functions calling `DiffReport.String` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking DiffReport.String +report := &configurations.DiffReport{ + Config: myConfig, + AbnormalEvents: myEvents, +} +fmt.Println(report.String()) +``` + +--- + +### GetDiffReport + +**GetDiffReport** - Compares two `claim.Configurations` objects and returns a summary of differences in configuration settings and abnormal event counts. + +#### Signature (Go) + +```go +func GetDiffReport(claim1Configurations, claim2Configurations *claim.Configurations) *DiffReport +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Compares two `claim.Configurations` objects and returns a summary of differences in configuration settings and abnormal event counts. | +| **Parameters** | `claim1Configurations *claim.Configurations` – first claim’s configurations.
`claim2Configurations *claim.Configurations` – second claim’s configurations. | +| **Return value** | `*DiffReport` – a struct containing the diff of config values (`Config`) and counts of abnormal events for each claim. | +| **Key dependencies** | • `diff.Compare` from `github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff`.
• `len` (built‑in). | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a high‑level API for the *configurations* comparison module, used by higher‑level claim comparison logic to produce human‑readable diff reports. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetDiffReport --> diff.Compare["diff.Compare"] + GetDiffReport --> len["len"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetDiffReport --> func_Compare["func Compare"] + func_GetDiffReport --> func_len["func len"] +``` + +#### Functions calling `GetDiffReport` + +```mermaid +graph TD + func_claimCompareFilesfunc --> func_GetDiffReport +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetDiffReport +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/configurations" +) + +func main() { + // Assume cfg1 and cfg2 are populated *claim.Configurations instances + var cfg1, cfg2 *claim.Configurations + + diffReport := configurations.GetDiffReport(cfg1, cfg2) + fmt.Printf("%+v\n", diffReport) +} +``` + +--- diff --git a/docs/cmd/certsuite/claim/compare/diff/diff.md b/docs/cmd/certsuite/claim/compare/diff/diff.md new file mode 100644 index 000000000..950819ea3 --- /dev/null +++ b/docs/cmd/certsuite/claim/compare/diff/diff.md @@ -0,0 +1,354 @@ +# Package diff + +**Path**: `cmd/certsuite/claim/compare/diff` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [Diffs](#diffs) + - [FieldDiff](#fielddiff) +- [Exported Functions](#exported-functions) + - [Compare](#compare) + - [Diffs.String](#diffs.string) +- [Local Functions](#local-functions) + - [traverse](#traverse) + +## Overview + +The diff package provides utilities for comparing two unmarshaled JSON‑like structures and summarizing their differences in a tabular form. 
+ +### Key Features + +- Recursively walks arbitrary maps or slices to produce a flat list of leaf nodes +- Compares two such trees, reporting matching fields, differing values, and fields present only in one tree +- Formats the diff into a human‑readable table with dynamic column widths + +### Design Notes + +- Comparison is value‑based using reflect.DeepEqual; order of keys in maps does not affect results +- The traversal function supports optional path filters to limit output to specific subtrees +- The String method calculates column width at runtime, which may be costly for very large diffs + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**Diffs**](#diffs) | Holds the differences between two JSON‑unmarshalled objects | +| [**FieldDiff**](#fielddiff) | Struct definition | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs](#compare) | Walks two arbitrary `interface{}` trees (typically unmarshaled JSON), optionally filtering by subtree paths, and returns a `*Diffs` struct summarizing matching fields, differing values, and unique fields in each tree. | +| [func (d *Diffs) String() string](#diffs.string) | Returns a formatted string that lists field differences, fields only in CLAIM 1 and fields only in CLAIM 2. The output is tabular with dynamic column widths based on the longest path or value. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func traverse(node interface{}, path string, filters []string) []field](#traverse) | Walks through an arbitrary JSON‑like tree (`map[string]interface{}` or `[]interface{}`), building a flat list of leaf nodes. Each leaf is represented as a `field` containing its full path and value. Optional filters restrict the paths that are returned. 
| + +## Structs + +### Diffs + +| Field | Type | Description | +|-------|------|-------------| +| `Name` | `string` | Identifier of the object being compared; used as a header when rendering results. | +| `Fields` | `[]FieldDiff` | Entries for fields that exist in both objects but have differing values. | +| `FieldsInClaim1Only` | `[]string` | List of field paths (with their values) present only in the first object. | +| `FieldsInClaim2Only` | `[]string` | List of field paths (with their values) present only in the second object. | + +#### Purpose + +`Diffs` aggregates all differences discovered when comparing two arbitrary Go structures that were decoded from JSON. It records: + +- Which fields differ and shows both values (`FieldDiff`), +- Which fields exist exclusively in each input. + +This struct is primarily used by the `Compare` function to produce a human‑readable report, and it implements `String()` for easy formatting. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs` | Walks both objects, populates a `Diffs` instance with differing fields and exclusive fields. | +| `(*Diffs).String() string` | Formats the stored differences into a plain‑text table for display or logging. | + +--- + +--- + +### FieldDiff + + +**Purpose**: FieldDIff holds the field path and the values from both claim files +that have been found to be different. + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Claim2Value` | `interface{}` | Value of the field extracted from the second claim file. | +| `FieldPath` | `string` | JSON‑style path to the field within the claim structure (e.g., `"spec.template.spec.containers[0].image"`). | +| `Claim1Value` | `interface{}` | Value of the field extracted from the first claim file. 
| + +--- + +## Exported Functions + +### Compare + +**Compare** - Walks two arbitrary `interface{}` trees (typically unmarshaled JSON), optionally filtering by subtree paths, and returns a `*Diffs` struct summarizing matching fields, differing values, and unique fields in each tree. + +#### 1) Signature (Go) + +```go +func Compare(objectName string, claim1Object, claim2Object interface{}, filters []string) *Diffs +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Walks two arbitrary `interface{}` trees (typically unmarshaled JSON), optionally filtering by subtree paths, and returns a `*Diffs` struct summarizing matching fields, differing values, and unique fields in each tree. | +| **Parameters** | `objectName string –` identifier used in the resulting `Diffs`;
`claim1Object interface{} –` first tree to compare;
`claim2Object interface{} –` second tree;
`filters []string –` optional list of subtree names that should be traversed and compared. | +| **Return value** | `*Diffs` containing:
• `Name string` (the supplied object name)
• `Fields []FieldDiff` (paths where values differ)
• `FieldsInClaim1Only []string` (paths present only in the first tree)
• `FieldsInClaim2Only []string` (paths present only in the second tree). | +| **Key dependencies** | • `traverse` – recursively extracts fields from a tree.
• `reflect.DeepEqual` – checks value equality.
• `fmt.Sprintf` – formats missing‑field strings. | +| **Side effects** | None beyond constructing and returning the `Diffs` result; no global state is mutated, and no I/O occurs. | +| **How it fits the package** | Core comparison routine used by higher‑level diff reports for configurations, nodes, versions, etc., enabling consistent reporting across claim types. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Call traverse on claim1Object"] + B --> C["Call traverse on claim2Object"] + C --> D["Build maps of paths → values for each tree"] + D --> E["Iterate over claim1 fields"] + E --> F{"Field exists in claim2?"} + F -- Yes --> G["Compare values with reflect.DeepEqual"] + G --> H{"Values equal?"} + H -- No --> I["Add to Fields (diff)"] + H -- Yes --> J["Skip"] + F -- No --> K["Record field only in Claim1"] + E --> L["Iterate over claim2 fields"] + L --> M{"Field exists in claim1?"} + M -- No --> N["Record field only in Claim2"] + N --> O["Return &Diffs"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Compare --> func_traverse + func_Compare --> pkg_reflect.DeepEqual + func_Compare --> pkg_fmt.Sprintf +``` + +#### 5) Functions calling `Compare` (Mermaid) + +```mermaid +graph TD + func_GetDiffReport --> func_Compare + func_GetDiffReport --> func_Compare + func_GetDiffReport --> func_Compare +``` + +*Note: The function is referenced by multiple callers in different subpackages.* + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Compare +package main + +import ( + "encoding/json" + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff" +) + +func main() { + // Example JSON objects unmarshaled into interface{} + obj1JSON := `{"name":"alpha","value":42,"labels":{"env":"prod"}}` + obj2JSON := `{"name":"alpha","value":43,"labels":{"env":"dev"}}` + + var obj1, obj2 interface{} + json.Unmarshal([]byte(obj1JSON), &obj1) + 
json.Unmarshal([]byte(obj2JSON), &obj2) + + // Compare all fields + result := diff.Compare("ExampleObject", obj1, obj2, nil) + + fmt.Printf("%+v\n", result) +} +``` + +This example demonstrates how `Compare` can be used to detect differences between two arbitrary JSON structures. + +--- + +### Diffs.String + +**String** - Returns a formatted string that lists field differences, fields only in CLAIM 1 and fields only in CLAIM 2. The output is tabular with dynamic column widths based on the longest path or value. + +Creates a human‑readable table summarising the differences between two claim files. + +```go +func (d *Diffs) String() string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a formatted string that lists field differences, fields only in CLAIM 1 and fields only in CLAIM 2. The output is tabular with dynamic column widths based on the longest path or value. | +| **Parameters** | `d *Diffs` – receiver holding comparison data (Name, Fields, FieldsInClaim1Only, FieldsInClaim2Only). | +| **Return value** | `string` – the formatted diff report. | +| **Key dependencies** | • `len`, `fmt.Sprint`, `fmt.Sprintf` for calculating widths and formatting.
• Constants `noDiffs`, `columnsGapSize`. | +| **Side effects** | None; purely functional, no state mutation or I/O. | +| **How it fits the package** | Implements the `Stringer` interface for `Diffs`, enabling easy printing of comparison results within the `diff` package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Calculate max widths"} + B --> C["Build format string"] + C --> D{"Generate difference rows"} + D --> E["Append CLAIM 1‑only rows"] + E --> F["Append CLAIM 2‑only rows"] + F --> G["Return final string"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Diffs.String --> len + func_Diffs.String --> fmt.Sprint + func_Diffs.String --> fmt.Sprintf +``` + +#### Functions calling `Diffs.String` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Diffs.String +diff := &diff.Diffs{ + Name: "Certificate", + Fields: []diff.FieldDiff{{ + FieldPath: "/metadata/name", + Claim1Value: "certA", + Claim2Value: "certB", + }}, +} +fmt.Println(diff.String()) +``` + +--- + +## Local Functions + +### traverse + +**traverse** - Walks through an arbitrary JSON‑like tree (`map[string]interface{}` or `[]interface{}`), building a flat list of leaf nodes. Each leaf is represented as a `field` containing its full path and value. Optional filters restrict the paths that are returned. + +#### Signature (Go) + +```go +func traverse(node interface{}, path string, filters []string) []field +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Walks through an arbitrary JSON‑like tree (`map[string]interface{}` or `[]interface{}`), building a flat list of leaf nodes. Each leaf is represented as a `field` containing its full path and value. Optional filters restrict the paths that are returned. | +| **Parameters** | - `node interface{}` – Current node to inspect.
- `path string` – Accumulated slash‑delimited path from the root.
- `filters []string` – Path fragments; only leaves whose path contains one of these fragments (surrounded by slashes) are included when filters are non‑empty. | +| **Return value** | `[]field` – Slice of all matching leaf nodes found under `node`. If `node` is `nil`, returns `nil`. | +| **Key dependencies** | • `make` to create slices and maps.
• `append` for slice manipulation.
• `sort.Strings` to visit map keys in deterministic order.
• `strconv.Itoa` to convert list indices into string form.
• `strings.Contains` to apply filter checks. | +| **Side effects** | None; the function is pure and does not modify any external state. | +| **How it fits the package** | Used by the public `Compare` function to flatten two claim objects into comparable lists of fields before performing a diff. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckNil{"node==nil?"} + CheckNil -- Yes --> ReturnNil["return nil"] + CheckNil -- No --> TypeSwitch((Type Switch)) + TypeSwitch --> MapObject((Map Object)) + TypeSwitch --> ListObject((List Object)) + TypeSwitch --> Leaf((Leaf Node)) + MapObject --> GetKeys((Get Keys)) + GetKeys --> SortKeys((Sort Keys)) + SortKeys --> ForEachKey((For Each Key)) + ForEachKey --> Recurse((Recurse with key)) + ListObject --> ForEachIndex((For Each Index)) + ForEachIndex --> RecurseIdx((Recurse with index)) + Leaf --> NoFilter{"len(filters)==0?"} + NoFilter -- Yes --> AppendAll((Append All)) + NoFilter -- No --> FilterLoop((Filter Loop)) + FilterLoop --> Contains{"strings.Contains(path,/+filter+/)?"} + Contains -- Yes --> AppendFiltered((Append Filtered)) + Contains -- No --> Skip["Skip"] + Recurse --> End["(End)"] + RecurseIdx --> End + AppendAll --> End + AppendFiltered --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_traverse --> make + func_traverse --> append + func_traverse --> sort.Strings + func_traverse --> strconv.Itoa + func_traverse --> strings.Contains +``` + +#### Functions calling `traverse` (Mermaid) + +```mermaid +graph TD + func_Compare --> func_traverse +``` + +#### Usage example (Go) + +```go +// Minimal example invoking traverse +package main + +import ( + "fmt" +) + +func main() { + data := map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "example", + "labels": []interface{}{"app", "demo"}, + }, + "spec": map[string]interface{}{ + "replicas": 3, + }, + } + + fields := traverse(data, "", nil) + for _, f := range fields { + fmt.Printf("%s = 
%v\n", f.Path, f.Value) + } +} +``` + +--- diff --git a/docs/cmd/certsuite/claim/compare/nodes/nodes.md b/docs/cmd/certsuite/claim/compare/nodes/nodes.md new file mode 100644 index 000000000..7e33d7fcc --- /dev/null +++ b/docs/cmd/certsuite/claim/compare/nodes/nodes.md @@ -0,0 +1,232 @@ +# Package nodes + +**Path**: `cmd/certsuite/claim/compare/nodes` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [DiffReport](#diffreport) +- [Exported Functions](#exported-functions) + - [DiffReport.String](#diffreport.string) + - [GetDiffReport](#getdiffreport) + +## Overview + +The nodes package builds a human‑readable report summarizing differences between two claim node sets—covering node lists, CNI networks, CSI drivers and hardware details. + +### Key Features + +- Computes structured diffs via the diff package for each component (Nodes, CNI, CSI, Hardware) +- Encapsulates results in DiffReport with a String() method for easy display +- Provides GetDiffReport to construct reports from claim.Nodes objects + +### Design Notes + +- Assumes both input claims are fully populated and comparable; nil pointers result in empty diffs +- Diff generation is delegated to the diff package, keeping comparison logic separate +- String() aggregates multiple diff sections into one string, suitable for CLI output + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**DiffReport**](#diffreport) | Summary of node differences between two claims | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func (d DiffReport) String() string](#diffreport.string) | Creates a human‑readable summary of cluster node differences, including nodes, CNI, CSI and hardware comparisons. | +| [func GetDiffReport(claim1Nodes, claim2Nodes *claim.Nodes) *DiffReport](#getdiffreport) | Builds a `DiffReport` that contains the differences between two sets of claim nodes. It compares node summaries, CNI networks, CSI drivers and hardware information. 
| + +## Structs + +### DiffReport + +#### Fields + +| Field | Type | Description | +|----------|-------------|-------------| +| `Nodes` | `*diff.Diffs` | Differences in node roles and summaries, one entry per node that appears in both claim files. If a node exists only in one claim it is marked “not found in claim[1|2]”. | +| `CNI` | `*diff.Diffs` | Differences between CNI network configurations across the two claims. | +| `CSI` | `*diff.Diffs` | Differences between CSI driver definitions in the two claims. | +| `Hardware` | `*diff.Diffs` | Differences in hardware information reported by the nodes in each claim. | + +#### Purpose + +`DiffReport` aggregates the results of comparing node-related data from two Kubernetes cluster claims. It holds separate diff summaries for general node information, CNI networks, CSI drivers, and hardware specs, allowing consumers to inspect which aspects differ between the clusters. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetDiffReport` | Creates a new `DiffReport` by comparing corresponding fields of two `claim.Nodes` objects. | +| `DiffReport.String` | Returns a formatted string representation of the report, listing differences for each section if present. | + +--- + +--- + +## Exported Functions + +### DiffReport.String + +**String** - Creates a human‑readable summary of cluster node differences, including nodes, CNI, CSI and hardware comparisons. + +Outputs a formatted report of node differences between two claim files. + +--- + +#### Signature (Go) + +```go +func (d DiffReport) String() string +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a human‑readable summary of cluster node differences, including nodes, CNI, CSI and hardware comparisons. | +| **Parameters** | `d DiffReport` – the report to format (receiver). | +| **Return value** | `string` – multiline text ready for printing or logging. 
| +| **Key dependencies** | Calls `String()` on the nested fields: `d.Nodes`, `d.CNI`, `d.CSI`, and `d.Hardware`. | +| **Side effects** | None; purely functional, no state mutation or I/O. | +| **How it fits the package** | Serves as the stringer for `DiffReport`, enabling concise output when comparing claims in the *nodes* comparison tool. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> InitHeader["str = \CLUSTER NODES DIFFERENCES\\n\ + \-------------------------\\n\\n\"] + InitHeader --> CheckNodes{"d.Nodes != nil"} + CheckNodes -- true --> AppendNodes["str += d.Nodes.String() + \\\n\"] + CheckNodes -- false --> SkipNodes + AppendNodes --> CheckCNI{"d.CNI != nil"} + SkipNodes --> CheckCNI + CheckCNI -- true --> AppendCNI["str += d.CNI.String() + \\\n\"] + CheckCNI -- false --> SkipCNI + AppendCNI --> CheckCSI{"d.CSI != nil"} + SkipCNI --> CheckCSI + CheckCSI -- true --> AppendCSI["str += d.CSI.String() + \\\n\"] + CheckCSI -- false --> SkipCSI + AppendCSI --> CheckHardware{"d.Hardware != nil"} + SkipCSI --> CheckHardware + CheckHardware -- true --> AppendHardware["str += d.Hardware.String() + \\\n\"] + CheckHardware -- false --> SkipHardware + AppendHardware --> Return["return str"] + SkipHardware --> Return +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + DiffReport_String --> Nodes_String + DiffReport_String --> CNI_String + DiffReport_String --> CSI_String + DiffReport_String --> Hardware_String +``` + +--- + +#### Functions calling `DiffReport.String` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking DiffReport.String +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/nodes" +) + +func main() { + var report nodes.DiffReport + // populate report with data... 
+ fmt.Println(report.String()) +} +``` + +--- + +### GetDiffReport + +**GetDiffReport** - Builds a `DiffReport` that contains the differences between two sets of claim nodes. It compares node summaries, CNI networks, CSI drivers and hardware information. + +#### Signature (Go) + +```go +func GetDiffReport(claim1Nodes, claim2Nodes *claim.Nodes) *DiffReport +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `DiffReport` that contains the differences between two sets of claim nodes. It compares node summaries, CNI networks, CSI drivers and hardware information. | +| **Parameters** | `claim1Nodes *claim.Nodes – first set of nodes to compare`
`claim2Nodes *claim.Nodes – second set of nodes to compare` | +| **Return value** | `*DiffReport – a report with fields for Nodes, CNI, CSI and Hardware diffs` | +| **Key dependencies** | • `diff.Compare` (from `github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff`)
• Types from the `claim` package (`Nodes`, `NodesSummary`, etc.) | +| **Side effects** | None – pure function, no I/O or global state modification. | +| **How it fits the package** | This function is the public entry point for producing node‑level differences used by higher‑level comparison utilities (e.g., `claimCompareFilesfunc`). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A(Start) --> B{"Prepare diff calls"} + B --> C1["Compare Nodes"] + B --> C2["Compare CNIs"] + B --> C3["Compare CSIs"] + B --> C4["Compare Hardware"] + C1 & C2 & C3 & C4 --> D(Return DiffReport) +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetDiffReport --> diff.Compare +``` + +#### Functions calling `GetDiffReport` (Mermaid) + +```mermaid +graph TD + claimCompareFilesfunc --> func_GetDiffReport +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetDiffReport + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/nodes" +) + +func main() { + // Assume node1 and node2 are already populated *claim.Nodes + var node1, node2 *claim.Nodes + + diffReport := nodes.GetDiffReport(node1, node2) + fmt.Printf("%+v\n", diffReport) +} +``` + +--- diff --git a/docs/cmd/certsuite/claim/compare/testcases/testcases.md b/docs/cmd/certsuite/claim/compare/testcases/testcases.md new file mode 100644 index 000000000..e6e7b341d --- /dev/null +++ b/docs/cmd/certsuite/claim/compare/testcases/testcases.md @@ -0,0 +1,478 @@ +# Package testcases + +**Path**: `cmd/certsuite/claim/compare/testcases` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [DiffReport](#diffreport) + - [TcResultDifference](#tcresultdifference) + - [TcResultsSummary](#tcresultssummary) +- [Exported Functions](#exported-functions) + - [DiffReport.String](#diffreport.string) + - [GetDiffReport](#getdiffreport) +- [Local 
Functions](#local-functions) + - [getMergedTestCasesNames](#getmergedtestcasesnames) + - [getTestCasesResultsMap](#gettestcasesresultsmap) + - [getTestCasesResultsSummary](#gettestcasesresultssummary) + +## Overview + +The testcases package compares two claim result sets and produces a structured diff report, summarising pass/fail/skipped counts per claim and listing any differing test case outcomes. + +### Key Features + +- Generates a DiffReport struct that contains summary statistics for each claim and a list of individual test‑case differences +- Provides helper functions to map raw results into lookup tables and compute summaries efficiently +- Sorts merged test‑case names so the diff report is deterministic and easy to read + +### Design Notes + +- Assumes claim.TestSuiteResults are maps keyed by test case ID; conversion to string maps abstracts away internal structure +- The DiffReport.String method formats output with tables, using fmt.Sprintf for alignment; missing differences are shown as +- Best practice: call GetDiffReport before rendering or serialising the report; avoid direct manipulation of DiffReport fields + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**DiffReport**](#diffreport) | Struct definition | +| [**TcResultDifference**](#tcresultdifference) | One‑line purpose | +| [**TcResultsSummary**](#tcresultssummary) | Summary of test case execution results | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func (r *DiffReport) String() string](#diffreport.string) | Builds a formatted string containing:
• A summary table of passed, skipped, and failed counts for CLAIM‑1 and CLAIM‑2.
• A differences table listing each test case name with its result in both claims. If no differences exist, it shows an empty placeholder. | +| [func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport](#getdiffreport) | Produces a `DiffReport` comparing test case outcomes from two claim files. Each differing result is recorded, and summary statistics for each claim are included. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getMergedTestCasesNames(results1, results2 map[string]string) []string](#getmergedtestcasesnames) | Combines the keys from two result maps into a unique, sorted slice of test‑case names. | +| [func getTestCasesResultsMap(testSuiteResults claim.TestSuiteResults) map[string]string](#gettestcasesresultsmap) | Builds a map from each test case ID to its result state for quick comparison. | +| [func getTestCasesResultsSummary(results map[string]string) TcResultsSummary](#gettestcasesresultssummary) | Aggregates the outcome of each test case into a `TcResultsSummary` struct, counting passed, skipped and failed cases. | + +## Structs + +### DiffReport + + +**Purpose**: Holds the results summary and the list of test cases whose result +is different. + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `DifferentTestCasesResults` | `int` | Number of test cases whose results differ between the two claims. | +| `Claim1ResultsSummary` | `TcResultsSummary` | Passed/skipped/failed counts for the first claim. | +| `Claim2ResultsSummary` | `TcResultsSummary` | Passed/skipped/failed counts for the second claim. | +| `TestCases` | `[]TcResultDifference` | List of the test cases with differing results, holding each claim's outcome. | + +--- + +### TcResultDifference + +Represents a discrepancy between the results of two claim evaluations for a single test case. + +#### Fields + +| Field | Type | Description | +|---------------|--------|-------------| +| `Name` | string | Identifier of the test case that produced the difference. | +| `Claim1Result` | string | Result value returned by the first claim under comparison. 
| +| `Claim2Result` | string | Result value returned by the second claim under comparison. | + +#### Purpose + +In a comparison workflow, each test case is evaluated against two different claims (e.g., expected versus actual). When the outcomes differ, an instance of `TcResultDifference` captures the test case name and both result strings, enabling downstream reporting or debugging. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| *none* | No package‑level functions directly operate on this struct. | + +--- + +--- + +### TcResultsSummary + +#### Fields + +| Field | Type | Description | +|---------|------|-------------| +| Passed | int | Number of test cases that completed successfully. | +| Skipped | int | Number of test cases that were skipped during the run. | +| Failed | int | Number of test cases that did not pass. | + +#### Purpose + +`TcResultsSummary` aggregates the outcome counts of a set of test case executions, providing a quick overview of how many tests passed, were skipped, or failed. It is used to report overall test suite health and to drive decision‑making based on test results. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `getTestCasesResultsSummary` | Computes a `TcResultsSummary` from a map of test case names to result strings by incrementing the appropriate counter (`Passed`, `Skipped`, or `Failed`) for each reported outcome. | + +--- + +--- + +## Exported Functions + +### DiffReport.String + +**String** - Builds a formatted string containing:
• A summary table of passed, skipped, and failed counts for CLAIM‑1 and CLAIM‑2.
• A differences table listing each test case name with its result in both claims. If no differences exist, it shows an empty placeholder. + +Generates a human‑readable report summarising test case outcomes for two claims and listing differences between them. + +#### Signature (Go) + +```go +func (r *DiffReport) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a formatted string containing:
• A summary table of passed, skipped, and failed counts for CLAIM‑1 and CLAIM‑2.
• A differences table listing each test case name with its result in both claims. If no differences exist, it shows an empty placeholder. | +| **Parameters** | `r *DiffReport` – receiver holding results data. | +| **Return value** | `string` – the fully formatted report. | +| **Key dependencies** | • `fmt.Sprintf` for string formatting.
• `len` to detect empty difference list. | +| **Side effects** | None – purely functional; does not modify state or perform I/O. | +| **How it fits the package** | Provides the standard library interface (`String()`) so a `DiffReport` can be printed directly (e.g., with `fmt.Println`). It is used when generating command‑line output for claim comparison. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Build header"] + B --> C["Format status summary rows"] + C --> D{"TestCases empty?"} + D -- Yes --> E["Append and return"] + D -- No --> F["Format diff table header"] + F --> G["Iterate over TestCases"] + G --> H["Append each diff row"] + H --> I["Return complete string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_DiffReport.String --> fmt.Sprintf + func_DiffReport.String --> len +``` + +#### Functions calling `DiffReport.String` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking DiffReport.String +report := &testcases.DiffReport{ + Claim1ResultsSummary: testcases.TcResultsSummary{Passed: 22, Skipped: 62, Failed: 3}, + Claim2ResultsSummary: testcases.TcResultsSummary{Passed: 21, Skipped: 62, Failed: 4}, + TestCases: []testcases.TcResultDifference{ + {Name: "access-control-net-admin-capability-check", Claim1Result: "failed", Claim2Result: "passed"}, + // … additional diffs … + }, +} + +fmt.Println(report.String()) +``` + +--- + +### GetDiffReport + +**GetDiffReport** - Produces a `DiffReport` comparing test case outcomes from two claim files. Each differing result is recorded, and summary statistics for each claim are included. + +#### Signature (Go) + +```go +func GetDiffReport(resultsClaim1, resultsClaim2 claim.TestSuiteResults) *DiffReport +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a `DiffReport` comparing test case outcomes from two claim files. 
Each differing result is recorded, and summary statistics for each claim are included. | +| **Parameters** | `resultsClaim1`, `resultsClaim2` – maps of test case IDs to their execution results (`claim.TestSuiteResults`). | +| **Return value** | Pointer to a populated `DiffReport` struct. | +| **Key dependencies** | • `getTestCasesResultsMap` (builds ID→result map)
• `getMergedTestCasesNames` (collects all test case IDs)
• `append` (adds differences to slice)
• `getTestCasesResultsSummary` (counts passed/failed/skipped) | +| **Side effects** | No external I/O or state mutation; operates solely on its inputs and returns a new report. | +| **How it fits the package** | Central routine in the `testcases` comparison sub‑package, used by higher‑level comparison commands to present test outcome differences between two claim files. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["getTestCasesResultsMap for Claim1"] + B --> C["getTestCasesResultsMap for Claim2"] + C --> D["getMergedTestCasesNames"] + D --> E["Iterate over all test case names"] + E --> F{"Result differs?"} + F -- Yes --> G["Append difference to report.TestCases"] + F -- No --> H["Continue loop"] + G --> H + H --> I{"End of list?"} + I -- Yes --> J["getTestCasesResultsSummary for Claim1"] + J --> K["getTestCasesResultsSummary for Claim2"] + K --> L["Return &report"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetDiffReport --> func_getTestCasesResultsMap + func_GetDiffReport --> func_getMergedTestCasesNames + func_GetDiffReport --> func_append + func_GetDiffReport --> func_getTestCasesResultsSummary +``` + +#### Functions calling `GetDiffReport` (Mermaid) + +```mermaid +graph TD + func_claimCompareFilesfunc --> func_GetDiffReport +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetDiffReport +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases" +) + +func main() { + // Assume results1 and results2 are populated claim.TestSuiteResults maps + var results1, results2 claim.TestSuiteResults + + diffReport := testcases.GetDiffReport(results1, results2) + fmt.Printf("Differing tests: %d\n", diffReport.DifferentTestCasesResults) +} +``` + +--- + +## Local Functions + +### getMergedTestCasesNames + +**getMergedTestCasesNames** - Combines the keys from two result 
maps into a unique, sorted slice of test‑case names. + +#### Signature (Go) + +```go +func getMergedTestCasesNames(results1, results2 map[string]string) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Combines the keys from two result maps into a unique, sorted slice of test‑case names. | +| **Parameters** | `results1` – `map[string]string`: first set of test results.
`results2` – `map[string]string`: second set of test results. | +| **Return value** | `[]string`: all distinct test‑case names, sorted alphabetically. | +| **Key dependencies** | *append* (built‑in)
*sort.Strings* (`"sort"` package) | +| **Side effects** | None – purely functional; no state mutation or I/O. | +| **How it fits the package** | Used by `GetDiffReport` to iterate over every test case when computing differences between two claim results. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph BuildMap["Create unique name map"] + A1["Iterate results1"] --> B1{"Add to map"} + A2["Iterate results2"] --> B2{"Add to map"} + end + C["Collect keys into slice"] --> D["Sort slice alphabetically"] + E["Return sorted slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getMergedTestCasesNames --> builtin_append + func_getMergedTestCasesNames --> sort_StringStrings +``` + +#### Functions calling `getMergedTestCasesNames` (Mermaid) + +```mermaid +graph TD + func_GetDiffReport --> func_getMergedTestCasesNames +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getMergedTestCasesNames +resultsA := map[string]string{ + "tc1": "pass", + "tc2": "fail", +} +resultsB := map[string]string{ + "tc2": "pass", + "tc3": "skip", +} + +merged := getMergedTestCasesNames(resultsA, resultsB) +// merged == []string{"tc1", "tc2", "tc3"} +``` + +--- + +### getTestCasesResultsMap + +**getTestCasesResultsMap** - Builds a map from each test case ID to its result state for quick comparison. + +#### Signature (Go) + +```go +func getTestCasesResultsMap(testSuiteResults claim.TestSuiteResults) map[string]string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a map from each test case ID to its result state for quick comparison. | +| **Parameters** | `testSuiteResults` claim.TestSuiteResults – the original mapping of test cases within a suite. | +| **Return value** | `map[string]string` – keys are test‑case identifiers (`TestID.ID`), values are their states (`State`). 
| +| **Key dependencies** | Uses the fields `TestID.ID` and `State` from the structs inside `claim.TestSuiteResults`. No external function calls. | +| **Side effects** | None; purely functional transformation. | +| **How it fits the package** | Supplies `GetDiffReport` with a convenient lookup structure to detect differences between two claim result sets. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> Iterate{"for each testCase in testSuiteResults"} + Iterate --> Assign["Add entry: key = testCase.TestID.ID, value = testCase.State"] + Assign --> End["Return map"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `getTestCasesResultsMap` (Mermaid) + +```mermaid +graph TD + func_GetDiffReport --> func_getTestCasesResultsMap +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getTestCasesResultsMap +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim" + // Assume the package containing the function is imported as testcasespkg + testcasespkg "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/testcases" +) + +func main() { + // Construct a dummy TestSuiteResults map for illustration + results := claim.TestSuiteResults{ + "tc1": {TestID: claim.TestID{ID: "test-001"}, State: "passed"}, + "tc2": {TestID: claim.TestID{ID: "test-002"}, State: "failed"}, + } + + // Call the helper + resultMap := testcasespkg.GetTestCasesResultsMap(results) // note: function is unexported; this call would be in same package + + fmt.Printf("%+v\n", resultMap) +} +``` + +*Note:* The function is unexported, so it can only be called from within the `testcases` package. 
The example demonstrates its intended usage pattern when available.* + +--- + +### getTestCasesResultsSummary + +**getTestCasesResultsSummary** - Aggregates the outcome of each test case into a `TcResultsSummary` struct, counting passed, skipped and failed cases. + +#### Signature (Go) + +```go +func getTestCasesResultsSummary(results map[string]string) TcResultsSummary +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Aggregates the outcome of each test case into a `TcResultsSummary` struct, counting passed, skipped and failed cases. | +| **Parameters** | `results` – `map[string]string`: mapping from test‑case name to its result string (`claim.TestCaseResultPassed`, `Skipped`, or `Failed`). | +| **Return value** | `TcResultsSummary`: a struct with integer counters for each outcome category. | +| **Key dependencies** | Uses the constants `claim.TestCaseResultPassed`, `claim.TestCaseResultSkipped`, and `claim.TestCaseResultFailed`. | +| **Side effects** | None – purely functional; does not modify its arguments or external state. | +| **How it fits the package** | Provides a reusable routine for summarizing test‑case results, used by the diff report generator to populate per‑claim summaries. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over `results` map"} + B --> C{"Check result value"} + C -->|"Passed"| D["Increment summary.Passed"] + C -->|"Skipped"| E["Increment summary.Skipped"] + C -->|"Failed"| F["Increment summary.Failed"] + D & E & F --> G["Return summary"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getTestCasesResultsSummary --> claim.TestCaseResultPassed + func_getTestCasesResultsSummary --> claim.TestCaseResultSkipped + func_getTestCasesResultsSummary --> claim.TestCaseResultFailed +``` + +#### Functions calling `getTestCasesResultsSummary` (Mermaid) + +```mermaid +graph TD + func_GetDiffReport --> func_getTestCasesResultsSummary +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getTestCasesResultsSummary +results := map[string]string{ + "tc1": claim.TestCaseResultPassed, + "tc2": claim.TestCaseResultFailed, + "tc3": claim.TestCaseResultSkipped, +} + +summary := getTestCasesResultsSummary(results) +fmt.Printf("Passed: %d, Skipped: %d, Failed: %d\n", + summary.Passed, summary.Skipped, summary.Failed) +``` + +--- diff --git a/docs/cmd/certsuite/claim/compare/versions/versions.md b/docs/cmd/certsuite/claim/compare/versions/versions.md new file mode 100644 index 000000000..39cd42d84 --- /dev/null +++ b/docs/cmd/certsuite/claim/compare/versions/versions.md @@ -0,0 +1,208 @@ +# Package versions + +**Path**: `cmd/certsuite/claim/compare/versions` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [DiffReport](#diffreport) +- [Exported Functions](#exported-functions) + - [Compare](#compare) + - [DiffReport.String](#diffreport.string) + +## Overview + +The versions package provides utilities to compare two claim version structures, serialising them to JSON and diffing the resulting maps to generate a structured report of differences. 
+ +### Key Features + +- Serialises officialClaimScheme.Versions into generic maps via json.Marshal for comparison +- Uses a generic diff routine from github.com/.../diff to produce a DiffReport +- Offers a human‑readable String method on DiffReport that returns an empty representation when no diffs exist + +### Design Notes + +- Comparison is performed by converting structs to JSON rather than field‑by‑field; assumes both inputs are serialisable +- DiffReport contains a pointer to diff.Diffs, so nil indicates no differences +- Best practice: call Compare then inspect DiffReport.Diffs or use its String method for output + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**DiffReport**](#diffreport) | Holds the result of comparing two sets of claim versions | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func Compare(claim1Versions, claim2Versions *officialClaimScheme.Versions) *DiffReport](#compare) | Serialises two `officialClaimScheme.Versions` structs to JSON, deserialises them into generic maps, and uses a generic diff routine to produce a structured report of differences. | +| [func (d *DiffReport) String() string](#diffreport.string) | Provides a human‑readable description of a version comparison result. If no differences are stored, it returns the empty diff representation. | + +## Structs + +### DiffReport + +A container that holds the result of comparing two sets of claim versions. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Diffs` | `*diff.Diffs` | Pointer to a `diff.Diffs` structure (from the external diff package) representing all detected differences. The JSON tag `differences` indicates how it is marshalled/unmarshalled. | + +#### Purpose + +`DiffReport` encapsulates the outcome of the `Compare` function, providing an easy way to inspect or serialize the differences between two `officialClaimScheme.Versions` objects. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| `Compare(claim1Versions, claim2Versions *officialClaimScheme.Versions) *DiffReport` | Serialises both version sets to JSON, unmarshals them into generic interfaces, and uses the diff package to compute differences. The resulting `Diffs` are stored in a new `DiffReport`. | +| `(*DiffReport).String() string` | Returns a human‑readable string representation of the contained diffs; if no diffs exist it returns an empty `diff.Diffs` string. | + +--- + +--- + +## Exported Functions + +### Compare + +**Compare** - Serialises two `officialClaimScheme.Versions` structs to JSON, deserialises them into generic maps, and uses a generic diff routine to produce a structured report of differences. + +#### 1) Signature (Go) + +```go +func Compare(claim1Versions, claim2Versions *officialClaimScheme.Versions) *DiffReport +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Serialises two `officialClaimScheme.Versions` structs to JSON, deserialises them into generic maps, and uses a generic diff routine to produce a structured report of differences. | +| **Parameters** | `claim1Versions *officialClaimScheme.Versions –` first version set
`claim2Versions *officialClaimScheme.Versions –` second version set | +| **Return value** | `*DiffReport –` a pointer containing the diff result produced by `diff.Compare`. | +| **Key dependencies** | • `encoding/json.Marshal`, `encoding/json.Unmarshal`
• `log.Fatalf` for error handling
• `github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/diff.Compare` | +| **Side effects** | No state mutation; only logs fatal errors and returns a diff report. | +| **How it fits the package** | Acts as the public entry point for comparing claim version data within the `versions` comparison module, delegating actual field‑level logic to the generic diff package. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive *Versions objects"] --> B["Marshal claim1 to JSON"] + B --> C["Marshal claim2 to JSON"] + C --> D["Unmarshal JSON of claim1 into interface{} v1"] + D --> E["Unmarshal JSON of claim2 into interface{} v2"] + E --> F["Call diff.Compare(VERSIONS, v1, v2, nil)"] + F --> G["Return &DiffReport{Diffs: result}"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Compare --> func_Marshal + func_Compare --> func_Fatalf + func_Compare --> func_Unmarshal + func_Compare --> func_differ +``` + +*(Note: `func_differ` refers to the generic `diff.Compare` function.)* + +#### 5) Functions calling `Compare` (Mermaid) + +```mermaid +graph TD + func_claimCompareFilesfunc --> func_Compare +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Compare +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/versions" + officialClaimScheme "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheme" +) + +func main() { + v1 := &officialClaimScheme.Versions{ /* populate fields */ } + v2 := &officialClaimScheme.Versions{ /* populate fields differently */ } + + diffReport := versions.Compare(v1, v2) + fmt.Printf("%+v\n", diffReport) +} +``` + +--- + +### DiffReport.String + +**String** - Provides a human‑readable description of a version comparison result. If no differences are stored, it returns the empty diff representation. + +Converts a `DiffReport` into its string representation, delegating to the embedded `diff.Diffs`. 
+ +```go +func (d *DiffReport) String() string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides a human‑readable description of a version comparison result. If no differences are stored, it returns the empty diff representation. | +| **Parameters** | `d *DiffReport` – receiver holding the comparison data. | +| **Return value** | `string` – formatted text produced by `diff.Diffs.String()`. | +| **Key dependencies** | - Calls `(*diff.Diffs).String()` to format differences.
- Uses the nil‑check on `d.Diffs`. | +| **Side effects** | None. The function is pure; it reads state but does not modify it or perform I/O. | +| **How it fits the package** | In the *versions* comparison subsystem, this method enables users and other components to obtain a concise textual summary of the diff report. | + +#### Internal workflow + +```mermaid +flowchart TD + subgraph Receiver["d"] + Diffs --> StringNode["String()"] + end + Diffs -- "== nil" --> NilCheck["NilCheck"] --> EmptyDiffString["(\&diff.Diffs{}).String()"] + Diffs -- "!= nil" --> NonNilCheck["NonNilCheck"] --> DiffsString["String of Diffs"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_DiffReport_String --> func_Diff_Diffs_String +``` + +#### Functions calling `DiffReport.String` + +```mermaid +graph TD + func_DiffReport_String_in_versions_pkg --> func_DiffReport_String +``` + +#### Usage example (Go) + +```go +// Minimal example invoking DiffReport.String +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/compare/versions" +) + +func main() { + var report versions.DiffReport + // Assume report is populated elsewhere + fmt.Println(report.String()) +} +``` + +--- diff --git a/docs/cmd/certsuite/claim/show/csv/csv.md b/docs/cmd/certsuite/claim/show/csv/csv.md new file mode 100644 index 000000000..7b746ad99 --- /dev/null +++ b/docs/cmd/certsuite/claim/show/csv/csv.md @@ -0,0 +1,441 @@ +# Package csv + +**Path**: `cmd/certsuite/claim/show/csv` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [buildCSV](#buildcsv) + - [buildCatalogByID](#buildcatalogbyid) + - [dumpCsv](#dumpcsv) + - [loadCNFTypeMap](#loadcnftypemap) + +## Overview + +Provides a Cobra command that reads a certsuite claim JSON file, validates it, maps CNF names to types using a supplied mapping file, converts the claim schema into CSV rows 
(including remediation and test case details), and writes the result to stdout. + +### Key Features + +- Registers required flags for claim path, CNF name, CNF type map, and an optional header flag; builds a global catalog of test cases for lookup. +- Reads and validates the claim JSON against its version; loads a CNF type mapping from a JSON file; constructs CSV rows with remediation info and mandatory/optional status. +- Writes the generated CSV to standard output using encoding/csv writer, handling errors and flushing the buffer before exiting. + +### Design Notes + +- All flags are marked required except the optional header flag, ensuring callers provide necessary context; misuse results in immediate log fatal. +- The command depends on a global catalog of test cases; if a test case ID is missing, its description defaults to empty strings—this edge case can produce incomplete rows but does not crash. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Builds and configures a `*cobra.Command` that dumps claim data to CSV. It registers required flags for claim file path, CNF name, CNF type mapping file, and an optional header flag. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func buildCSV(claimScheme *claim.Schema, cnfType string, catalogMap map[string]claimschema.TestCaseDescription) (resultsCSVRecords [][]string)](#buildcsv) | Transforms a parsed claim schema into CSV rows, adding remediation, CNF type, and mandatory/optional flags. | +| [func buildCatalogByID() (catalogMap map[string]claimschema.TestCaseDescription)](#buildcatalogbyid) | Constructs a mapping from each test case identifier to its corresponding `TestCaseDescription` by iterating over the global catalog. 
| +| [func dumpCsv(_ *cobra.Command, _ []string) error](#dumpcsv) | Reads a claim JSON file, validates its version, maps CNF names to types, builds a CSV representation of test results, and writes it to standard output. | +| [func loadCNFTypeMap(path string) (CNFTypeMap map[string]string, err error)](#loadcnftypemap) | Reads a JSON file located at `path` and unmarshals its contents into a `map[string]string` that associates CNF names with their types. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Builds and configures a `*cobra.Command` that dumps claim data to CSV. It registers required flags for claim file path, CNF name, CNF type mapping file, and an optional header flag. + +#### 1. Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### 2. Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and configures a `*cobra.Command` that dumps claim data to CSV. It registers required flags for claim file path, CNF name, CNF type mapping file, and an optional header flag. | +| **Parameters** | None | +| **Return value** | A fully‑configured `*cobra.Command` instance ready for use in the CLI hierarchy. | +| **Key dependencies** | - `CSVDumpCommand.Flags().StringVarP`
- `CSVDumpCommand.MarkFlagRequired`
- `log.Fatalf` (for error handling)
- `CSVDumpCommand.Flags().BoolVarP` | +| **Side effects** | Registers command flags and marks them as required; logs fatal errors if flag configuration fails. No state mutation beyond the returned command. | +| **How it fits the package** | Serves as the entry point for the CSV sub‑command under `cmd/certsuite/claim/show`. It is invoked by the parent `show` command to expose CSV functionality. | + +#### 3. Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Configure claim-file flag"] + B --> C["Mark claim-file required"] + C --> D{"Error?"} + D -- Yes --> E["log.Fatalf & return nil"] + D -- No --> F["Configure cnf-name flag"] + F --> G["Mark cnf-name required"] + G --> H{"Error?"} + H -- Yes --> I["log.Fatalf & return nil"] + H -- No --> J["Configure cnf-type flag"] + J --> K["Mark cnf-type required"] + K --> L{"Error?"} + L -- Yes --> M["log.Fatalf & return nil"] + L -- No --> N["Configure add-header flag"] + N --> O["Return CSVDumpCommand"] +``` + +#### 4. Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_StringVarP + func_NewCommand --> func_Flags + func_NewCommand --> func_MarkFlagRequired + func_NewCommand --> pkg_log_Fatalf + func_NewCommand --> func_BoolVarP +``` + +#### 5. Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_Show_NewCommand --> func_NewCommand +``` + +#### 6. 
Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv" + "github.com/spf13/cobra" +) + +func main() { + // Retrieve the CSV command + csvCmd := csv.NewCommand() + + // Add it to a root or parent command as needed + var rootCmd = &cobra.Command{Use: "certsuite"} + rootCmd.AddCommand(csvCmd) + + // Execute the CLI + if err := rootCmd.Execute(); err != nil { + panic(err) + } +} +``` + +--- + +## Local Functions + +### buildCSV + +**buildCSV** - Transforms a parsed claim schema into CSV rows, adding remediation, CNF type, and mandatory/optional flags. + +#### Signature (Go) + +```go +func buildCSV(claimScheme *claim.Schema, cnfType string, catalogMap map[string]claimschema.TestCaseDescription) (resultsCSVRecords [][]string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a parsed claim schema into CSV rows, adding remediation, CNF type, and mandatory/optional flags. | +| **Parameters** | `claimScheme *claim.Schema` – the parsed claim data.
`cnfType string` – optional CNF type; defaults to `"NonTelco"` if empty.
`catalogMap map[string]claimschema.TestCaseDescription` – mapping of test IDs to catalog metadata. | +| **Return value** | `[][]string` – slice of CSV records ready for writing. | +| **Key dependencies** | Calls to the built‑in `append` function (three times). | +| **Side effects** | None; purely functional. Generates data but does not modify globals or perform I/O. | +| **How it fits the package** | Used by `dumpCsv` to produce the CSV output shown to users; central to the CSV generation flow. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"cnfType empty?"} + B -- Yes --> C["Set cnfType = identifiers.NonTelco"] + B -- No --> D["Use provided cnfType"] + C --> E + D --> E + E --> F{"addHeaderFlag set?"} + F -- Yes --> G["Append header row to resultsCSVRecords"] + F -- No --> H + G --> I + H --> I + I --> J["Collect operator versions"] + J --> K["Iterate over claim.Results"] + K --> L["Create record slice"] + L --> M["Append CNFName, opVers, testID, suite"] + M --> N["Append description, state, times, skip reason"] + N --> O["Append check details, output"] + O --> P["Append catalog remediation, cnfType"] + P --> Q["Append mandatory/optional flag"] + Q --> R["Append record to resultsCSVRecords"] + R --> K + K --> S["Return resultsCSVRecords"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_buildCSV --> append +``` + +#### Functions calling `buildCSV` (Mermaid) + +```mermaid +graph TD + func_dumpCsv --> func_buildCSV +``` + +#### Usage example (Go) + +```go +// Minimal example invoking buildCSV +claimScheme, _ := claim.Parse("path/to/claim.yaml") +cnfType := "MyCNF" +catalogMap := map[string]claimschema.TestCaseDescription{ + "test1": {Remediation: "restart pod"}, +} +csvRows := buildCSV(claimScheme, cnfType, catalogMap) +// csvRows can now be written with a csv.Writer +``` + +--- + +### buildCatalogByID + +**buildCatalogByID** - Constructs a mapping from each test case identifier to its corresponding 
`TestCaseDescription` by iterating over the global catalog. + +#### Signature (Go) + +```go +func buildCatalogByID() (catalogMap map[string]claimschema.TestCaseDescription) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs a mapping from each test case identifier to its corresponding `TestCaseDescription` by iterating over the global catalog. | +| **Parameters** | None | +| **Return value** | A map whose keys are test case IDs (`string`) and values are `claimschema.TestCaseDescription`. | +| **Key dependencies** | - `make(map[string]claimschema.TestCaseDescription)`
- Iteration over `identifiers.Catalog` (global slice) | +| **Side effects** | No external state changes; purely functional. | +| **How it fits the package** | Supplies the catalog lookup table used by CSV generation logic (`dumpCsv`). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Initialize empty map"] + B --> C{"Iterate over identifiers.Catalog"} + C --> D["Add entry: key = index.Id, value = index"] + D --> E["Return catalogMap"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_buildCatalogByID --> func_make +``` + +#### Functions calling `buildCatalogByID` + +```mermaid +graph TD + func_dumpCsv --> func_buildCatalogByID +``` + +#### Usage example (Go) + +```go +// Minimal example invoking buildCatalogByID +func main() { + catalog := buildCatalogByID() + // Example: look up a test case by ID + if tc, ok := catalog["TC-001"]; ok { + fmt.Printf("Test case %s: %+v\n", "TC-001", tc) + } +} +``` + +--- + +### dumpCsv + +**dumpCsv** - Reads a claim JSON file, validates its version, maps CNF names to types, builds a CSV representation of test results, and writes it to standard output. + +#### Signature (Go) + +```go +func dumpCsv(_ *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a claim JSON file, validates its version, maps CNF names to types, builds a CSV representation of test results, and writes it to standard output. | +| **Parameters** | `_ *cobra.Command` – command context (unused)
`_ []string` – command arguments (unused) | +| **Return value** | `error` – non‑nil if any step fails (parsing, version check, file loading, CSV writing). | +| **Key dependencies** |
  • `log.SetOutput`, `log.Fatalf` – for logging to stderr
  • `claim.Parse` – loads the claim schema from JSON
  • `claim.CheckVersion` – validates claim format version
  • `loadCNFTypeMap` – reads CNF name‑to‑type mapping from a JSON file<br>
  • `buildCatalogByID` – constructs a map of test case descriptions
  • `buildCSV` – builds the CSV records slice
  • `csv.NewWriter`, `writer.WriteAll`, `writer.Flush`, `writer.Error` – CSV I/O to stdout
| +| **Side effects** | Writes log messages to stderr; outputs CSV data to stdout; may terminate program via `log.Fatalf`. | +| **How it fits the package** | Serves as the action handler for the “show csv” sub‑command of the certsuite CLI, converting claim results into a consumable tabular format. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Set log output to stderr"] --> B["Parse claim file"] + B --> C{"Parse success?"} + C -- No --> D["Return parse error"] + C -- Yes --> E["Check claim version"] + E --> F{"Version ok?"} + F -- No --> G["Return version error"] + F -- Yes --> H["Load CNF type map"] + H --> I{"Load ok?"} + I -- No --> J["Fatal log & exit"] + I -- Yes --> K["Build catalog map"] + K --> L["Determine CNF type"] + L --> M["Build CSV records"] + M --> N["Create CSV writer to stdout"] + N --> O["Write all records"] + O --> P{"Write success?"} + P -- No --> Q["Fatal log & exit"] + P -- Yes --> R["Flush writer"] + R --> S["Check write error"] + S -- Error --> T["Panic with error"] + S -- OK --> U["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_dumpCsv --> log.SetOutput + func_dumpCsv --> claim.Parse + func_dumpCsv --> claim.CheckVersion + func_dumpCsv --> loadCNFTypeMap + func_dumpCsv --> buildCatalogByID + func_dumpCsv --> buildCSV + func_dumpCsv --> csv.NewWriter + func_dumpCsv --> writer.WriteAll + func_dumpCsv --> writer.Flush + func_dumpCsv --> writer.Error +``` + +#### Functions calling `dumpCsv` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking dumpCsv +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{ + RunE: dumpCsv, + } + if err := cmd.Execute(); err != nil { + fmt.Println("Error:", err) + } +} +``` + +--- + +### loadCNFTypeMap + +**loadCNFTypeMap** - Reads a JSON file located at `path` and unmarshals its contents into a `map[string]string` that associates CNF names with their types. + +#### Signature (Go) + +```go +func loadCNFTypeMap(path string) (CNFTypeMap map[string]string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a JSON file located at `path` and unmarshals its contents into a `map[string]string` that associates CNF names with their types. | +| **Parameters** | `path string – filesystem path to the JSON mapping file` | +| **Return value** | `CNFTypeMap map[string]string, err error – the populated map or an error if any step fails` | +| **Key dependencies** | • `os.Open`, `file.Close`<br>
• `io.ReadAll`
• `encoding/json.Unmarshal`
• `fmt.Errorf`, `fmt.Println` | +| **Side effects** | Opens and reads a file; no global state is modified. | +| **How it fits the package** | Supplies the CNF name‑to‑type mapping used by higher‑level CSV generation logic (`dumpCsv`). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Open file at `path`"] --> B{"Error?"} + B -- Yes --> C["Return error via fmt.Errorf"] + B -- No --> D["Read all bytes with io.ReadAll"] + D --> E{"Read error?"} + E -- Yes --> F["Return error via fmt.Errorf"] + E -- No --> G["Unmarshal JSON into CNFTypeMap"] + G --> H{"Unmarshal error?"} + H -- Yes --> I["Print error, return zero values"] + H -- No --> J["Return CNFTypeMap, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadCNFTypeMap --> os.Open + func_loadCNFTypeMap --> fmt.Errorf + func_loadCNFTypeMap --> file.Close + func_loadCNFTypeMap --> make + func_loadCNFTypeMap --> io.ReadAll + func_loadCNFTypeMap --> encoding/json.Unmarshal + func_loadCNFTypeMap --> fmt.Println +``` + +#### Functions calling `loadCNFTypeMap` (Mermaid) + +```mermaid +graph TD + func_dumpCsv --> func_loadCNFTypeMap +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadCNFTypeMap +package main + +import ( + "fmt" + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/csv" +) + +func main() { + cnfMap, err := csv.loadCNFTypeMap("cnf_types.json") + if err != nil { + log.Fatalf("Failed to load CNF type map: %v", err) + } + fmt.Println("Loaded mapping:", cnfMap) +} +``` + +--- + +--- diff --git a/docs/cmd/certsuite/claim/show/failures/failures.md b/docs/cmd/certsuite/claim/show/failures/failures.md new file mode 100644 index 000000000..c552d7ea8 --- /dev/null +++ b/docs/cmd/certsuite/claim/show/failures/failures.md @@ -0,0 +1,812 @@ +# Package failures + +**Path**: `cmd/certsuite/claim/show/failures` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - 
[FailedTestCase](#failedtestcase) + - [FailedTestSuite](#failedtestsuite) + - [NonCompliantObject](#noncompliantobject) + - [ObjectSpec](#objectspec) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) + - [ObjectSpec.AddField](#objectspec.addfield) + - [ObjectSpec.MarshalJSON](#objectspec.marshaljson) +- [Local Functions](#local-functions) + - [getFailedTestCasesByTestSuite](#getfailedtestcasesbytestsuite) + - [getNonCompliantObjectsFromFailureReason](#getnoncompliantobjectsfromfailurereason) + - [parseOutputFormatFlag](#parseoutputformatflag) + - [parseTargetTestSuitesFlag](#parsetargettestsuitesflag) + - [printFailuresJSON](#printfailuresjson) + - [printFailuresText](#printfailurestext) + - [showFailures](#showfailures) + +## Overview + +Handles displaying failed test cases from a claim file in either JSON or plain text, allowing filtering by specific test suites. + +### Key Features + +- Parses and validates claim files against expected format version +- Filters failures per user‑supplied test suite list +- Outputs results in pretty‑printed JSON or human‑readable text + +### Design Notes + +- Uses cobra commands for CLI integration; flags are parsed globally before execution +- Error handling is strict – fatal logs abort on invalid input +- JSON output requires all fields present, otherwise empty objects are returned + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**FailedTestCase**](#failedtestcase) | One-line purpose | +| [**FailedTestSuite**](#failedtestsuite) | Represents a test suite containing one or more failed test cases | +| [**NonCompliantObject**](#noncompliantobject) | Struct definition | +| [**ObjectSpec**](#objectspec) | One‑line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand()(*cobra.Command)](#newcommand) | Function implementation | +| [func (spec *ObjectSpec) AddField(key, value string)](#objectspec.addfield) | Adds a field to the `Fields` 
slice of an `ObjectSpec`, storing a key/value pair. | +| [func (spec *ObjectSpec) MarshalJSON() ([]byte, error)](#objectspec.marshaljson) | Converts an `ObjectSpec` into its JSON representation. If the spec has no fields it returns an empty JSON object (`{}`). | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getFailedTestCasesByTestSuite( claimResultsByTestSuite map[string][]*claim.TestCaseResult, targetTestSuites map[string]bool, ) []FailedTestSuite](#getfailedtestcasesbytestsuite) | Transforms raw test‑case results into a slice of `FailedTestSuite`, each containing only the failed cases that belong to suites requested by the caller. | +| [func getNonCompliantObjectsFromFailureReason(string) ([]NonCompliantObject, error)](#getnoncompliantobjectsfromfailurereason) | Converts the `checkDetails` string of a failed test case into a slice of `NonCompliantObject`s by decoding JSON and mapping report fields. | +| [func parseOutputFormatFlag() (string, error)](#parseoutputformatflag) | Validates that the global flag `outputFormatFlag` matches one of the formats listed in `availableOutputFormats`. Returns the format string if valid. | +| [func parseTargetTestSuitesFlag() map[string]bool](#parsetargettestsuitesflag) | Parses the global flag that lists test suite names, creating a lookup map where each key is a suite name and the value indicates inclusion. | +| [func printFailuresJSON(testSuites []FailedTestSuite)](#printfailuresjson) | Serialises a slice of `FailedTestSuite` into pretty‑printed JSON and writes it to standard output. | +| [func(testSuites []FailedTestSuite)()](#printfailurestext) | Iterates over failed test suites and prints each suite, its failing test cases, and detailed failure reasons to standard output. 
| +| [func showFailures(_ *cobra.Command, _ []string) error](#showfailures) | Parses a claim file, validates its format version, extracts failed test cases per suite, and prints them in the requested output format (JSON or plain text). | + +## Structs + +### FailedTestCase + +A representation of a test case that has failed during a certification run. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `TestCaseName` | `string` | The unique identifier for the test case. | +| `TestCaseDescription` | `string` | A human‑readable explanation of what the test verifies. | +| `CheckDetails` | `string` | Optional details about why the test failed, typically including diagnostic messages or error summaries. | +| `NonCompliantObjects` | `[]NonCompliantObject` | Optional list of objects that did not satisfy the test’s compliance criteria; each entry provides context for the failure. | + +#### Purpose + +The `FailedTestCase` struct encapsulates all information related to a single failed certification test. It is used when generating reports or displaying failures to users, allowing consumers to understand which test failed, why it failed, and which Kubernetes objects contributed to the non‑compliance. + +#### Related functions (if any) + +| Function | Purpose | +|----------|---------| +| *none* | | + +--- + +--- + +### FailedTestSuite + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `TestSuiteName` | `string` | The name of the test suite that produced failures. Serialized as JSON key `name`. | +| `FailingTestCases` | `[]FailedTestCase` | Slice holding each failed test case within this suite. Serialized as JSON key `failures`. | + +#### Purpose + +The `FailedTestSuite` struct aggregates all failing test cases for a particular test suite. It is used when summarizing claim results, allowing callers to easily report or serialize failures per suite. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| `getFailedTestCasesByTestSuite` | Builds a slice of `FailedTestSuite` objects by iterating over claim results and collecting only failed test cases. | +| `printFailuresJSON` | Marshals a slice of `FailedTestSuite` into JSON for output. | +| `printFailuresText` | Formats and prints each `FailedTestSuite` and its contained failures to standard output in human‑readable form. | + +--- + +--- + +### NonCompliantObject + + +**Purpose**: Custom object type needed to provide a different JSON serialization than +the one in claim's test cases' skipReason field. + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Type` | `string` | The Kubernetes resource kind (e.g., `"Pod"`, `"Deployment"`). | +| `Reason` | `string` | Human‑readable explanation of why the object is non‑compliant. | +| `Spec` | `ObjectSpec` | A map of field names to values that describe the object's specification at the time of failure. | + +--- + +### ObjectSpec + +Represents a generic specification consisting of an arbitrary list of key/value pairs. + +--- + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Fields` | `[]struct{ Key, Value string }` | A slice holding zero or more name–value entries that describe the object. Each entry has a `Key` and a corresponding `Value`, both strings. + +--- + +#### Purpose + +`ObjectSpec` serves as a lightweight container for metadata about an object. By storing arbitrary key/value pairs it can be used to attach additional information without defining a rigid schema. The struct is primarily manipulated through its helper methods, which allow adding fields and serializing the collection into JSON. + +--- + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `AddField` | Appends a new key/value pair to the `Fields` slice. 
| +| `MarshalJSON` | Serializes the `ObjectSpec` into a compact JSON object; if no fields are present it returns an empty object (`{}`). | + +--- + +## Exported Functions + +### NewCommand + + +**Signature**: `func NewCommand() *cobra.Command` + +**Purpose**: Builds the `failures` sub‑command of `certsuite claim show`, whose run function (`showFailures`) prints the failed test cases found in a claim file. + +--- + +### ObjectSpec.AddField + +**AddField** - Adds a field to the `Fields` slice of an `ObjectSpec`, storing a key/value pair. + +#### Signature (Go) + +```go +func (spec *ObjectSpec) AddField(key, value string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Adds a field to the `Fields` slice of an `ObjectSpec`, storing a key/value pair. | +| **Parameters** | `key string` – the field name; `value string` – the corresponding value. | +| **Return value** | None (the method mutates the receiver). | +| **Key dependencies** | Calls the built‑in `append` function to extend the slice. | +| **Side effects** | Mutates the `spec.Fields` slice in place, appending a new struct containing the key and value. No external I/O or concurrency concerns. | +| **How it fits the package** | Used when building `NonCompliantObject` instances from parsed JSON; each object’s spec is populated with relevant fields that caused non‑compliance. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive ObjectSpec pointer"] --> B{"Append new field"} + B --> C["Create struct {Key, Value}"] + C --> D["Call append on spec.Fields"] + D --> E["Return to caller"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ObjectSpec.AddField --> func_append +``` + +#### Functions calling `ObjectSpec.AddField` (Mermaid) + +```mermaid +graph TD + func_getNonCompliantObjectsFromFailureReason --> func_ObjectSpec.AddField +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ObjectSpec.AddField +var spec failures.ObjectSpec +spec.AddField("replicas", "3") +spec.AddField("image", "nginx:latest") + +fmt.Printf("%+v\n", spec) +// Output: {Fields:[{Key:replicas Value:3} {Key:image Value:nginx:latest}]} +``` + +--- + +### ObjectSpec.MarshalJSON + +**MarshalJSON** - Converts an `ObjectSpec` into its JSON representation. If the spec has no fields it returns an empty JSON object (`{}`). + +#### Signature (Go) + +```go +func (spec *ObjectSpec) MarshalJSON() ([]byte, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts an `ObjectSpec` into its JSON representation. If the spec has no fields it returns an empty JSON object (`{}`). | +| **Parameters** | *none* – operates on the receiver `spec`. | +| **Return value** | `[]byte` containing the JSON string; `error` is always `nil` in current implementation. | +| **Key dependencies** | • `len` (builtin) to check field count.
• `fmt.Sprintf` from the standard library for formatting key/value pairs. | +| **Side effects** | None – purely functional, no state mutation or I/O. | +| **How it fits the package** | Used by the failures sub‑package to provide a JSON representation of claim failure objects when displaying results. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Fields exist?"} + B -- No --> C["Return {}"] + B -- Yes --> D["Build string"] + D --> E["Loop over Fields"] + E --> F["Append key/value pairs"] + F --> G["Close braces"] + G --> H["Convert to []byte"] + H --> I["Return result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ObjectSpec.MarshalJSON --> builtin_len + func_ObjectSpec.MarshalJSON --> fmt_Sprintf +``` + +#### Functions calling `ObjectSpec.MarshalJSON` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ObjectSpec.MarshalJSON +spec := &failures.ObjectSpec{ + Fields: []failures.Field{ + {Key: "name", Value: "example"}, + {Key: "status", Value: "failed"}, + }, +} +data, err := spec.MarshalJSON() +if err != nil { + log.Fatalf("marshal error: %v", err) +} +fmt.Println(string(data)) // {"name":"example","status":"failed"} +``` + +--- + +## Local Functions + +### getFailedTestCasesByTestSuite + +**getFailedTestCasesByTestSuite** - Transforms raw test‑case results into a slice of `FailedTestSuite`, each containing only the failed cases that belong to suites requested by the caller. + +#### Signature (Go) + +```go +func getFailedTestCasesByTestSuite( + claimResultsByTestSuite map[string][]*claim.TestCaseResult, + targetTestSuites map[string]bool, +) []FailedTestSuite +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms raw test‑case results into a slice of `FailedTestSuite`, each containing only the failed cases that belong to suites requested by the caller. 
| +| **Parameters** | `claimResultsByTestSuite` – map from suite name to all test‑case results;
`targetTestSuites` – optional set of suite names to include (nil means “all”). | +| **Return value** | Slice of `FailedTestSuite`, each holding the suite name and its failing test cases. | +| **Key dependencies** | *`getNonCompliantObjectsFromFailureReason` – parses detailed failure reasons.
* Standard library: `fmt.Fprintf`, `os.Stderr`. | +| **Side effects** | Writes error messages to standard error when a failure reason cannot be parsed. No mutation of input maps. | +| **How it fits the package** | Used by `showFailures` to prepare data for JSON or text output, filtering out passing cases and non‑relevant suites. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate suites"} + B -->|"suite in target"| C["Collect failures"] + B -->|"suite not in target"| D["Skip suite"] + C --> E{"For each test case"} + E -->|"state != failed"| F["Skip case"] + E -->|"state == failed"| G["Parse CheckDetails"] + G -->|"parse error"| H["Log to stderr, use raw details"] + G -->|"success"| I["Attach NonCompliantObjects"] + I --> J["Append FailedTestCase"] + J --> K["End loop"] + K --> L{"Any failures?"} + L -->|"yes"| M["Append FailedTestSuite"] + L -->|"no"| N["Skip suite"] + M --> O["Continue suites"] + N --> O + O --> P["Return slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getFailedTestCasesByTestSuite --> func_getNonCompliantObjectsFromFailureReason + func_getFailedTestCasesByTestSuite --> fmt.Fprintf + func_getFailedTestCasesByTestSuite --> os.Stderr +``` + +#### Functions calling `getFailedTestCasesByTestSuite` (Mermaid) + +```mermaid +graph TD + func_showFailures --> func_getFailedTestCasesByTestSuite +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getFailedTestCasesByTestSuite + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim" +) + +// Assume claimResults and targetSuites are already populated. 
+failedSuites := getFailedTestCasesByTestSuite(claimResults, targetSuites) +for _, suite := range failedSuites { + fmt.Printf("Suite: %s\n", suite.TestSuiteName) + for _, tc := range suite.FailingTestCases { + fmt.Printf("- %s: %s\n", tc.TestCaseName, tc.CheckDetails) + } +} +``` + +--- + +### getNonCompliantObjectsFromFailureReason + +**getNonCompliantObjectsFromFailureReason** - Converts the `checkDetails` string of a failed test case into a slice of `NonCompliantObject`s by decoding JSON and mapping report fields. + +#### Signature (Go) + +```go +func getNonCompliantObjectsFromFailureReason(string) ([]NonCompliantObject, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts the `checkDetails` string of a failed test case into a slice of `NonCompliantObject`s by decoding JSON and mapping report fields. | +| **Parameters** | `checkDetails string – JSON payload containing compliant and non‑compliant objects from a test result.` | +| **Return value** | `([]NonCompliantObject, error) – slice of parsed objects or an error if the payload cannot be decoded.` | +| **Key dependencies** | • `encoding/json.Unmarshal`
• `fmt.Errorf`
• `len`
• `append`
• `ObjectSpec.AddField` (method on `NonCompliantObject.Spec`) | +| **Side effects** | No state mutation; only local variable creation and error handling. | +| **How it fits the package** | Used by the failure‑report generator to extract detailed non‑compliance information from test results, enabling richer output for end users. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Decode checkDetails JSON into objects struct"] + B --> C{"Decoding succeeded?"} + C -- No --> D["Return error via fmt.Errorf"] + C -- Yes --> E["Initialize empty nonCompliantObjects slice"] + E --> F["Iterate over objects.NonCompliant"] + F --> G["Create NonCompliantObject with Type and first Reason field"] + G --> H{"More fields exist?"} + H -- No --> I["Append to slice"] + H -- Yes --> J["Add remaining key/value pairs via Spec.AddField"] + J --> I + I --> K["Next object"] + K --> L["Return slice, nil error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getNonCompliantObjectsFromFailureReason --> func_Json.Unmarshal + func_getNonCompliantObjectsFromFailureReason --> fmt.Errorf + func_getNonCompliantObjectsFromFailureReason --> len + func_getNonCompliantObjectsFromFailureReason --> append + func_getNonCompliantObjectsFromFailureReason --> ObjectSpec.AddField +``` + +#### Functions calling `getNonCompliantObjectsFromFailureReason` (Mermaid) + +```mermaid +graph TD + func_getFailedTestCasesByTestSuite --> func_getNonCompliantObjectsFromFailureReason +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getNonCompliantObjectsFromFailureReason +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures" +) + +func main() { + checkDetails := `{ + "CompliantObjectsOut": [], + "NonCompliantObjectsOut": [ + { + "ObjectType": "Pod", + "ObjectFieldsKeys": ["name", "namespace"], + "ObjectFieldsValues": ["nginx-pod", "default"] + } + ] + }` + + objects, err := 
failures.getNonCompliantObjectsFromFailureReason(checkDetails) + if err != nil { + fmt.Println("Error:", err) + return + } + + for _, obj := range objects { + fmt.Printf("Type: %s, Reason: %s\n", obj.Type, obj.Reason) + } +} +``` + +--- + +### parseOutputFormatFlag + +**parseOutputFormatFlag** - Validates that the global flag `outputFormatFlag` matches one of the formats listed in `availableOutputFormats`. Returns the format string if valid. + +#### Signature (Go) + +```go +func parseOutputFormatFlag() (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that the global flag `outputFormatFlag` matches one of the formats listed in `availableOutputFormats`. Returns the format string if valid. | +| **Parameters** | None | +| **Return value** | *string* – the chosen output format; *error* – non‑nil if the flag is not recognised. | +| **Key dependencies** | • `fmt.Errorf` – constructs error messages
• Access to package‑level variables: `outputFormatFlag`, `availableOutputFormats` | +| **Side effects** | None (pure function) | +| **How it fits the package** | Used by command handlers (e.g., `showFailures`) to decide how test results should be rendered. | + +#### Internal workflow + +```mermaid +flowchart TD + subgraph ValidateFormat["Validate input format"] + A1["Iterate over availableOutputFormats"] --> B1{"Match found?"} + B1 -- Yes --> C1["Return matched format, nil"] + B1 -- No --> D1["Return empty string + error"] + end +``` + +#### Function dependencies + +```mermaid +graph TD + func_parseOutputFormatFlag --> fmt.Errorf +``` + +#### Functions calling `parseOutputFormatFlag` + +```mermaid +graph TD + func_showFailures --> func_parseOutputFormatFlag +``` + +#### Usage example (Go) + +```go +// Minimal example invoking parseOutputFormatFlag +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures" +) + +func main() { + // Assume the global flag has been set elsewhere. + format, err := failures.ParseOutputFormatFlag() + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Println("Chosen output format:", format) +} +``` + +> **Note**: The function is unexported; callers must use a wrapper or be in the same package. + +--- + +### parseTargetTestSuitesFlag + +**parseTargetTestSuitesFlag** - Parses the global flag that lists test suite names, creating a lookup map where each key is a suite name and the value indicates inclusion. + +#### Signature (Go) + +```go +func parseTargetTestSuitesFlag() map[string]bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses the global flag that lists test suite names, creating a lookup map where each key is a suite name and the value indicates inclusion. | +| **Parameters** | None | +| **Return value** | `map[string]bool` – a map of target suite names to `true`; returns `nil` when no suites are specified. 
| +| **Key dependencies** | • `strings.Split` (splits comma‑separated string)
• `strings.TrimSpace` (removes surrounding whitespace) | +| **Side effects** | None; purely functional, only reads the global `testSuitesFlag`. | +| **How it fits the package** | Used by `showFailures` to determine which test suites should be displayed in failure reports. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Check if flag is empty"] -->|"empty"| B["Return nil"] + A -->|"not empty"| C["Split flag by , into list"] + C --> D{"Iterate over list"} + D --> E["Trim each entry"] + E --> F["Add to map with value true"] + F --> G["Return the constructed map"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_parseTargetTestSuitesFlag --> strings.Split + func_parseTargetTestSuitesFlag --> strings.TrimSpace +``` + +#### Functions calling `parseTargetTestSuitesFlag` + +```mermaid +graph TD + func_showFailures --> func_parseTargetTestSuitesFlag +``` + +#### Usage example (Go) + +```go +// Minimal example invoking parseTargetTestSuitesFlag +suites := parseTargetTestSuitesFlag() +if suites != nil { + fmt.Println("Target test suites:", suites) +} else { + fmt.Println("No target test suites specified") +} +``` + +--- + +### printFailuresJSON + +**printFailuresJSON** - Serialises a slice of `FailedTestSuite` into pretty‑printed JSON and writes it to standard output. + +#### Signature (Go) + +```go +func printFailuresJSON(testSuites []FailedTestSuite) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Serialises a slice of `FailedTestSuite` into pretty‑printed JSON and writes it to standard output. | +| **Parameters** | `testSuites []FailedTestSuite` – collection of failed test suites to serialise. | +| **Return value** | none (void). The function terminates the program on marshal error. | +| **Key dependencies** | *`encoding/json.MarshalIndent` – formats JSON with indentation.
* `log.Fatalf` – logs a fatal error and exits if marshalling fails.
*`fmt.Printf` – outputs the resulting JSON string.
* `string(bytes)` – converts the byte slice to a string. | +| **Side effects** | *Writes to stdout via `fmt.Printf`.
* Calls `log.Fatalf`, which aborts the process on error. | +| **How it fits the package** | Part of the command‑line tool for displaying claim failures; invoked by `showFailures` when JSON output is requested. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Create ClaimFailures struct"] + B --> C{"Marshal to JSON"} + C -- Success --> D["Print JSON"] + C -- Failure --> E["Fatal error via log.Fatalf"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printFailuresJSON --> json.MarshalIndent + func_printFailuresJSON --> log.Fatalf + func_printFailuresJSON --> fmt.Printf + func_printFailuresJSON --> string +``` + +#### Functions calling `printFailuresJSON` (Mermaid) + +```mermaid +graph TD + func_showFailures --> func_printFailuresJSON +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printFailuresJSON +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show/failures" +) + +func main() { + suites := []failures.FailedTestSuite{ + { /* populate fields */ }, + } + failures.printFailuresJSON(suites) +} +``` + +--- + +### printFailuresText + +**printFailuresText** - Iterates over failed test suites and prints each suite, its failing test cases, and detailed failure reasons to standard output. + +#### Signature (Go) + +```go +func printFailuresText(testSuites []FailedTestSuite) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over failed test suites and prints each suite, its failing test cases, and detailed failure reasons to standard output. | +| **Parameters** | `testSuites []FailedTestSuite` – A slice of custom structs representing test suites that contain failures. | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | • `fmt.Printf` for formatted output.
• Built‑in `len` to determine if a test case has non‑compliant objects. | +| **Side effects** | Writes human‑readable failure information to `stdout`; does not modify input data or other global state. | +| **How it fits the package** | Serves as the text‑output handler for the *failures* subcommand, called by `showFailures` after parsing claim results. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over testSuites"} + B -->|"for each ts"| C["Print Test Suite: "] + C --> D{"Iterate over ts.FailingTestCases"} + D -->|"for each tc"| E["Print Test Case: "] + E --> F["Print Description: "] + F --> G{"len(tc.NonCompliantObjects) == 0"} + G -- Yes --> H["Print Failure reason: "] + G -- No --> I["Print Failure reasons:"] + I --> J{"Iterate over tc.NonCompliantObjects"} + J -->|"for each obj"| K["Print %2d - Type: %s, Reason: %s"] + K --> L["Print fields of obj.Spec.Fields"] + L --> J + H --> B + K --> J +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printFailuresText --> fmt_Printf + func_printFailuresText --> builtin_len +``` + +#### Functions calling `printFailuresText` (Mermaid) + +```mermaid +graph TD + showFailures --> func_printFailuresText +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printFailuresText +var suites []FailedTestSuite // assume this is populated elsewhere +printFailuresText(suites) +``` + +--- + +### showFailures + +**showFailures** - Parses a claim file, validates its format version, extracts failed test cases per suite, and prints them in the requested output format (JSON or plain text). + +#### Signature (Go) + +```go +func showFailures(_ *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a claim file, validates its format version, extracts failed test cases per suite, and prints them in the requested output format (JSON or plain text). 
| +| **Parameters** | `_ *cobra.Command` – command context (unused), `_ []string` – arguments (unused) | +| **Return value** | `error` – non‑nil if parsing, validation, or printing fails. | +| **Key dependencies** | • `parseOutputFormatFlag()` – obtains output format flag
• `claim.Parse()` – reads and unmarshals claim file
• `claim.CheckVersion()` – validates claim schema version
• `getFailedTestCasesByTestSuite()` – filters failed test cases
`printFailuresJSON()` / `printFailuresText()` – renders results | +| **Side effects** | Reads a file from disk, writes to stdout/stderr (formatted failures or error messages). No global state mutation. | +| **How it fits the package** | Entry point for the `show failures` subcommand of the `certsuite claim show` CLI, orchestrating parsing, filtering, and output formatting. | + +#### Internal workflow + +```mermaid +flowchart TD + A["parseOutputFormatFlag"] --> B["claim.Parse"] + B --> C["claim.CheckVersion"] + C --> D["build resultsByTestSuite map"] + D --> E["parseTargetTestSuitesFlag"] + E --> F["getFailedTestCasesByTestSuite"] + F --> G{"outputFormat"} + G -->|"JSON"| H["printFailuresJSON"] + G -->|"Text"| I["printFailuresText"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_showFailures --> func_parseOutputFormatFlag + func_showFailures --> func_Parse + func_showFailures --> func_CheckVersion + func_showFailures --> func_getFailedTestCasesByTestSuite + func_showFailures --> func_printFailuresJSON + func_showFailures --> func_printFailuresText +``` + +#### Functions calling `showFailures` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking showFailures +cmd := &cobra.Command{Use: "failures"} +_ = cmd.Execute() // internally calls showFailures when the subcommand is run +``` + +--- + +--- diff --git a/docs/cmd/certsuite/claim/show/show.md b/docs/cmd/certsuite/claim/show/show.md new file mode 100644 index 000000000..a8924fab7 --- /dev/null +++ b/docs/cmd/certsuite/claim/show/show.md @@ -0,0 +1,95 @@ +# Package show + +**Path**: `cmd/certsuite/claim/show` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) + +## Overview + +The show package implements the `show` command group for the claim CLI, aggregating sub‑commands that display claim information: a failure report (`failures`) and a CSV dump (`csv`). 
+ +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Creates a Cobra command that aggregates sub‑commands for displaying claim information. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Creates a Cobra command that aggregates sub‑commands for displaying claim information. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a Cobra command that aggregates sub‑commands for displaying claim information. | +| **Parameters** | None | +| **Return value** | A pointer to the root `*cobra.Command` representing the “show” command. | +| **Key dependencies** | • Calls `AddCommand` on the internal `showCommand`.
• Instantiates sub‑commands via `failures.NewCommand()` and `csv.NewCommand()`. | +| **Side effects** | No global state changes; only returns a new command structure. | +| **How it fits the package** | Serves as the entry point for the *show* feature, wiring together failure display and CSV dump capabilities under a single command group. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph ShowRoot["showCommand"] + A["NewCommand"] --> B["failureSubCmd := failures.NewCommand()"] + A --> C["csvSubCmd := csv.NewCommand()"] + B --> D["Add to showCommand"] + C --> E["Add to showCommand"] + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_AddCommand + func_NewCommand --> failures.NewCommand + func_NewCommand --> csv.NewCommand +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + claim.NewCommand --> show.NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/claim/show" + "os" + + "github.com/spf13/cobra" +) + +func main() { + cmd := show.NewCommand() + if err := cmd.Execute(); err != nil { + os.Exit(1) + } +} +``` + +--- + +--- diff --git a/docs/cmd/certsuite/generate/catalog/catalog.md b/docs/cmd/certsuite/generate/catalog/catalog.md new file mode 100644 index 000000000..93fb85edc --- /dev/null +++ b/docs/cmd/certsuite/generate/catalog/catalog.md @@ -0,0 +1,1004 @@ +# Package catalog + +**Path**: `cmd/certsuite/generate/catalog` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [Entry](#entry) +- [Exported Functions](#exported-functions) + - [CreatePrintableCatalogFromIdentifiers](#createprintablecatalogfromidentifiers) + - [GetSuitesFromIdentifiers](#getsuitesfromidentifiers) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [addPreflightTestsToCatalog](#addpreflightteststocatalog) + - 
[emitTextFromFile](#emittextfromfile) + - [generateJS](#generatejs) + - [outputIntro](#outputintro) + - [outputJS](#outputjs) + - [outputSccCategories](#outputscccategories) + - [outputTestCases](#outputtestcases) + - [runGenerateMarkdownCmd](#rungeneratemarkdowncmd) + - [scenarioIDToText](#scenarioidtotext) + - [summaryToMD](#summarytomd) + +## Overview + +The catalog package generates documentation for the Red Hat Best Practices Test Suite, producing Markdown and JSON representations that list all test cases, their metadata, and statistics. + +### Key Features + +- Creates a printable catalogue structure by grouping identifiers per suite +- Builds comprehensive Markdown output including intro, test details, SCC categories, and summary statistics +- Exports commands to generate catalog documentation via Cobra CLI + +### Design Notes + +- Assumes identifiers contain Suite field for grouping; unknown suites result in empty groups +- Handles missing scenario IDs gracefully by mapping to "Unknown Scenario" +- Best practice: invoke NewCommand() to register subcommands before execution + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**Entry**](#entry) | Represents a single catalog item | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry](#createprintablecatalogfromidentifiers) | Groups identifiers by their `Suite` field to produce a printable catalogue structure. | +| [func GetSuitesFromIdentifiers(keys []claim.Identifier) []string](#getsuitesfromidentifiers) | Returns a slice containing the distinct `Suite` values found in the supplied `claim.Identifier` list. | +| [func NewCommand() *cobra.Command](#newcommand) | Constructs and returns a `*cobra.Command` that represents the top‑level CLI entry point for generating catalog documentation. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func addPreflightTestsToCatalog()](#addpreflightteststocatalog) | Gathers all preflight operator and container checks, extracts their metadata, and registers each as a catalog entry for the certsuite test suite. | +| [func emitTextFromFile(filename string) error](#emittextfromfile) | Reads the entire content of a given file and writes it directly to standard output, enabling inclusion of static text such as catalog documentation. | +| [func generateJS(_ *cobra.Command, _ []string) error](#generatejs) | Produces a JSON representation of the package’s classification identifiers and prints it to standard output. | +| [func outputIntro() (out string)](#outputintro) | Builds a static introduction for the Red Hat Best Practices Test Suite catalog, returning it as a single Markdown-formatted string. | +| [func()](#outputjs) | Serialises the global `identifiers.Classification` map to indented JSON, logs an error if marshalling fails, and writes the result to standard output. | +| [func outputSccCategories() (sccCategories string)](#outputscccategories) | Builds a Markdown block that explains the four security context categories used in the generated catalog, including introductory text and detailed descriptions for each category. | +| [func outputTestCases() (outString string, summary catalogSummary)](#outputtestcases) | Builds a Markdown string that documents all test cases in the catalog, including metadata such as ID, description, remediation, best‑practice references, impact statements, tags and scenario classifications. It also compiles statistics on tests per suite and per scenario into a `catalogSummary`. | +| [func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error](#rungeneratemarkdowncmd) | Creates a complete Markdown document describing the Red Hat Best Practices Test Suite for Kubernetes and writes it to standard output. 
| +| [func scenarioIDToText(id string) (text string)](#scenarioidtotext) | Maps known scenario IDs to friendly names; returns `"Unknown Scenario"` for unknown values. | +| [func(catalogSummary)(string)](#summarytomd) | Builds a Markdown string summarizing total test cases, suites, and per‑suite/per‑scenario counts for the certsuite catalog. | + +## Structs + +### Entry + +| Field | Type | Description | +|------------|----------------|-------------| +| `testName` | `string` | Human‑readable name of the test, derived from an identifier’s ID. | +| `identifier` | `claim.Identifier` | The underlying identifier containing metadata such as URL and version; used to locate the test definition. | + +#### Purpose + +The `Entry` struct aggregates the information required for generating a printable catalog of tests. Each instance maps a suite name (extracted from an `Identifier`) to its corresponding test details, enabling downstream formatting and output. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `CreatePrintableCatalogFromIdentifiers` | Builds a map from suite names to slices of `Entry`, populating each entry with the test’s ID and identifier. | + +--- + +## Exported Functions + +### CreatePrintableCatalogFromIdentifiers + +**CreatePrintableCatalogFromIdentifiers** - Groups identifiers by their `Suite` field to produce a printable catalogue structure. + +#### 1) Signature (Go) + +```go +func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Groups identifiers by their `Suite` field to produce a printable catalogue structure. | +| **Parameters** | `keys []claim.Identifier – list of test identifiers (URL, ID, suite name, etc.)` | +| **Return value** | `map[string][]Entry` – mapping from suite names to slices of `Entry`, each containing the test name and its identifier. 
| +| **Key dependencies** | • Calls built‑in `make` to create the map
• Uses slice `append` to populate entries | +| **Side effects** | None (pure function; no I/O, global state mutation, or concurrency) | +| **How it fits the package** | Provides a lightweight data structure used by higher‑level functions (e.g., `outputTestCases`) to format and output test case documentation. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over keys"} + B --> C["Create Entry from key"] + C --> D["Append entry to catalog"] + D --> E{"Next key?"} + E -- Yes --> B + E -- No --> F["Return catalog"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_CreatePrintableCatalogFromIdentifiers --> func_make + func_CreatePrintableCatalogFromIdentifiers --> func_append +``` + +#### 5) Functions calling `CreatePrintableCatalogFromIdentifiers` (Mermaid) + +```mermaid +graph TD + func_outputTestCases --> func_CreatePrintableCatalogFromIdentifiers +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking CreatePrintableCatalogFromIdentifiers +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog" + "github.com/redhat-best-practices-for-k8s/certsuite/claim" +) + +func main() { + // Example identifiers + ids := []claim.Identifier{ + {Id: "TestA", Suite: "Suite1"}, + {Id: "TestB", Suite: "Suite2"}, + {Id: "TestC", Suite: "Suite1"}, + } + + catalogMap := catalog.CreatePrintableCatalogFromIdentifiers(ids) + + for suite, entries := range catalogMap { + fmt.Printf("Suite: %s\n", suite) + for _, e := range entries { + fmt.Printf("- %s (ID: %s)\n", e.testName, e.identifier.Id) + } + } +} +``` + +--- + +--- + +### GetSuitesFromIdentifiers + +**GetSuitesFromIdentifiers** - Returns a slice containing the distinct `Suite` values found in the supplied `claim.Identifier` list. 
+ +#### Signature (Go) + +```go +func GetSuitesFromIdentifiers(keys []claim.Identifier) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice containing the distinct `Suite` values found in the supplied `claim.Identifier` list. | +| **Parameters** | `keys []claim.Identifier` – identifiers to inspect | +| **Return value** | `[]string` – unique suite names | +| **Key dependencies** | • `append` (built‑in)
• `arrayhelper.Unique` from `github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper` | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by catalog generation routines to build a list of test‑suite names for display and summary statistics. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetSuitesFromIdentifiers --> iterateKeys + iterateKeys --> appendSuite + appendSuite --> collectAll + collectAll --> Unique + Unique --> returnResult +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetSuitesFromIdentifiers --> arrayhelper_Unique +``` + +#### Functions calling `GetSuitesFromIdentifiers` (Mermaid) + +```mermaid +graph TD + outputTestCases --> GetSuitesFromIdentifiers +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetSuitesFromIdentifiers +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claim" +) + +func main() { + ids := []claim.Identifier{ + {Suite: "Network"}, + {Suite: "Security"}, + {Suite: "Network"}, + } + suites := GetSuitesFromIdentifiers(ids) + fmt.Println(suites) // Output: [Network Security] +} +``` + +--- + +### NewCommand + +**NewCommand** - Constructs and returns a `*cobra.Command` that represents the top‑level CLI entry point for generating catalog documentation. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs and returns a `*cobra.Command` that represents the top‑level CLI entry point for generating catalog documentation. | +| **Parameters** | None | +| **Return value** | A pointer to a `cobra.Command` configured with subcommands for markdown generation. | +| **Key dependencies** | • Calls `generateCmd.AddCommand(markdownGenerateCmd)`
• Calls `generateCmd.AddCommand(markdownGenerateClassification)` | +| **Side effects** | Mutates the global `generateCmd` by registering two sub‑commands; no external I/O or concurrency is performed. | +| **How it fits the package** | Provides the command that the higher‑level generate CLI aggregates, enabling catalog‑specific generation features. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["NewCommand"] --> B["Add markdownGenerateCmd to generateCmd"] + A --> C["Add markdownGenerateClassification to generateCmd"] + A --> D["Return generateCmd"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_AddCommand +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_generate.NewCommand --> func_NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog" +) + +func main() { + cmd := catalog.NewCommand() + // In a real application you would execute the command: + // if err := cmd.Execute(); err != nil { panic(err) } +} +``` + +--- + +## Local Functions + +### addPreflightTestsToCatalog + +**addPreflightTestsToCatalog** - Gathers all preflight operator and container checks, extracts their metadata, and registers each as a catalog entry for the certsuite test suite. + +#### Signature (Go) + +```go +func addPreflightTestsToCatalog() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers all preflight operator and container checks, extracts their metadata, and registers each as a catalog entry for the certsuite test suite. | +| **Parameters** | None | +| **Return value** | None (side‑effect only) | +| **Key dependencies** | • `artifacts.NewMapWriter` – creates an in‑memory writer for preflight artifacts.
• `artifacts.ContextWithWriter` – injects the writer into a context.
• `plibOperator.NewCheck`, `plibContainer.NewCheck` – instantiate generic check objects.
• `check.List` – retrieves operator/container checks.
• `identifiers.AddCatalogEntry` – registers each test in the global catalog. | +| **Side effects** | • Populates the global `identifiers.Catalog` map with entries for every discovered preflight test.
• Emits error logs via `log.Error` when artifact creation or check listing fails. | +| **How it fits the package** | Called by `outputTestCases` to enrich the catalog before generating the final documentation and summary. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create artifacts writer"] --> B{"Check error"} + B -- OK --> C["Build context with writer"] + C --> D["Instantiate dummy operator check"] + C --> E["Instantiate dummy container check"] + D --> F["List operator checks"] + E --> G["List container checks"] + F --> H{"Error?"} + G --> I{"Error?"} + H -- Error --> J["Log error"] + I -- Error --> K["Log error"] + H -- OK --> L["Append operator checks to allChecks"] + I -- OK --> M["Append container checks to allChecks"] + L & M --> N["Iterate over allChecks"] + N --> O["Extract remediation and override if needed"] + O --> P["Call identifiers.AddCatalogEntry"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_addPreflightTestsToCatalog --> func_artifacts.NewMapWriter + func_addPreflightTestsToCatalog --> func_artifacts.ContextWithWriter + func_addPreflightTestsToCatalog --> func_plibOperator.NewCheck + func_addPreflightTestsToCatalog --> func_plibContainer.NewCheck + func_addPreflightTestsToCatalog --> func_check.List + func_addPreflightTestsToCatalog --> func_identifiers.AddCatalogEntry +``` + +#### Functions calling `addPreflightTestsToCatalog` (Mermaid) + +```mermaid +graph TD + func_outputTestCases --> func_addPreflightTestsToCatalog +``` + +#### Usage example (Go) + +```go +// The function is invoked implicitly during catalog generation. +outputTestCases() // Internally calls addPreflightTestsToCatalog() +``` + +--- + +### emitTextFromFile + +**emitTextFromFile** - Reads the entire content of a given file and writes it directly to standard output, enabling inclusion of static text such as catalog documentation. 
+ +#### Signature (Go) + +```go +func emitTextFromFile(filename string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads the entire content of a given file and writes it directly to standard output, enabling inclusion of static text such as catalog documentation. | +| **Parameters** | `filename string` – Path to the file whose contents are to be streamed. | +| **Return value** | `error` – Non‑nil if reading the file fails; otherwise nil after successful printing. | +| **Key dependencies** | • `os.ReadFile` – Reads file into memory.
• `fmt.Print` – Outputs text to stdout.
• `string` conversion of byte slice to string. | +| **Side effects** | Performs I/O by reading a file and writing to standard output; does not modify program state. | +| **How it fits the package** | Utility helper used during catalog generation to embed static Markdown files (e.g., CATALOG.md) into the generated output without requiring runtime parsing or templating. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Read file"} + B -->|"Success"| C["Convert bytes to string"] + C --> D["Print to stdout"] + D --> E["Return nil"] + B -->|"Error"| F["Return error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_emitTextFromFile --> func_ReadFile + func_emitTextFromFile --> func_Print + func_emitTextFromFile --> func_string +``` + +#### Functions calling `emitTextFromFile` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking emitTextFromFile +package main + +import ( + "log" +) + +func main() { + if err := emitTextFromFile("example.txt"); err != nil { + log.Fatalf("failed to emit text: %v", err) + } +} +``` + +--- + +--- + +### generateJS + +**generateJS** - Produces a JSON representation of the package’s classification identifiers and prints it to standard output. + +#### Signature (Go) + +```go +func generateJS(_ *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a JSON representation of the package’s classification identifiers and prints it to standard output. | +| **Parameters** | `*cobra.Command` – command context (unused), `[]string` – arguments slice (unused). | +| **Return value** | `error` – always `nil`; function does not propagate errors from its internal logic. | +| **Key dependencies** | Calls `outputJS()` which marshals `identifiers.Classification`. Uses standard library packages: `encoding/json`, `fmt`, and a logging package (`log`). 
| +| **Side effects** | Writes formatted JSON to stdout; logs error if marshalling fails. No state mutation in the caller’s context. | +| **How it fits the package** | Serves as a sub‑command handler within the `catalog` command group, enabling users to request classification data in JavaScript/JSON form. | + +#### Internal workflow + +```mermaid +flowchart TD + A["generateJS"] --> B["outputJS"] + B --> C["Marshal identifiers.Classification to JSON"] + C --> D["Print JSON or log error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_generateJS --> func_outputJS +``` + +#### Functions calling `generateJS` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking generateJS +cmd := &cobra.Command{} +err := generateJS(cmd, []string{}) +if err != nil { + fmt.Println("Error:", err) +} +``` + +--- + +--- + +### outputIntro + +**outputIntro** - Builds a static introduction for the Red Hat Best Practices Test Suite catalog, returning it as a single Markdown-formatted string. + +#### Signature (Go) + +```go +func outputIntro() (out string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a static introduction for the Red Hat Best Practices Test Suite catalog, returning it as a single Markdown-formatted string. | +| **Parameters** | None | +| **Return value** | `string` – concatenated header and introductory paragraph ready to be written to stdout or included in larger documentation. | +| **Key dependencies** | • Standard library string literals; no external packages are invoked. | +| **Side effects** | No state mutation, I/O, or concurrency; purely functional. | +| **How it fits the package** | Supplies the opening text that precedes all test‑case listings in the generated catalog markdown. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Build header string"} + B --> C["Concatenate title and intro paragraph"] + C --> D["Return concatenated string"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `outputIntro` (Mermaid) + +```mermaid +graph TD + func_runGenerateMarkdownCmd --> func_outputIntro +``` + +#### Usage example (Go) + +```go +// Minimal example invoking outputIntro (note: unexported within its package) +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog" +) + +func main() { + intro := catalog.outputIntro() + fmt.Println(intro) +} +``` + +--- + +### outputJS + +**outputJS** - Serialises the global `identifiers.Classification` map to indented JSON, logs an error if marshalling fails, and writes the result to standard output. + +#### Signature (Go) + +```go +func() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Serialises the global `identifiers.Classification` map to indented JSON, logs an error if marshalling fails, and writes the result to standard output. | +| **Parameters** | None | +| **Return value** | None (void) | +| **Key dependencies** | • `encoding/json.MarshalIndent` – formatting JSON
• `github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logger.Error` – error logging
• `fmt.Printf` – output to stdout | +| **Side effects** | Writes to stdout; may emit log entries on failure. No state mutation. | +| **How it fits the package** | Used by the command‑line generator (`generateJS`) to expose the classification data structure as JSON for downstream tools or users. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Marshal identifiers.Classification"} + B -- success --> C["Print JSON with fmt.Printf"] + B -- failure --> D["Log error via log.Logger.Error"] + D --> E["Return early"] + C --> F["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_outputJS --> json_MarshalIndent + func_outputJS --> log_Error + func_outputJS --> fmt_Printf +``` + +#### Functions calling `outputJS` (Mermaid) + +```mermaid +graph TD + func_generateJS --> func_outputJS +``` + +#### Usage example (Go) + +```go +// Minimal example invoking outputJS +func main() { + // In the real program this is called via a Cobra command. + outputJS() +} +``` + +--- + +### outputSccCategories + +**outputSccCategories** - Builds a Markdown block that explains the four security context categories used in the generated catalog, including introductory text and detailed descriptions for each category. + +#### 1) Signature (Go) + +```go +func outputSccCategories() (sccCategories string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a Markdown block that explains the four security context categories used in the generated catalog, including introductory text and detailed descriptions for each category. | +| **Parameters** | None | +| **Return value** | `sccCategories` – a formatted string containing the complete Security Context Categories section. | +| **Key dependencies** | *None* – relies only on string concatenation and literal text. | +| **Side effects** | No state mutation or I/O; purely functional. 
| +| **How it fits the package** | Called by `runGenerateMarkdownCmd` to append category information to the final Markdown output of the catalog generator. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Initialize header string"] + B --> C["Define introductory paragraph"] + C --> D["Compose first category block"] + D --> E["Compose second category block"] + E --> F["Compose third category block"] + F --> G["Compose fourth category block"] + G --> H["Concatenate all parts"] + H --> I["Return result string"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_outputSccCategories +``` + +#### 5) Functions calling `outputSccCategories` (Mermaid) + +```mermaid +graph TD + func_runGenerateMarkdownCmd --> func_outputSccCategories +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking outputSccCategories +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog" +) + +func main() { + sccSection := catalog.OutputSccCategories() + fmt.Println(sccSection) +} +``` + +*(Note: `OutputSccCategories` would need to be exported for external use; in the original code it is unexported.)* + +--- + +### outputTestCases + +**outputTestCases** - Builds a Markdown string that documents all test cases in the catalog, including metadata such as ID, description, remediation, best‑practice references, impact statements, tags and scenario classifications. It also compiles statistics on tests per suite and per scenario into a `catalogSummary`. 
+ +#### Signature (Go) + +```go +func outputTestCases() (outString string, summary catalogSummary) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a Markdown string that documents all test cases in the catalog, including metadata such as ID, description, remediation, best‑practice references, impact statements, tags and scenario classifications. It also compiles statistics on tests per suite and per scenario into a `catalogSummary`. | +| **Parameters** | none | +| **Return value** | *outString* – Markdown text to be printed;
*summary* – `catalogSummary` containing counts of suites, tests, and scenario‑level data. | +| **Key dependencies** | • `addPreflightTestsToCatalog()`
• `CreatePrintableCatalogFromIdentifiers(keys)`
• `GetSuitesFromIdentifiers(keys)`
• `sort.Slice`, `sort.Strings`
• `fmt.Sprintf`, `strings.ReplaceAll`, `strings.Contains`, `strings.ToLower`
• Global maps: `identifiers.Catalog`, `identifiers.ImpactMap` | +| **Side effects** | *Modifies* the global catalog by adding preflight tests.
*Writes* to standard output via callers (e.g., `runGenerateMarkdownCmd`).
No external I/O beyond logging and potential program exit on missing impact statement. | +| **How it fits the package** | Central routine for turning in‑memory test identifiers into human‑readable documentation used by the catalog generation command. It is invoked by `runGenerateMarkdownCmd` to produce the “Test Case list” section of the generated Markdown file. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["addPreflightTestsToCatalog"] + B --> C["Build keys slice from identifiers.Catalog"] + C --> D["Sort keys by Id"] + D --> E["CreatePrintableCatalogFromIdentifiers(keys)"] + E --> F{"catalog nil?"} + F -- Yes --> G["Return empty results"] + F -- No --> H["GetSuitesFromIdentifiers(keys)"] + H --> I["Sort suite names"] + I --> J["Initialize summary maps"] + J --> K["Loop over suites"] + K --> L["Append suite header to outString"] + L --> M["Loop over tests in suite"] + M --> N["Update per‑suite & total counters"] + N --> O["Build tags string"] + O --> P["Collect scenario keys"] + P --> Q["Update testPerScenario map"] + Q --> R["Sort scenario keys"] + R --> S["Create classification table"] + S --> T["Append test header and properties to outString"] + T --> U["Check impact statement; log error & exit if missing"] + U --> V["Append tags and classification string"] + V --> M + K --> G + G --> H +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_outputTestCases --> addPreflightTestsToCatalog + func_outputTestCases --> CreatePrintableCatalogFromIdentifiers + func_outputTestCases --> GetSuitesFromIdentifiers + func_outputTestCases --> sort_Slice + func_outputTestCases --> sort_Strings + func_outputTestCases --> fmt_Sprintf + func_outputTestCases --> strings_ReplaceAll + func_outputTestCases --> strings_Contains + func_outputTestCases --> strings_ToLower +``` + +#### Functions calling `outputTestCases` (Mermaid) + +```mermaid +graph TD + runGenerateMarkdownCmd --> outputTestCases +``` + +#### Usage example 
(Go) + +```go +// Minimal example invoking outputTestCases +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog" +) + +func main() { + md, summary := catalog.OutputTestCases() + // md contains the Markdown string + // summary holds aggregated statistics + println(md) + println("Total suites:", summary.TotalSuites()) +} +``` + +*(Note: `OutputTestCases` is a public wrapper that internally calls the unexported `outputTestCases`. If calling directly, use `catalog.outputTestCases()` within the same package.)* + +--- + +### runGenerateMarkdownCmd + +**runGenerateMarkdownCmd** - Creates a complete Markdown document describing the Red Hat Best Practices Test Suite for Kubernetes and writes it to standard output. + +#### Signature (Go) + +```go +func runGenerateMarkdownCmd(_ *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a complete Markdown document describing the Red Hat Best Practices Test Suite for Kubernetes and writes it to standard output. | +| **Parameters** | `_ *cobra.Command` – command context (unused)
`_ []string` – arguments (unused) | +| **Return value** | `error` – always `nil`; errors are not expected in this routine. | +| **Key dependencies** | • `outputIntro()`
• `outputTestCases()`
• `summaryToMD(catalogSummary)`
• `outputSccCategories()`
• `fmt.Fprintf(os.Stdout, …)` | +| **Side effects** | Writes the assembled Markdown string to `os.Stdout`. No state is mutated. | +| **How it fits the package** | Entry point for the `generate catalog` command; orchestrates sub‑functions that build each section of the catalog. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + runGenerateMarkdownCmd --> outputIntro + runGenerateMarkdownCmd --> outputTestCases + runGenerateMarkdownCmd --> summaryToMD + runGenerateMarkdownCmd --> outputSccCategories + runGenerateMarkdownCmd --> fmt.Fprintf +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_runGenerateMarkdownCmd --> func_outputIntro + func_runGenerateMarkdownCmd --> func_outputTestCases + func_runGenerateMarkdownCmd --> func_summaryToMD + func_runGenerateMarkdownCmd --> func_outputSccCategories + func_runGenerateMarkdownCmd --> fmt.Fprintf +``` + +#### Functions calling `runGenerateMarkdownCmd` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking runGenerateMarkdownCmd +package main + +import ( + "github.com/spf13/cobra" +) + +func main() { + // In real usage, this would be bound to a cobra.Command. + cmd := &cobra.Command{} + if err := runGenerateMarkdownCmd(cmd, nil); err != nil { + panic(err) + } +} +``` + +--- + +### scenarioIDToText + +**scenarioIDToText** - Maps known scenario IDs to friendly names; returns `"Unknown Scenario"` for unknown values. + +#### Signature (Go) + +```go +func scenarioIDToText(id string) (text string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Maps known scenario IDs to friendly names; returns `"Unknown Scenario"` for unknown values. | +| **Parameters** | `id string` – the raw identifier (e.g., `identifiers.FarEdge`). | +| **Return value** | `text string` – a descriptive label suitable for display in output. 
| +| **Key dependencies** | Uses constants from the `identifiers` package (`FarEdge`, `Telco`, `NonTelco`, `Extended`). | +| **Side effects** | None. Pure function; no state changes or I/O. | +| **How it fits the package** | Provides a lookup helper for rendering scenario information in catalog output and summaries. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"switch id"} + B -->|"FarEdge"| C["text = Far-Edge"] + B -->|"Telco"| D["text = Telco"] + B -->|"NonTelco"| E["text = Non-Telco"] + B -->|"Extended"| F["text = Extended"] + B -->|"default"| G["text = Unknown Scenario"] + C --> H["Return text"] + D --> H + E --> H + F --> H + G --> H +``` + +#### Function dependencies (Mermaid) + +None – this function has no dependencies on other functions in the package. + +#### Functions calling `scenarioIDToText` (Mermaid) + +```mermaid +graph TD + func_outputTestCases --> func_scenarioIDToText +``` + +#### Usage example (Go) + +```go +// Minimal example invoking scenarioIDToText +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog" +) + +func main() { + fmt.Println(catalog.ScenarioIDToText("FarEdge")) // prints: Far-Edge +} +``` + +--- + +### summaryToMD + +**summaryToMD** - Builds a Markdown string summarizing total test cases, suites, and per‑suite/per‑scenario counts for the certsuite catalog. + +#### Signature (Go) + +```go +func(catalogSummary)(string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a Markdown string summarizing total test cases, suites, and per‑suite/per‑scenario counts for the certsuite catalog. | +| **Parameters** | `aSummary` *catalogSummary – data structure holding aggregated statistics. | +| **Return value** | A formatted Markdown string (`out`). | +| **Key dependencies** | • `fmt.Sprintf` (multiple uses)
• `make`, `len`, `append` (slice construction)
• `sort.Strings` (ordering keys) | +| **Side effects** | None – purely functional; only returns a string. | +| **How it fits the package** | Used by the command that prints the full Markdown output for the catalog (`runGenerateMarkdownCmd`). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Add header text"] + B --> C["Insert totals"] + C --> D["Create suite table"] + D --> E["Sort suite keys"] + E --> F["Loop suites → append rows"] + F --> G["Blank line"] + G --> H["Create scenario tables"] + H --> I["Sort scenario keys"] + I --> J["Loop scenarios → append rows"] + J --> K["Return string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_summaryToMD --> fmt.Sprintf + func_summaryToMD --> make + func_summaryToMD --> len + func_summaryToMD --> append + func_summaryToMD --> sort.Strings +``` + +#### Functions calling `summaryToMD` (Mermaid) + +```mermaid +graph TD + func_runGenerateMarkdownCmd --> func_summaryToMD +``` + +#### Usage example (Go) + +```go +// Minimal example invoking summaryToMD +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog" +) + +func main() { + // Assume we have a populated catalogSummary instance + var s catalog.catalogSummary + // ... populate s ... 
+ + md := summaryToMD(s) + println(md) // prints the Markdown summary +} +``` + +--- diff --git a/docs/cmd/certsuite/generate/config/config.md b/docs/cmd/certsuite/generate/config/config.md new file mode 100644 index 000000000..3f7232a6d --- /dev/null +++ b/docs/cmd/certsuite/generate/config/config.md @@ -0,0 +1,1581 @@ +# Package config + +**Path**: `cmd/certsuite/generate/config` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [createCertSuiteResourcesConfiguration](#createcertsuiteresourcesconfiguration) + - [createCollectorConfiguration](#createcollectorconfiguration) + - [createConfiguration](#createconfiguration) + - [createExceptionsConfiguration](#createexceptionsconfiguration) + - [createSettingsConfiguration](#createsettingsconfiguration) + - [generateConfig](#generateconfig) + - [getAnswer](#getanswer) + - [loadAcceptedKernelTaints](#loadacceptedkerneltaints) + - [loadCRDfilters](#loadcrdfilters) + - [loadHelmCharts](#loadhelmcharts) + - [loadManagedDeployments](#loadmanageddeployments) + - [loadManagedStatefulSets](#loadmanagedstatefulsets) + - [loadNamespaces](#loadnamespaces) + - [loadNonScalableDeployments](#loadnonscalabledeployments) + - [loadNonScalableStatefulSets](#loadnonscalablestatefulsets) + - [loadOperatorLabels](#loadoperatorlabels) + - [loadPodLabels](#loadpodlabels) + - [loadProbeDaemonSetNamespace](#loadprobedaemonsetnamespace) + - [loadProtocolNames](#loadprotocolnames) + - [loadServices](#loadservices) + - [saveConfiguration](#saveconfiguration) + - [showConfiguration](#showconfiguration) + +## Overview + +Provides a command‑line interface for creating, inspecting and saving a CertSuite test configuration. 
It presents interactive menus that let users specify namespaces, labels, CRD filters, excluded Helm charts, protocol names, services and other runtime settings, then serialises the resulting `configuration.TestConfiguration` to YAML. + +### Key Features + +- Interactive prompt UI built with promptui and custom text styling for selecting resources and exceptions +- Integration with cobra as a sub‑command of the Certsuite generate tool +- Automatic loading and parsing of user input into structured configuration objects + +### Design Notes + +- All configuration is held in a package‑level variable `certsuiteConfig` that is mutated by helper loaders; this simplifies interaction but limits concurrency safety +- Menus loop until the user chooses “previous menu” or an error occurs, providing a guided wizard experience +- The code relies on global state and external packages (color, promptui) for styling, which may affect testability + +### Structs Summary + +| Name | Purpose | +|------|----------| + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func() *cobra.Command](#newcommand) | Returns a pre‑configured `*cobra.Command` that implements the `config` subcommand of the Certsuite generate tool. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func createCertSuiteResourcesConfiguration()](#createcertsuiteresourcesconfiguration) | Presents an interactive menu allowing users to specify Kubernetes namespaces, pod/operator labels, CRD filters, and managed deployments/statefulsets for CertSuite runs. | +| [func createCollectorConfiguration()](#createcollectorconfiguration) | Builds a command‑line UI for selecting a collector configuration option, looping until the user selects “previous menu” or an error occurs. | +| [func()()](#createconfiguration) | Launches an interactive prompt that lets users configure various sections of the CertSuite settings. 
| +| [func createExceptionsConfiguration()](#createexceptionsconfiguration) | Presents a prompt‑based interface that allows users to configure various exception lists (kernel taints, Helm charts, protocol names, services, non‑scalable deployments and stateful sets). Each selection triggers the loading of user input into the global configuration. | +| [func createSettingsConfiguration()](#createsettingsconfiguration) | Presents an interactive prompt to configure runtime settings, currently only the Probe DaemonSet namespace. | +| [func generateConfig()](#generateconfig) | Provides a command‑line interface that lets the user create, view or save a test configuration for Cert Suite. | +| [func getAnswer(prompt, syntax, example string) []string](#getanswer) | Displays an interactive prompt with styled text, reads a line from standard input, splits it by commas into fields, trims whitespace, and returns the resulting slice. | +| [func loadAcceptedKernelTaints(taints []string)](#loadacceptedkerneltaints) | Stores a list of accepted kernel taint names in the global `certsuiteConfig`. | +| [func([]string)()](#loadcrdfilters) | Parses user‑supplied CRD filter strings, converts them into `CrdFilter` objects, and stores them in the global configuration. | +| [func loadHelmCharts(helmCharts []string)](#loadhelmcharts) | Populates the global configuration with a list of Helm chart names that should be excluded from scanning. | +| [func loadManagedDeployments([]string)](#loadmanageddeployments) | Populates the global configuration with user‑supplied managed deployment names, resetting any previous entries. | +| [func loadManagedStatefulSets(statefulSets []string)](#loadmanagedstatefulsets) | Stores a list of StatefulSet names that should be considered managed by CertSuite. The function clears any existing entries and replaces them with the supplied slice. 
| +| [func loadNamespaces(namespaces []string)](#loadnamespaces) | Builds a slice of `configuration.Namespace` objects from raw namespace strings and assigns it to `certsuiteConfig.TargetNameSpaces`. | +| [func loadNonScalableDeployments(nonScalableDeployments []string)](#loadnonscalabledeployments) | Parses a slice of strings describing non‑scalable deployments and populates the global configuration with structured objects for later use. | +| [func loadNonScalableStatefulSets(nonScalableStatefulSets []string)](#loadnonscalablestatefulsets) | Parses a slice of strings describing StatefulSets that should not be subjected to scaling tests and stores them in the global configuration. Each string is expected to contain `name/namespace`. | +| [func loadOperatorLabels(operatorLabels []string)](#loadoperatorlabels) | Stores a slice of operator label strings into the global configuration, replacing any existing list. | +| [func loadPodLabels(podLabels []string)](#loadpodlabels) | Stores a list of pod labels to be used as filters when selecting pods under test. It replaces any previously stored label set with the new slice. | +| [func loadProbeDaemonSetNamespace(namespace []string)](#loadprobedaemonsetnamespace) | Persists the first element of a string slice as the namespace used by the Probe DaemonSet in the global configuration. | +| [func loadProtocolNames(protocolNames []string)](#loadprotocolnames) | Replaces the current set of valid protocol names in `certsuiteConfig` with a new list supplied by the caller. | +| [func loadServices(services []string)](#loadservices) | Stores the supplied list of service names in the global configuration, resetting any previous ignore list. | +| [func (*configuration.TestConfiguration)()](#saveconfiguration) | Serialises the current `TestConfiguration`, prompts the user for an output file name, and writes the YAML to that file. 
| +| [func showConfiguration(config *configuration.TestConfiguration)](#showconfiguration) | Renders the current `TestConfiguration` as a pretty‑printed YAML string and writes it to standard output. | + +## Structs + +## Exported Functions + +### NewCommand + +**NewCommand** - Returns a pre‑configured `*cobra.Command` that implements the `config` subcommand of the Certsuite generate tool. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a pre‑configured `*cobra.Command` that implements the `config` subcommand of the Certsuite generate tool. | +| **Parameters** | None | +| **Return value** | A pointer to the Cobra command instance that can be added to the parent CLI hierarchy. | +| **Key dependencies** | • `github.com/spf13/cobra` (for `*cobra.Command`)
• The package’s own `generateConfigCmd` variable, which holds the actual command implementation | +| **Side effects** | No observable state changes or I/O; it merely provides a reference to an existing command object. | +| **How it fits the package** | Acts as the public factory function for the configuration subcommand, enabling other packages (e.g., `cmd/certsuite/generate`) to register this command within the overall CLI tree. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + NewCommand --> generateConfigCmd +``` + +#### Function dependencies (Mermaid) + +None – this function does not call any other functions in this package. + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + cmd/certsuite/generate.NewCommand --> config.NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" +) + +func main() { + cmd := config.NewCommand() + // cmd can now be added to a parent Cobra command or executed directly. +} +``` + +--- + +## Local Functions + +### createCertSuiteResourcesConfiguration + +**createCertSuiteResourcesConfiguration** - Presents an interactive menu allowing users to specify Kubernetes namespaces, pod/operator labels, CRD filters, and managed deployments/statefulsets for CertSuite runs. + +#### Signature (Go) + +```go +func createCertSuiteResourcesConfiguration() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Presents an interactive menu allowing users to specify Kubernetes namespaces, pod/operator labels, CRD filters, and managed deployments/statefulsets for CertSuite runs. 
| +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | `strings.ReplaceAll`, `strings.ToLower`, `strings.Contains`; `promptui.Select` (interactive prompt); `log.Printf`; helper functions: `loadNamespaces`, `loadPodLabels`, `loadOperatorLabels`, `loadCRDfilters`, `loadManagedDeployments`, `loadManagedStatefulSets`; `getAnswer`. | +| **Side effects** | Modifies global configuration (`certsuiteConfig`) via the load* helpers; writes to stdout/stderr through prompts and logs. | +| **How it fits the package** | Invoked from `createConfiguration` when the user selects the “CertSuite Resources” option, enabling dynamic runtime configuration of what resources CertSuite will evaluate. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Show menu"} + B -->|"Select namespaces"| C["Get answer → loadNamespaces"] + B -->|"Select pods"| D["Get answer → loadPodLabels"] + B -->|"Select operators"| E["Get answer → loadOperatorLabels"] + B -->|"Select CRD filters"| F["Get answer → loadCRDfilters"] + B -->|"Select managed deployments"| G["Get answer → loadManagedDeployments"] + B -->|"Select managed statefulsets"| H["Get answer → loadManagedStatefulSets"] + B -->|"Previous menu"| I["Exit loop"] + C & D & E & F & G & H --> B +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_createCertSuiteResourcesConfiguration --> func_loadNamespaces + func_createCertSuiteResourcesConfiguration --> func_loadPodLabels + func_createCertSuiteResourcesConfiguration --> func_loadOperatorLabels + func_createCertSuiteResourcesConfiguration --> func_loadCRDfilters + func_createCertSuiteResourcesConfiguration --> func_loadManagedDeployments + func_createCertSuiteResourcesConfiguration --> func_loadManagedStatefulSets + func_createCertSuiteResourcesConfiguration --> func_getAnswer +``` + +#### Functions calling `createCertSuiteResourcesConfiguration` (Mermaid) + +```mermaid +graph TD + func_createConfiguration --> 
func_createCertSuiteResourcesConfiguration +``` + +#### Usage example (Go) + +```go +// The function is called indirectly by createConfiguration when the user selects +// the “CertSuite Resources” menu option. A direct call would simply launch the +// interactive prompt for configuring resources. + +func main() { + // Trigger configuration flow + createConfiguration() +} +``` + +--- + +### createCollectorConfiguration + +**createCollectorConfiguration** - Builds a command‑line UI for selecting a collector configuration option, looping until the user selects “previous menu” or an error occurs. + +Creates an interactive prompt that lets the user choose a collector option from a predefined list and handles the selection loop until the user exits. + +```go +func createCollectorConfiguration() +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a command‑line UI for selecting a collector configuration option, looping until the user selects “previous menu” or an error occurs. | +| **Parameters** | none | +| **Return value** | none (the function performs side effects only) | +| **Key dependencies** | `strings.ReplaceAll`, `strings.ToLower`, `strings.Contains`, `promptui.Select.Run`, `log.Printf` | +| **Side effects** | Reads user input via terminal, logs errors to stdout, and exits the loop based on user choice. No state is returned or modified outside its local scope. | +| **How it fits the package** | Part of the `config` subpackage that assembles configuration prompts; this function specifically handles collector‑related options within a generation workflow. 
| + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Initialize Options"} + B --> C["Define Searcher"] + C --> D["Create Prompt"] + D --> E{"Loop"} + E -->|"Error"| F["Log Error & Return"] + E -->|"Select Option"| G["Handle Choice"] + G -->|"Exit"| H["Set exit=true"] + G -->|"Other"| I["Placeholder for future logic"] + I --> E + H --> J["End Loop"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_createCollectorConfiguration --> func_strings.ReplaceAll + func_createCollectorConfiguration --> func_strings.ToLower + func_createCollectorConfiguration --> func_strings.Contains + func_createCollectorConfiguration --> func_promptui.Select.Run + func_createCollectorConfiguration --> func_log.Printf +``` + +#### Functions calling `createCollectorConfiguration` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example + +```go +// Minimal example invoking createCollectorConfiguration +func main() { + // The function performs its work internally; no return value. + createCollectorConfiguration() +} +``` + +--- + +### createConfiguration + +**createConfiguration** - Launches an interactive prompt that lets users configure various sections of the CertSuite settings. + +#### Signature (Go) + +```go +func createConfiguration() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Launches an interactive prompt that lets users configure various sections of the CertSuite settings. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | - `promptui.Select` for user interaction
- `createCertSuiteResourcesConfiguration`
- `createExceptionsConfiguration`
- `createSettingsConfiguration` | +| **Side effects** | Displays prompts, updates global configuration state via the called section‑specific functions. | +| **How it fits the package** | Acts as the entry point for manual configuration within the *config* subpackage, invoked by the higher‑level `generateConfig` routine. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start createConfiguration"] --> B["Build createMenu slice"] + B --> C["Instantiate promptui.Select (createPrompt)"] + C --> D{"User selects option"} + D -->|"certSuiteResources"| E["Call createCertSuiteResourcesConfiguration"] + D -->|"exceptions"| F["Call createExceptionsConfiguration"] + D -->|"settings"| G["Call createSettingsConfiguration"] + D -->|"previousMenu"| H["Exit loop"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_createConfiguration --> func_createCertSuiteResourcesConfiguration + func_createConfiguration --> func_createExceptionsConfiguration + func_createConfiguration --> func_createSettingsConfiguration +``` + +#### Functions calling `createConfiguration` (Mermaid) + +```mermaid +graph TD + func_generateConfig --> func_createConfiguration +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createConfiguration +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" +) + +func main() { + // This will launch the interactive configuration menu. + config.createConfiguration() +} +``` + +--- + +### createExceptionsConfiguration + +**createExceptionsConfiguration** - Presents a prompt‑based interface that allows users to configure various exception lists (kernel taints, Helm charts, protocol names, services, non‑scalable deployments and stateful sets). Each selection triggers the loading of user input into the global configuration. 
+ +#### Signature (Go) + +```go +func createExceptionsConfiguration() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Presents a prompt‑based interface that allows users to configure various exception lists (kernel taints, Helm charts, protocol names, services, non‑scalable deployments and stateful sets). Each selection triggers the loading of user input into the global configuration. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | *`strings.ReplaceAll`, `strings.ToLower`, `strings.Contains`* – used for search filtering.
*`promptui.Select`* – drives the interactive menu.
*`log.Printf`* – error logging.
*`getAnswer`*, *`loadAcceptedKernelTaints`*, *`loadHelmCharts`*, *`loadProtocolNames`*, *`loadServices`*, *`loadNonScalableDeployments`*, *`loadNonScalableStatefulSets`* – functions invoked based on user selection. | +| **Side effects** | Mutates global configuration variables (`certsuiteConfig.*`) via the load functions.
Writes error messages to standard logging output.
Blocks execution until the user exits the menu loop. | +| **How it fits the package** | Part of the `config` sub‑package in the CertSuite command generator; it is called from `createConfiguration()` when the “exceptions” option is chosen, enabling end‑users to tailor exception rules for subsequent test runs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Build options list"] + B --> C["Define searcher function"] + C --> D["Create promptui.Select with searcher"] + D --> E{"Loop until exit"} + E -->|"Select option"| F["Switch on chosen option"] + F --> G{"kernelTaints"} -->|"Yes"| H["Call loadAcceptedKernelTaints(getAnswer(...))"] + F --> I{"helmCharts"} -->|"Yes"| J["Call loadHelmCharts(getAnswer(...))"] + F --> K{"protocolNames"}-->|"Yes"| L["Call loadProtocolNames(getAnswer(...))"] + F --> M{"services"} -->|"Yes"| N["Call loadServices(getAnswer(...))"] + F --> O{"nonScalableDeployments"} -->|"Yes"| P["Call loadNonScalableDeployments(getAnswer(...))"] + F --> Q{"nonScalableStatefulSets"} -->|"Yes"| R["Call loadNonScalableStatefulSets(getAnswer(...))"] + F --> S{"previousMenu"} -->|"Yes"| T["Set exit = true"] + E --> U["If exit"] --> V["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_createExceptionsConfiguration --> strings.ReplaceAll + func_createExceptionsConfiguration --> strings.ToLower + func_createExceptionsConfiguration --> strings.Contains + func_createExceptionsConfiguration --> promptui.Select + func_createExceptionsConfiguration --> log.Printf + func_createExceptionsConfiguration --> getAnswer + func_createExceptionsConfiguration --> loadAcceptedKernelTaints + func_createExceptionsConfiguration --> loadHelmCharts + func_createExceptionsConfiguration --> loadProtocolNames + func_createExceptionsConfiguration --> loadServices + func_createExceptionsConfiguration --> loadNonScalableDeployments + func_createExceptionsConfiguration --> loadNonScalableStatefulSets +``` + +#### Functions 
calling `createExceptionsConfiguration` (Mermaid) + +```mermaid +graph TD + func_createConfiguration --> func_createExceptionsConfiguration +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createExceptionsConfiguration +func main() { + // The function is normally called from the interactive menu system. + // Here we call it directly to trigger the exceptions configuration flow. + createExceptionsConfiguration() +} +``` + +--- + +### createSettingsConfiguration + +**createSettingsConfiguration** - Presents an interactive prompt to configure runtime settings, currently only the Probe DaemonSet namespace. + +#### Signature (Go) + +```go +func createSettingsConfiguration() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Presents an interactive prompt to configure runtime settings, currently only the Probe DaemonSet namespace. | +| **Parameters** | None | +| **Return value** | None (side‑effects only) | +| **Key dependencies** | `promptui.Select`, `log.Printf`, `loadProbeDaemonSetNamespace`, `getAnswer` | +| **Side effects** | Reads user input, updates global configuration (`certsuiteConfig.ProbeDaemonSetNamespace`), logs errors. | +| **How it fits the package** | Part of a CLI wizard that builds a complete configuration; called from `createConfiguration`. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Show settings menu"] + B --> C{"User selects option"} + C -->|"probeDaemonSet"| D["Prompt for namespace"] + D --> E["Read input via getAnswer"] + E --> F["Call loadProbeDaemonSetNamespace"] + F --> G["Update global config"] + C -->|"previousMenu"| H["Exit loop"] + H --> I["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_createSettingsConfiguration --> func_loadProbeDaemonSetNamespace + func_createSettingsConfiguration --> func_getAnswer + func_createSettingsConfiguration --> func_promptui.Select + func_createSettingsConfiguration --> func_log.Printf +``` + +#### Functions calling `createSettingsConfiguration` (Mermaid) + +```mermaid +graph TD + func_createConfiguration --> func_createSettingsConfiguration +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createSettingsConfiguration +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" +) + +func main() { + // The function is part of a larger wizard; calling it directly will start the settings prompt. + // Note: createSettingsConfiguration is unexported, so this only compiles inside the config package. + config.createSettingsConfiguration() +} +``` + +--- + +--- + +### generateConfig + +**generateConfig** - Provides a command‑line interface that lets the user create, view or save a test configuration for Cert Suite. + +#### 1) Signature (Go) + +```go +func generateConfig() +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides a command‑line interface that lets the user create, view or save a test configuration for Cert Suite. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | • `promptui.Select` – builds menu prompts.
• `log.Printf` – logs prompt errors.
• `createConfiguration`, `showConfiguration`, `saveConfiguration` – sub‑functions that perform the actual actions. | +| **Side effects** | Interacts with the terminal, writes log output, and modifies global configuration state (`certsuiteConfig`). | +| **How it fits the package** | Acts as the top‑level driver for the configuration sub‑command in the Cert Suite CLI. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Menu loop"} + B --> C["Show menu"] + C --> D["Read selection"] + D --> E{"Switch on option"} + E -->|"create"| F(createConfiguration) + E -->|"show"| G(showConfiguration) + E -->|"save"| H(saveConfiguration) + E -->|"quit"| I["Exit loop"] + F --> B + G --> B + H --> B + I --> J["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_generateConfig --> func_createConfiguration + func_generateConfig --> func_showConfiguration + func_generateConfig --> func_saveConfiguration + func_generateConfig --> log_Printf +``` + +#### 5) Functions calling `generateConfig` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking generateConfig +func main() { + // The command line tool calls this during its configuration phase. + generateConfig() +} +``` + +--- + +### getAnswer + +**getAnswer** - Displays an interactive prompt with styled text, reads a line from standard input, splits it by commas into fields, trims whitespace, and returns the resulting slice. + +```go +func getAnswer(prompt, syntax, example string) []string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Displays an interactive prompt with styled text, reads a line from standard input, splits it by commas into fields, trims whitespace, and returns the resulting slice. | +| **Parameters** | `prompt` (string) – question shown to the user;
`syntax` (string) – description of accepted syntax;
`example` (string) – example value for guidance | +| **Return value** | `[]string` – cleaned list of values entered by the user (or `nil` if input could not be read) | +| **Key dependencies** | • `github.com/fatih/color` (HiCyanString, CyanString, WhiteString)
• `fmt`
• `bufio.NewScanner`, `scanner.Scan()`, `scanner.Err()`
• `log.Printf`
• `strings.Split`, `strings.TrimSpace` | +| **Side effects** | Prints styled prompt to stdout; logs an error via the standard logger if input cannot be read. | +| **How it fits the package** | Central helper for interactive configuration of CertSuite resources, exceptions, and settings by converting user‑supplied CSV strings into slices that other loader functions consume. | + +```mermaid +flowchart TD + A["Display prompt"] --> B["Read line"] + B --> C{"Error?"} + C -- Yes --> D["Log error & return nil"] + C -- No --> E["Split by ,"] + E --> F["Trim spaces"] + F --> G["Return slice"] +``` + +```mermaid +graph TD + func_getAnswer --> func_HiCyanString + func_getAnswer --> func_CyanString + func_getAnswer --> func_WhiteString + func_getAnswer --> fmt_Print + func_getAnswer --> bufio_NewScanner + func_getAnswer --> scanner_Scan + func_getAnswer --> scanner_Err + func_getAnswer --> log_Printf + func_getAnswer --> strings_Split + func_getAnswer --> strings_TrimSpace +``` + +```mermaid +graph TD + func_createCertSuiteResourcesConfiguration --> func_getAnswer + func_createExceptionsConfiguration --> func_getAnswer + func_createSettingsConfiguration --> func_getAnswer +``` + +#### Usage example + +```go +// Minimal example invoking getAnswer +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" +) + +func main() { + values := config.GetAnswer( + "Enter comma‑separated namespaces:", + ",", + "myapp,prod", + ) + fmt.Printf("You entered: %v\n", values) +} +``` + +*(Note: `GetAnswer` is unexported; the example assumes it is in the same package or exported for demonstration.)* + +--- + +### loadAcceptedKernelTaints + +**loadAcceptedKernelTaints** - Stores a list of accepted kernel taint names in the global `certsuiteConfig`. 
+ +#### Signature (Go) + +```go +func loadAcceptedKernelTaints(taints []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores a list of accepted kernel taint names in the global `certsuiteConfig`. | +| **Parameters** | `taints []string` – slice of taint identifiers to be accepted. | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | • `append`
• `configuration.AcceptedKernelTaintsInfo`
• global variable `certsuiteConfig.AcceptedKernelTaints` | +| **Side effects** | Mutates the slice `certsuiteConfig.AcceptedKernelTaints`; clears previous entries. | +| **How it fits the package** | Part of the interactive configuration wizard; called when the user selects kernel taints to accept. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive taint list"] --> B{"Clear existing config"} + B --> C["Iterate over each taint"] + C --> D["Create AcceptedKernelTaintsInfo struct"] + D --> E["Append to certsuiteConfig.AcceptedKernelTaints"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadAcceptedKernelTaints --> append +``` + +#### Functions calling `loadAcceptedKernelTaints` (Mermaid) + +```mermaid +graph TD + func_createExceptionsConfiguration --> func_loadAcceptedKernelTaints +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadAcceptedKernelTaints +func main() { + // Example taints entered by the user + taints := []string{"NoSchedule", "PreferNoExecute"} + loadAcceptedKernelTaints(taints) +} +``` + +--- + +### loadCRDfilters + +**loadCRDfilters** - Parses user‑supplied CRD filter strings, converts them into `CrdFilter` objects, and stores them in the global configuration. + +#### Signature (Go) + +```go +func([]string)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses user‑supplied CRD filter strings, converts them into `CrdFilter` objects, and stores them in the global configuration. | +| **Parameters** | `crdFilters []string` – each element is of the form `"name/scalable"`, where *scalable* is a boolean string (`"true"`/`"false"`). | +| **Return value** | None (updates package‑level state). | +| **Key dependencies** | • `strings.Split`
• `strconv.ParseBool`
• `log.Printf`
• `append`
• `configuration.CrdFilter` struct from the configuration package. | +| **Side effects** | 1. Resets `certsuiteConfig.CrdFilters` to an empty slice.
2. Logs a message if a filter cannot be parsed and aborts further processing.
3. Populates `certsuiteConfig.CrdFilters` with new entries. | +| **How it fits the package** | Called during interactive configuration to capture CRD filtering options that influence subsequent certificate suite generation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over crdFilters"} + B --> C["Split string by /"] + C --> D["Extract name and scalable part"] + D --> E["Parse scalable to bool"] + E -->|"Error"| F["Log error & exit"] + E --> G["Create CrdFilter struct"] + G --> H["Append to certsuiteConfig.CrdFilters"] + H --> B +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadCRDfilters --> func_strings_Split + func_loadCRDfilters --> func_strconv_ParseBool + func_loadCRDfilters --> func_log_Printf + func_loadCRDfilters --> func_append + func_loadCRDfilters --> configuration_CrdFilter +``` + +#### Functions calling `loadCRDfilters` (Mermaid) + +```mermaid +graph TD + func_createCertSuiteResourcesConfiguration --> func_loadCRDfilters +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadCRDfilters +func main() { + // Example input: filter "deployment/true" means use only CRDs with suffix "deployment" that are scalable. + filters := []string{"deployment/true", "service/false"} + loadCRDfilters(filters) + + // certsuiteConfig.CrdFilters is now populated and can be inspected. +} +``` + +--- + +### loadHelmCharts + +**loadHelmCharts** - Populates the global configuration with a list of Helm chart names that should be excluded from scanning. + +#### 1) Signature (Go) + +```go +func loadHelmCharts(helmCharts []string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Populates the global configuration with a list of Helm chart names that should be excluded from scanning. | +| **Parameters** | `helmCharts []string –` slice of chart names entered by the user. | +| **Return value** | None (void). 
| +| **Key dependencies** | • `append` function
• `configuration.SkipHelmChartList` type
• Global variable `certsuiteConfig.SkipHelmChartList` | +| **Side effects** | Resets and then mutates the global slice `certsuiteConfig.SkipHelmChartList`. No external I/O. | +| **How it fits the package** | Called from the interactive configuration routine to capture user‑specified Helm chart exclusions for later use during scan generation. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Reset SkipHelmChartList"} + B --> C["Iterate over helmCharts"] + C --> D["Create SkipHelmChartList entry"] + D --> E["Append to certsuiteConfig.SkipHelmChartList"] + E --> F["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadHelmCharts --> append +``` + +#### 5) Functions calling `loadHelmCharts` (Mermaid) + +```mermaid +graph TD + func_createExceptionsConfiguration --> func_loadHelmCharts +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking loadHelmCharts +func main() { + charts := []string{"nginx", "redis"} + loadHelmCharts(charts) +} +``` + +--- + +### loadManagedDeployments + +**loadManagedDeployments** - Populates the global configuration with user‑supplied managed deployment names, resetting any previous entries. + +#### Signature (Go) + +```go +func loadManagedDeployments([]string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Populates the global configuration with user‑supplied managed deployment names, resetting any previous entries. | +| **Parameters** | `deployments []string` – slice of deployment identifiers to be recorded. | +| **Return value** | None (the function modifies a package‑level variable). | +| **Key dependencies** | • Calls the built‑in `append` function to build a slice of `configuration.ManagedDeploymentsStatefulsets`.
• Relies on the global `certsuiteConfig` struct from the same package. | +| **Side effects** | Overwrites `certsuiteConfig.ManagedDeployments`, causing state mutation that other parts of the program will read later. No I/O or concurrency occurs. | +| **How it fits the package** | Part of an interactive configuration builder; invoked when a user selects "managed deployments" from the prompt menu to capture their choices into the cert‑suite configuration. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Reset ManagedDeployments"} + B --> C["Loop over input slice"] + C --> D["Create ManagedDeploymentsStatefulsets{Name: deployment}"] + D --> E["Append to certsuiteConfig.ManagedDeployments"] + E --> F["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadManagedDeployments --> func_append +``` + +#### Functions calling `loadManagedDeployments` (Mermaid) + +```mermaid +graph TD + func_createCertSuiteResourcesConfiguration --> func_loadManagedDeployments +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadManagedDeployments +deployments := []string{"nginx-deploy", "redis-operator"} +loadManagedDeployments(deployments) +``` + +--- + +### loadManagedStatefulSets + +**loadManagedStatefulSets** - Stores a list of StatefulSet names that should be considered managed by CertSuite. The function clears any existing entries and replaces them with the supplied slice. + +#### 1) Signature (Go) + +```go +func loadManagedStatefulSets(statefulSets []string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores a list of StatefulSet names that should be considered managed by CertSuite. The function clears any existing entries and replaces them with the supplied slice. | +| **Parameters** | `statefulSets []string` – Slice containing the names of StatefulSets to manage. | +| **Return value** | None (void). Modifies package‑level configuration state. 
| +| **Key dependencies** | • Calls the built‑in `append` function.
• Relies on the global variable `certsuiteConfig`. | +| **Side effects** | *Resets `certsuiteConfig.ManagedStatefulsets` to an empty slice.
* Appends new entries, each wrapped in a `configuration.ManagedDeploymentsStatefulsets` struct. | +| **How it fits the package** | Part of the interactive configuration wizard; invoked when the user selects “managed StatefulSets” from the resource picker. It updates the runtime config used later to generate cert‑suite resources. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Clear existing list"} + B --> C["Iterate over input slice"] + C --> D["Create ManagedDeploymentsStatefulsets struct"] + D --> E["Append to certsuiteConfig.ManagedStatefulsets"] + E --> F["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadManagedStatefulSets --> append +``` + +#### 5) Functions calling `loadManagedStatefulSets` (Mermaid) + +```mermaid +graph TD + createCertSuiteResourcesConfiguration --> loadManagedStatefulSets +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking loadManagedStatefulSets +import "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" + +func main() { + // Suppose the user entered these StatefulSet names: + statefulSets := []string{"redis", "mongo"} + + // Load them into the configuration + config.loadManagedStatefulSets(statefulSets) + + // certsuiteConfig.ManagedStatefulsets now contains two entries. +} +``` + +--- + +--- + +### loadNamespaces + +**loadNamespaces** - Builds a slice of `configuration.Namespace` objects from raw namespace strings and assigns it to `certsuiteConfig.TargetNameSpaces`. + +Collects a list of namespace names, converts them into the package’s configuration type, and stores them in the global configuration object. + +#### Signature (Go) + +```go +func loadNamespaces(namespaces []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a slice of `configuration.Namespace` objects from raw namespace strings and assigns it to `certsuiteConfig.TargetNameSpaces`. 
| +| **Parameters** | `namespaces []string – list of namespace names entered by the user.` | +| **Return value** | None. | +| **Key dependencies** | • `append` (built‑in)
• `configuration.Namespace` type
• global `certsuiteConfig` variable | +| **Side effects** | Mutates the global `certsuiteConfig.TargetNameSpaces`; no I/O or concurrency. | +| **How it fits the package** | Used by the interactive configuration flow to record which namespaces should be scanned for certificates. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Clear certsuiteConfig.TargetNameSpaces"] + B --> C{"Iterate over input slice"} + C -->|"For each namespace"| D["Create configuration.Namespace"] + D --> E["Append to certsuiteConfig.TargetNameSpaces"] + E --> F["Finish"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadNamespaces --> func_append +``` + +#### Functions calling `loadNamespaces` (Mermaid) + +```mermaid +graph TD + func_createCertSuiteResourcesConfiguration --> func_loadNamespaces +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadNamespaces +namespaces := []string{"default", "kube-system"} +loadNamespaces(namespaces) +// certsuiteConfig.TargetNameSpaces now contains two Namespace entries. +``` + +--- + +### loadNonScalableDeployments + +**loadNonScalableDeployments** - Parses a slice of strings describing non‑scalable deployments and populates the global configuration with structured objects for later use. + +#### 1️⃣ Signature (Go) + +```go +func loadNonScalableDeployments(nonScalableDeployments []string) +``` + +#### 2️⃣ Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a slice of strings describing non‑scalable deployments and populates the global configuration with structured objects for later use. | +| **Parameters** | `nonScalableDeployments []string` – each element is expected to be in the format `"name/namespace"`. | +| **Return value** | None (the function mutates package‑level state). | +| **Key dependencies** | • `strings.Split`,
• `len`,
• `log.Println`,
• `append` on a slice. | +| **Side effects** | • Resets `certsuiteConfig.SkipScalingTestDeployments` to nil.
• Appends parsed deployment info to that slice.
• Logs an error and aborts parsing if any entry is malformed. | +| **How it fits the package** | Used by the interactive configuration builder (`createExceptionsConfiguration`) to store user‑provided exceptions for scaling tests. | + +#### 3️⃣ Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Reset config slice"} + B --> C["Iterate over input strings"] + C --> D{"Split by /"} + D -->|"valid"| E["Extract name & namespace"] + E --> F["Create SkipScalingTestDeploymentsInfo"] + F --> G["Append to config slice"] + D -->|"invalid"| H["Log error & exit"] + G --> I["Continue loop"] + I --> J{"Loop finished?"} + J -->|"yes"| K["End"] +``` + +#### 4️⃣ Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadNonScalableDeployments --> strings.Split + func_loadNonScalableDeployments --> len + func_loadNonScalableDeployments --> log.Println + func_loadNonScalableDeployments --> append +``` + +#### 5️⃣ Functions calling `loadNonScalableDeployments` (Mermaid) + +```mermaid +graph TD + func_createExceptionsConfiguration --> func_loadNonScalableDeployments +``` + +#### 6️⃣ Usage example (Go) + +```go +// Minimal example invoking loadNonScalableDeployments +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" +) + +func main() { + // Example input: deployments that should be skipped during scaling tests. + nonScalable := []string{ + "frontend/production", + "backend/testing", + } + + config.loadNonScalableDeployments(nonScalable) +} +``` + +--- + +### loadNonScalableStatefulSets + +**loadNonScalableStatefulSets** - Parses a slice of strings describing StatefulSets that should not be subjected to scaling tests and stores them in the global configuration. Each string is expected to contain `name/namespace`. 
+ +#### Signature (Go) + +```go +func loadNonScalableStatefulSets(nonScalableStatefulSets []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a slice of strings describing StatefulSets that should not be subjected to scaling tests and stores them in the global configuration. Each string is expected to contain `name/namespace`. | +| **Parameters** | `nonScalableStatefulSets []string` – list of identifiers in `"name/namespace"` format. | +| **Return value** | none (updates shared state). | +| **Key dependencies** | • `strings.Split` – split each entry on `/`.
• `len` – validate field count.
• `log.Println` – error logging for malformed entries.
• `append` – add parsed objects to the configuration slice. | +| **Side effects** | Mutates the global variable `certsuiteConfig.SkipScalingTestStatefulSets`, overwriting any previous value and appending new items. Logs a message if an entry cannot be parsed. | +| **How it fits the package** | Part of the interactive configuration flow; called from `createExceptionsConfiguration` when the user selects the “Non‑Scalable StatefulSets” option. It populates the set of StatefulSets that will bypass scaling tests during certsuite execution. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Reset SkipScalingTestStatefulSets"} + B --> C["Iterate over input slice"] + C --> D["Split entry on /"] + D --> E{"Check field count == 2"} + E -- No --> F["Log error & exit loop"] + E -- Yes --> G["Create SkipScalingTestStatefulSetsInfo"] + G --> H["Append to config.SkipScalingTestStatefulSets"] + H --> C + C --> I["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_loadNonScalableStatefulSets --> strings.Split + func_loadNonScalableStatefulSets --> log.Println + func_loadNonScalableStatefulSets --> append +``` + +#### Functions calling `loadNonScalableStatefulSets` (Mermaid) + +```mermaid +graph TD + func_createExceptionsConfiguration --> func_loadNonScalableStatefulSets +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadNonScalableStatefulSets +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" +) + +func main() { + // Example entries: "my-app/default" and "db-prod/production" + inputs := []string{"my-app/default", "db-prod/production"} + config.loadNonScalableStatefulSets(inputs) +} +``` + +--- + +--- + +### loadOperatorLabels + +**loadOperatorLabels** - Stores a slice of operator label strings into the global configuration, replacing any existing list. 
+ +#### Signature (Go) + +```go +func loadOperatorLabels(operatorLabels []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores a slice of operator label strings into the global configuration, replacing any existing list. | +| **Parameters** | `operatorLabels []string` – The new set of labels to apply to operators under test. | +| **Return value** | None (void). | +| **Key dependencies** | * `certsuiteConfig` – a package‑level variable holding the current configuration. | +| **Side effects** | Mutates the global `certsuiteConfig.OperatorsUnderTestLabels` field; no I/O or concurrency actions. | +| **How it fits the package** | Used by the interactive prompt workflow to update operator labels after user input, ensuring subsequent resource generation uses the latest values. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive slice of labels"] --> B["Clear existing config"] + B --> C["Assign new labels to certsuiteConfig.OperatorsUnderTestLabels"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `loadOperatorLabels` (Mermaid) + +```mermaid +graph TD + func_createCertSuiteResourcesConfiguration --> func_loadOperatorLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadOperatorLabels +labels := []string{"app=nginx", "tier=backend"} +loadOperatorLabels(labels) +// certsuiteConfig.OperatorsUnderTestLabels now contains the provided labels +``` + +--- + +### loadPodLabels + +**loadPodLabels** - Stores a list of pod labels to be used as filters when selecting pods under test. It replaces any previously stored label set with the new slice. 
+ +#### Signature (Go) + +```go +func loadPodLabels(podLabels []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores a list of pod labels to be used as filters when selecting pods under test. It replaces any previously stored label set with the new slice. | +| **Parameters** | `podLabels []string` – a slice containing pod label selectors (e.g., `"app=web"`, `"tier=frontend"`). | +| **Return value** | None. The function mutates global configuration state. | +| **Key dependencies** | • `certsuiteConfig.PodsUnderTestLabels` – the global configuration struct field that holds pod labels. | +| **Side effects** | Updates the global `certsuiteConfig.PodsUnderTestLabels` slice; no I/O or concurrency is involved. | +| **How it fits the package** | Part of the interactive CLI for building a certificate suite configuration; invoked when the user selects the “Pods” option in the resource picker. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Clear existing pod labels"] + B --> C["Assign new podLabels slice to config"] + C --> D["End"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `loadPodLabels` (Mermaid) + +```mermaid +graph TD + func_createCertSuiteResourcesConfiguration --> func_loadPodLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadPodLabels +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" +) + +func main() { + labels := []string{"app=web", "tier=frontend"} + config.LoadPodLabels(labels) // Note: the function is unexported; in real usage it is called from within the same package. +} +``` + +> **Note:** `loadPodLabels` is an unexported helper, so external packages cannot call it directly. It is used internally during the interactive configuration process. 
+ +--- + +### loadProbeDaemonSetNamespace + +**loadProbeDaemonSetNamespace** - Persists the first element of a string slice as the namespace used by the Probe DaemonSet in the global configuration. + +```go +func loadProbeDaemonSetNamespace(namespace []string) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Persists the first element of a string slice as the namespace used by the Probe DaemonSet in the global configuration. | +| **Parameters** | `namespace []string` – list of namespaces provided by the user; only the first entry is considered. | +| **Return value** | None (void). | +| **Key dependencies** | Assigns to the exported variable `certsuiteConfig.ProbeDaemonSetNamespace`. | +| **Side effects** | Mutates the global configuration state; no I/O or concurrency operations occur. | +| **How it fits the package** | Part of an interactive CLI that gathers settings from the user and writes them into the shared configuration used to generate Kubernetes manifests. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive namespace slice"] -->|"set"| B["certsuiteConfig.ProbeDaemonSetNamespace = namespace["0"]"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `loadProbeDaemonSetNamespace` + +```mermaid +graph TD + func_createSettingsConfiguration --> func_loadProbeDaemonSetNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking loadProbeDaemonSetNamespace +func main() { + // Simulate user selecting a namespace + selected := []string{"my-namespace"} + loadProbeDaemonSetNamespace(selected) + + fmt.Println(certsuiteConfig.ProbeDaemonSetNamespace) // Output: my-namespace +} +``` + +--- + +### loadProtocolNames + +**loadProtocolNames** - Replaces the current set of valid protocol names in `certsuiteConfig` with a new list supplied by the caller. 
+
+#### Signature (Go)
+
+```go
+func loadProtocolNames(protocolNames []string)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Replaces the current set of valid protocol names in `certsuiteConfig` with a new list supplied by the caller. |
+| **Parameters** | *protocolNames* []string – slice containing the protocol names to be accepted by the suite. |
+| **Return value** | None. The function performs an assignment and has no return values. |
+| **Key dependencies** | - `certsuiteConfig.ValidProtocolNames` (global configuration variable). |
+| **Side effects** | Mutates the global configuration state; does not perform I/O or launch goroutines. |
+| **How it fits the package** | Part of the interactive configuration builder that collects user‑supplied options and updates the shared configuration object used by subsequent generation logic. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Receive protocolNames slice"] --> B["Clear certsuiteConfig.ValidProtocolNames"]
+    B --> C["Assign new slice to certsuiteConfig.ValidProtocolNames"]
+```
+
+#### Function dependencies (Mermaid)
+
+None – this function performs a single assignment and calls no other functions in the package.
+
+#### Functions calling `loadProtocolNames` (Mermaid)
+
+```mermaid
+graph TD
+    func_createExceptionsConfiguration --> func_loadProtocolNames
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking loadProtocolNames
+import (
+	"fmt"
+)
+
+func main() {
+	// Example list of protocol names to accept
+	protocols := []string{"http", "https", "tcp"}
+
+	// Load them into the configuration
+	loadProtocolNames(protocols)
+
+	// Verify that they were set
+	fmt.Println(certsuiteConfig.ValidProtocolNames)
+}
+```
+
+---
+
+### loadServices
+
+**loadServices** - Stores the supplied list of service names in the global configuration, resetting any previous ignore list.
+
+#### 1. 
Signature (Go)
+
+```go
+func loadServices(services []string)
+```
+
+#### 2. Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Stores the supplied list of service names in the global configuration, resetting any previous ignore list. |
+| **Parameters** | `services []string` – slice of service names to be ignored during generation. |
+| **Return value** | None (void). |
+| **Key dependencies** | - Direct assignment to `certsuiteConfig.ServicesIgnoreList`. |
+| **Side effects** | Mutates the global `certsuiteConfig.ServicesIgnoreList` variable; no I/O or concurrency. |
+| **How it fits the package** | Used by the interactive configuration flow to set which services should be excluded from certificate generation. |
+
+#### 3. Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    subgraph Load Services
+        A["Start"] --> B{"Reset Ignore List"}
+        B --> C["Assign New List"]
+        C --> D["End"]
+    end
+```
+
+#### 4. Function dependencies (Mermaid)
+
+None – this function performs a single assignment and calls no other functions in the package.
+
+#### 5. Functions calling `loadServices` (Mermaid)
+
+```mermaid
+graph TD
+    func_createExceptionsConfiguration --> func_loadServices
+```
+
+#### 6. Usage example (Go)
+
+```go
+// Minimal example invoking loadServices
+servicesToIgnore := []string{"kube-system", "metrics-server"}
+loadServices(servicesToIgnore)
+// certsuiteConfig.ServicesIgnoreList now contains the two services
+```
+
+---
+
+### saveConfiguration
+
+**saveConfiguration** - Serialises the current `TestConfiguration`, prompts the user for an output file name, and writes the YAML to that file.
+
+#### Signature (Go)
+
+```go
+func saveConfiguration(config *configuration.TestConfiguration)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Serialises the current `TestConfiguration`, prompts the user for an output file name, and writes the YAML to that file. 
| +| **Parameters** | `config *configuration.TestConfiguration` – the configuration instance to be saved. | +| **Return value** | None (the function performs side‑effects only). | +| **Key dependencies** | • `yaml.Marshal` – converts the struct to YAML
• `promptui.Prompt.Run` – obtains file name from user
• `os.WriteFile` – writes data to disk
• `log.Printf`, `fmt.Println`, `color.GreenString` – provide user feedback | +| **Side effects** | • Writes a YAML file to the filesystem.
• Prints status messages to standard output and error streams. | +| **How it fits the package** | Called from the interactive menu in `generateConfig`; allows users to persist their current configuration for later use or sharing. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Marshal config to YAML"] + B --> C{"Error?"} + C -- Yes --> D["Log error & exit"] + C -- No --> E["Prompt user for file name"] + E --> F{"Error?"} + F -- Yes --> G["Log prompt error & exit"] + F -- No --> H["Write YAML to file"] + H --> I{"Error?"} + I -- Yes --> J["Log write error & exit"] + I -- No --> K["Print success message"] + K --> L["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_saveConfiguration --> yaml.Marshal + func_saveConfiguration --> log.Printf + func_saveConfiguration --> promptui.Prompt.Run + func_saveConfiguration --> os.WriteFile + func_saveConfiguration --> fmt.Println + func_saveConfiguration --> color.GreenString +``` + +#### Functions calling `saveConfiguration` (Mermaid) + +```mermaid +graph TD + func_generateConfig --> func_saveConfiguration +``` + +#### Usage example (Go) + +```go +// Minimal example invoking saveConfiguration +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" + configuration "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/configuration" +) + +func main() { + cfg := &configuration.TestConfiguration{ + // populate fields as needed + } + config.Save(cfg) // internally calls saveConfiguration via the public wrapper if available +} +``` + +--- + +### showConfiguration + +**showConfiguration** - Renders the current `TestConfiguration` as a pretty‑printed YAML string and writes it to standard output. 
+
+#### Signature (Go)
+
+```go
+func showConfiguration(config *configuration.TestConfiguration)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Renders the current `TestConfiguration` as a pretty‑printed YAML string and writes it to standard output. |
+| **Parameters** | `config *configuration.TestConfiguration` – pointer to the configuration instance to display. |
+| **Return value** | None. |
+| **Key dependencies** | • `yaml.Marshal` (gopkg.in/yaml.v2)
• `log.Printf` (standard library)
• `fmt.Println` (standard library)
• `string` conversion of YAML bytes | +| **Side effects** | Writes to stdout; logs an error if marshaling fails. No mutation of the input configuration. | +| **How it fits the package** | Used by the interactive CLI menu (`generateConfig`) to allow users to view the current configuration before saving or modifying it. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Marshal config"} + B -- success --> C["Convert bytes → string"] + C --> D["Print header"] + D --> E["Print YAML"] + E --> F["Print footer"] + B -- failure --> G["Log error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_showConfiguration --> yaml.Marshal + func_showConfiguration --> log.Printf + func_showConfiguration --> fmt.Println + func_showConfiguration --> string +``` + +#### Functions calling `showConfiguration` (Mermaid) + +```mermaid +graph TD + func_generateConfig --> func_showConfiguration +``` + +#### Usage example (Go) + +```go +// Minimal example invoking showConfiguration +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/config" + "gitlab.com/example/project/configuration" +) + +func main() { + cfg := &configuration.TestConfiguration{ + Name: "example", + Value: "demo", + } + config.showConfiguration(cfg) +} +``` + +--- diff --git a/docs/cmd/certsuite/generate/feedback/feedback.md b/docs/cmd/certsuite/generate/feedback/feedback.md new file mode 100644 index 000000000..75fef9f44 --- /dev/null +++ b/docs/cmd/certsuite/generate/feedback/feedback.md @@ -0,0 +1,200 @@ +# Package feedback + +**Path**: `cmd/certsuite/generate/feedback` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [runGenerateFeedbackJsFile](#rungeneratefeedbackjsfile) + +## Overview + +The feedback package provides a Cobra‑based CLI command that reads a JSON file containing user feedback 
and emits a JavaScript file suitable for inclusion in the CertSuite web UI. + +### Key Features + +- Creates a ‘generate‑feedback’ command with required flags for input and output paths +- Parses the JSON source, pretty‑prints it as JavaScript prefixed by `feedback=` +- Writes the resulting script to the specified directory while also printing it to stdout + +### Design Notes + +- The package relies on Cobra for flag handling and command structure; callers must invoke NewCommand to register it +- Error reporting is performed via fmt.Errorf, with fatal errors logged using log.Fatalf during option validation +- The runGenerateFeedbackJsFile function is intentionally unexported; it is used as the RunE handler of the command + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Builds a CLI command named “generate‑feedback” that reads a JSON feedback source and writes a JavaScript file for use in the CertSuite web UI. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func runGenerateFeedbackJsFile(_ *cobra.Command, _ []string) error](#rungeneratefeedbackjsfile) | Reads a JSON feedback file, pretty‑prints its contents as JavaScript by prefixing with `feedback=`, writes the result to `feedback.js` in the output directory, and prints it to stdout. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Builds a CLI command named “generate‑feedback” that reads a JSON feedback source and writes a JavaScript file for use in the CertSuite web UI. + +Creates and configures the Cobra command that generates a `feedback.js` file from a provided `feedback.json`. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a CLI command named “generate‑feedback” that reads a JSON feedback source and writes a JavaScript file for use in the CertSuite web UI. 
| +| **Parameters** | None | +| **Return value** | A fully‑initialized `*cobra.Command` ready to be added to a parent command tree. | +| **Key dependencies** | • `github.com/spf13/cobra` (command creation, flag handling)
• `log.Fatalf` from the standard library for error reporting | +| **Side effects** | Registers required flags (`--feedback`, `-f`; `--outputPath`, `-o`) on the command and validates them; logs fatal errors if flag marking fails. No external I/O occurs at construction time. | +| **How it fits the package** | The function is the public entry point for the *feedback* sub‑command within the CertSuite generate CLI. It is invoked by the top‑level `generate.NewCommand` to compose the full command hierarchy. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create cobra.Command"] --> B["Add --feedback flag"] + B --> C["Mark --feedback required"] + C --> D["Handle error with log.Fatalf"] + D --> E["Add --outputPath flag"] + E --> F["Mark --outputPath required"] + F --> G["Handle error with log.Fatalf"] + G --> H["Return configured command"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_StringVarP + func_NewCommand --> func_Flags + func_NewCommand --> func_MarkFlagRequired + func_NewCommand --> pkg_log.Fatalf +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_generate.NewCommand --> func_feedback.NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/feedback" +) + +func main() { + cmd := feedback.NewCommand() + if err := cmd.Execute(); err != nil { + panic(err) + } +} +``` + +--- + +## Local Functions + +### runGenerateFeedbackJsFile + +**runGenerateFeedbackJsFile** - Reads a JSON feedback file, pretty‑prints its contents as JavaScript by prefixing with `feedback=`, writes the result to `feedback.js` in the output directory, and prints it to stdout. 
+ +#### 1) Signature (Go) + +```go +func runGenerateFeedbackJsFile(_ *cobra.Command, _ []string) error +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a JSON feedback file, pretty‑prints its contents as JavaScript by prefixing with `feedback=`, writes the result to `feedback.js` in the output directory, and prints it to stdout. | +| **Parameters** | `_ *cobra.Command` – command context (unused); `<- []string>` – command arguments (unused). | +| **Return value** | `error` – nil on success; otherwise an error describing any I/O or JSON processing failure. | +| **Key dependencies** | • `os.ReadFile`, `os.Create`, `file.WriteString` – file I/O
• `encoding/json.Unmarshal`, `encoding/json.MarshalIndent` – JSON parsing/formatting
• `fmt.Errorf`, `fmt.Println` – error handling and output
• `path/filepath.Join` – path construction | +| **Side effects** | • Creates or overwrites `/feedback.js`.
• Writes the JavaScript content to that file.
• Prints the same content to stdout. | +| **How it fits the package** | Part of the `generate/feedback` sub‑command; transforms stored JSON feedback into a form usable by client‑side scripts. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Read JSON file"] --> B{"Error?"} + B -- Yes --> C["Return fmt.Errorf"] + B -- No --> D["Unmarshal into map"] + D --> E{"Error?"} + E -- Yes --> F["Return fmt.Errorf"] + E -- No --> G["MarshalIndent to pretty JSON"] + G --> H{"Error?"} + H -- Yes --> I["Return fmt.Errorf"] + H -- No --> J["Join output path & create file"] + J --> K{"Error?"} + K -- Yes --> L["Return fmt.Errorf"] + K -- No --> M["Write feedback= + JSON string"] + M --> N{"Error?"} + N -- Yes --> O["Return fmt.Errorf"] + N -- No --> P["Print to stdout"] + P --> Q["Return nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_runGenerateFeedbackJsFile --> os_ReadFile + func_runGenerateFeedbackJsFile --> json_Unmarshal + func_runGenerateFeedbackJsFile --> json_MarshalIndent + func_runGenerateFeedbackJsFile --> filepath_Join + func_runGenerateFeedbackJsFile --> os_Create + func_runGenerateFeedbackJsFile --> fmt_Errorf + func_runGenerateFeedbackJsFile --> file_WriteString + func_runGenerateFeedbackJsFile --> fmt_Println +``` + +#### 5) Functions calling `runGenerateFeedbackJsFile` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### 6) Usage example (Go) + +```go +// Minimal example invoking runGenerateFeedbackJsFile +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/feedback" + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{} + if err := feedback.runGenerateFeedbackJsFile(cmd, nil); err != nil { + panic(err) + } +} +``` + +--- + +--- diff --git a/docs/cmd/certsuite/generate/generate.md b/docs/cmd/certsuite/generate/generate.md new file mode 100644 index 000000000..939ea7042 --- /dev/null +++ b/docs/cmd/certsuite/generate/generate.md @@ -0,0 +1,104 @@ +# Package generate + +**Path**: `cmd/certsuite/generate` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) + +## Overview + +The generate package builds a top‑level Cobra command that groups all generation subcommands used by the certsuite CLI. + +### Key Features + +- Creates a single entry point for multiple generation tasks (catalog, feedback, config, qe_coverage). +- Uses Cobra to provide standard command-line flags and help output. +- Organises generation logic into separate packages for modularity. + +### Design Notes + +- Assumes internal use within the certsuite binary; no exported structs. +- Does not expose configuration beyond subcommand flags—limitations if external integration is needed. +- Best practice: call NewCommand from the root command builder to register all generate actions. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Constructs and returns a Cobra command that aggregates all sub‑commands for the *generate* feature set. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Constructs and returns a Cobra command that aggregates all sub‑commands for the *generate* feature set. 
+ +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs and returns a Cobra command that aggregates all sub‑commands for the *generate* feature set. | +| **Parameters** | None | +| **Return value** | `*cobra.Command` – the root *generate* command ready to be added to the CLI tree. | +| **Key dependencies** | • `generate.AddCommand(catalog.NewCommand())`
• `generate.AddCommand(feedback.NewCommand())`
• `generate.AddCommand(config.NewCommand())`
• `generate.AddCommand(qecoverage.NewCommand())` | +| **Side effects** | Adds the sub‑commands to the local `generate` command instance. No external I/O or global state changes occur. | +| **How it fits the package** | Acts as the entry point for all *generate* related actions, wiring together catalog generation, feedback file creation, configuration exports, and QE coverage reporting. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["NewCommand"] --> B["catalog.NewCommand"] + A --> C["feedback.NewCommand"] + A --> D["config.NewCommand"] + A --> E["qecoverage.NewCommand"] + B -.-> F["generate.AddCommand"] + C -.-> F + D -.-> F + E -.-> F +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_AddCommand + func_NewCommand --> catalog_NewCommand + func_NewCommand --> feedback_NewCommand + func_NewCommand --> config_NewCommand + func_NewCommand --> qecoverage_NewCommand +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + newRootCmd --> func_NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate" +) + +func main() { + generateCmd := generate.NewCommand() + // The returned command can now be added to a root command or executed directly. 
+} +``` + +--- diff --git a/docs/cmd/certsuite/generate/qe_coverage/qecoverage.md b/docs/cmd/certsuite/generate/qe_coverage/qecoverage.md new file mode 100644 index 000000000..4421149c4 --- /dev/null +++ b/docs/cmd/certsuite/generate/qe_coverage/qecoverage.md @@ -0,0 +1,388 @@ +# Package qecoverage + +**Path**: `cmd/certsuite/generate/qe_coverage` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [TestCoverageSummaryReport](#testcoveragesummaryreport) + - [TestSuiteQeCoverage](#testsuiteqecoverage) +- [Exported Functions](#exported-functions) + - [GetQeCoverage](#getqecoverage) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [showQeCoverageForTestCaseName](#showqecoveragefortestcasename) + - [showQeCoverageSummaryReport](#showqecoveragesummaryreport) + +## Overview + +Provides a command‑line tool for reporting which test cases in a catalog are covered by QE (Quality Engineering) and summarises coverage statistics per suite and overall. 
+ +### Key Features + +- Aggregates QE coverage across all catalog test cases into structured summary reports +- Outputs human‑readable coverage details for individual suites or the entire catalog +- Integrates with Cobra to expose a CLI command that accepts suite selection via flags + +### Design Notes + +- Coverage data is derived from claim identifiers; assumes each test case has a unique identifier and optional QE implementation flag +- The report generation sorts suites alphabetically for deterministic output, but does not handle missing coverage data gracefully – unknown entries are omitted +- Best practice: invoke `NewCommand()` during CLI initialization so the command appears under the generate sub‑command + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**TestCoverageSummaryReport**](#testcoveragesummaryreport) | One-line purpose | +| [**TestSuiteQeCoverage**](#testsuiteqecoverage) | One-line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetQeCoverage(catalog map[claim.Identifier]claim.TestCaseDescription) TestCoverageSummaryReport](#getqecoverage) | Aggregates QE coverage metrics across all test cases in a catalog, producing per‑suite and overall statistics. | +| [func NewCommand() *cobra.Command](#newcommand) | Constructs and configures a `*cobra.Command` that reports which tests are not covered by QE for a specified suite. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func showQeCoverageForTestCaseName(suiteName string, qeCoverage TestCoverageSummaryReport)](#showqecoveragefortestcasename) | Outputs a human‑readable report detailing the number of test cases, overall QE coverage percentage, and any unimplemented tests for the specified suite. | +| [func showQeCoverageSummaryReport()](#showqecoveragesummaryreport) | Computes test‑suite level QE coverage from the catalog, orders suites alphabetically, and outputs a formatted summary to stdout. 
| + +## Structs + +### TestCoverageSummaryReport + +A summary of quality‑engineering (QE) coverage for a catalog of test cases. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `CoverageByTestSuite` | `map[string]TestSuiteQeCoverage` | Mapping from each test suite name to its QE coverage details. Each entry contains the number of test cases, how many are QE‑covered, unimplemented ones, and the calculated percentage. | +| `TotalCoveragePercentage` | `float32` | Overall QE coverage across all suites, expressed as a percentage (0–100). Calculated as `multiplier * (totalTcsWithQe / totalTcs)`. | +| `TestCasesTotal` | `int` | Total number of test cases processed from the catalog. | +| `TestCasesWithQe` | `int` | Count of test cases that are marked as QE‑covered (`tcDescription.Qe == true`). | + +#### Purpose + +This struct aggregates coverage data after scanning a collection of test case descriptions. It enables downstream reporting and diagnostics by providing both per‑suite metrics and an overall summary, facilitating quick identification of suites lacking QE support. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetQeCoverage` | Builds a `TestCoverageSummaryReport` from a catalog of test cases, computing per‑suite and total coverage statistics. | +| `showQeCoverageForTestCaseName` | Prints human‑readable QE coverage information for a specific suite using the data stored in a `TestCoverageSummaryReport`. | + +--- + +--- + +### TestSuiteQeCoverage + +A container that holds metrics about the quality‑engine coverage of a test suite. + +| Field | Type | Description | +|-------|------|-------------| +| `TestCases` | `int` | Total number of test cases defined in the suite. | +| `TestCasesWithQe` | `int` | Number of those test cases that have been annotated or linked to quality‑engine (QE) data. 
| +| `Coverage` | `float32` | QE coverage for the suite, expressed as a percentage between 0 and 100 (computed with the same multiplier as `TotalCoveragePercentage`). | +| `NotImplementedTestCases` | `[]string` | Slice containing identifiers of test cases that are present in the suite but lack any QE implementation or coverage data. | + +#### Purpose + +The `TestSuiteQeCoverage` struct aggregates coverage information for a single test suite. It is used to report how many tests have corresponding quality‑engine records, calculate overall coverage percentage, and list those tests that remain unimplemented from a QE perspective. + +#### Related functions (if any) + +| Function | Purpose | +|----------|---------| +| `GetQeCoverage` | Creates and populates a `TestSuiteQeCoverage` entry for each suite while scanning the catalog. | + +--- + +--- + +## Exported Functions + +### GetQeCoverage + +**GetQeCoverage** - Aggregates QE coverage metrics across all test cases in a catalog, producing per‑suite and overall statistics. + +#### 1) Signature (Go) + +```go +func GetQeCoverage(catalog map[claim.Identifier]claim.TestCaseDescription) TestCoverageSummaryReport +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Aggregates QE coverage metrics across all test cases in a catalog, producing per‑suite and overall statistics. | +| **Parameters** | `catalog map[claim.Identifier]claim.TestCaseDescription` – mapping of claim identifiers to their descriptions (including whether the test case is QE‑enabled). | +| **Return value** | `TestCoverageSummaryReport` – contains coverage percentages per suite, total coverage, and counts of test cases with/without QE. | +| **Key dependencies** | • `append` (slice append)<br/>
• `float32` conversion
• `multiplier` constant (implicit in the package) | +| **Side effects** | None – purely functional; only reads input map and returns a new struct. | +| **How it fits the package** | Serves as the core calculation routine for generating QE coverage reports used by CLI utilities such as `showQeCoverageSummaryReport`. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate catalog"} + B --> C["Increment totalTcs"] + C --> D["Retrieve TestCaseDescription"] + D --> E["Get suite name"] + E --> F["Lookup or init TestSuiteQeCoverage"] + F --> G["Increment test case count"] + G --> H{"Is QE?"} + H -- Yes --> I["Increment TestCasesWithQe & totalTcsWithQe"] + H -- No --> J["Append to NotImplementedTestCases"] + I --> K + J --> K + K --> L["Compute suite coverage"] + L --> M["Store updated TestSuiteQeCoverage"] + M --> N{"Next catalog item?"} + N -- Yes --> B + N -- No --> O["Compute total coverage"] + O --> P["Return TestCoverageSummaryReport"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetQeCoverage --> func_append + func_GetQeCoverage --> func_float32 + func_GetQeCoverage --> func_float32 + func_GetQeCoverage --> func_float32 + func_GetQeCoverage --> func_float32 + func_GetQeCoverage --> func_float32 +``` + +#### 5) Functions calling `GetQeCoverage` (Mermaid) + +```mermaid +graph TD + func_showQeCoverageSummaryReport --> func_GetQeCoverage +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetQeCoverage +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage" + "github.com/redhat-best-practices-for-k8s/certsuite/claim" +) + +func main() { + // Assume identifiers.Catalog is a pre‑populated map[claim.Identifier]claim.TestCaseDescription + coverageReport := qe_coverage.GetQeCoverage(identifiers.Catalog) + + fmt.Printf("Total QE Coverage: %.2f%%\n", coverageReport.TotalCoveragePercentage) +} +``` + +--- + +### NewCommand + +**NewCommand** 
- Constructs and configures a `*cobra.Command` that reports which tests are not covered by QE for a specified suite. + +#### 1. Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### 2. Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs and configures a `*cobra.Command` that reports which tests are not covered by QE for a specified suite. | +| **Parameters** | None | +| **Return value** | A pointer to the configured `cobra.Command`. | +| **Key dependencies** | * Calls `qeCoverageReportCmd.PersistentFlags().String(...)` to add a `--suitename` flag.<br/>
* Relies on the global `qeCoverageReportCmd` variable (defined elsewhere in the package). | +| **Side effects** | Mutates the command’s persistent flags; no external I/O or concurrency. | +| **How it fits the package** | Provides the entry point for the QE coverage sub‑command, which is added to the main generate command tree. | + +#### 3. Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["NewCommand"] --> B{"Add Persistent Flag"} + B --> C["--suitename string"] + C --> D["Return qeCoverageReportCmd"] +``` + +#### 4. Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_String + func_NewCommand --> func_PersistentFlags +``` + +#### 5. Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage.NewCommand --> func_NewCommand +``` + +#### 6. Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage" +) + +func main() { + cmd := qe_coverage.NewCommand() + // cmd can now be added to a root command or executed directly. +} +``` + +--- + +## Local Functions + +### showQeCoverageForTestCaseName + +**showQeCoverageForTestCaseName** - Outputs a human‑readable report detailing the number of test cases, overall QE coverage percentage, and any unimplemented tests for the specified suite. + +```go +func showQeCoverageForTestCaseName(suiteName string, qeCoverage TestCoverageSummaryReport) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Outputs a human‑readable report detailing the number of test cases, overall QE coverage percentage, and any unimplemented tests for the specified suite. | +| **Parameters** | `suiteName string –` name of the test suite to report.
`qeCoverage TestCoverageSummaryReport –` aggregated coverage data that contains per‑suite statistics. | +| **Return value** | None (void). | +| **Key dependencies** | • `fmt.Println`, `fmt.Printf`
• `len` (builtin)
• `strings.Join` | +| **Side effects** | Writes formatted text to standard output; no state mutation beyond printing. | +| **How it fits the package** | Acts as a helper for the QE coverage generator, formatting and displaying results for individual test suites. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve suite data"} + B --> C["Print suite name"] + C --> D["Print totals & coverage"] + D --> E{"Any unimplemented tests?"} + E -- No --> F["Print success message"] + E -- Yes --> G["Join test names"] + G --> H["Print list of missing tests"] + F --> I["Newline"] + H --> I + I --> Z["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_showQeCoverageForTestCaseName --> fmt.Println + func_showQeCoverageForTestCaseName --> fmt.Printf + func_showQeCoverageForTestCaseName --> len + func_showQeCoverageForTestCaseName --> strings.Join +``` + +#### Functions calling `showQeCoverageForTestCaseName` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking showQeCoverageForTestCaseName +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/qe_coverage" +) + +func main() { + // Assume we have a populated TestCoverageSummaryReport named report. + var report qe_coverage.TestCoverageSummaryReport + // Populate report with data as required by the application. + + // Show coverage for a specific suite + qe_coverage.showQeCoverageForTestCaseName("MySuite", report) +} +``` + +--- + +### showQeCoverageSummaryReport + +**showQeCoverageSummaryReport** - Computes test‑suite level QE coverage from the catalog, orders suites alphabetically, and outputs a formatted summary to stdout. 
+ +#### Signature (Go) + +```go +func showQeCoverageSummaryReport() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Computes test‑suite level QE coverage from the catalog, orders suites alphabetically, and outputs a formatted summary to stdout. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | • `GetQeCoverage` (from same package)
• `sort.Strings` (standard library)
• `fmt.Printf`, `fmt.Println` (standard library)
• `identifiers.Catalog` (global catalog of test cases) | +| **Side effects** | Writes formatted text to standard output; no state mutation. | +| **How it fits the package** | Helper that displays the QE coverage data generated by `GetQeCoverage`, used during command‑line execution of the certificate suite generator. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve QE Coverage"} + B -->|"Call GetQeCoverage"| C["qeCoverage"] + C --> D["Collect Suite Names"] + D --> E["Sort Suites"] + E --> F["Print Total Summary"] + F --> G["Iterate Suites"] + G --> H["Print Per‑Suite Details"] + H --> I["Print Newline"] + I --> J["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_showQeCoverageSummaryReport --> func_GetQeCoverage + func_showQeCoverageSummaryReport --> sort.Strings + func_showQeCoverageSummaryReport --> fmt.Printf + func_showQeCoverageSummaryReport --> fmt.Println +``` + +#### Functions calling `showQeCoverageSummaryReport` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking showQeCoverageSummaryReport +func main() { + // Assuming identifiers.Catalog has been populated elsewhere + showQeCoverageSummaryReport() +} +``` + +--- diff --git a/docs/cmd/certsuite/info/info.md b/docs/cmd/certsuite/info/info.md new file mode 100644 index 000000000..5640beca3 --- /dev/null +++ b/docs/cmd/certsuite/info/info.md @@ -0,0 +1,563 @@ +# Package info + +**Path**: `cmd/certsuite/info` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [adjustLineMaxWidth](#adjustlinemaxwidth) + - [getMatchingTestIDs](#getmatchingtestids) + - [getTestDescriptionsFromTestIDs](#gettestdescriptionsfromtestids) + - [printTestCaseInfoBox](#printtestcaseinfobox) + - [printTestList](#printtestlist) + - [showInfo](#showinfo) + +## Overview + +The `info` package supplies the ``certsuite info`` command that displays metadata about test cases from the internal catalog. It can list matching test IDs or render a colored information box for each case. + +### Key Features + +- Provides persistent flags to filter tests by label expression and toggle detailed display +- Automatically adapts terminal width to keep output boxes readable +- Formats output with colors, alignment, and word‑wrapping for clear presentation + +### Design Notes + +- Assumes the presence of an internal checks database; errors are returned if loading fails +- Limits line width based on current terminal size plus a configurable padding +- Best practice: invoke via ``certsuite info`` after ensuring the catalog is initialized + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Builds and returns a configured `*cobra.Command` that displays information about test cases. It sets up persistent flags, enforces required flags, and handles error reporting. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func adjustLineMaxWidth()](#adjustlinemaxwidth) | Determines the current terminal width and, if it is smaller than the pre‑set maximum line length plus padding, reduces `lineMaxWidth` so that output boxes fit comfortably. | +| [func getMatchingTestIDs(labelExpr string) ([]string, error)](#getmatchingtestids) | Builds a list of internal test‑case IDs whose labels satisfy the supplied expression. | +| [func getTestDescriptionsFromTestIDs([]string) []claim.TestCaseDescription](#gettestdescriptionsfromtestids) | Looks up each provided test‑case ID in the global `identifiers.Catalog` and collects the corresponding `claim.TestCaseDescription` objects. | +| [func printTestCaseInfoBox(testCase *claim.TestCaseDescription)](#printtestcaseinfobox) | Renders a visually distinct box in the terminal containing the identifier, description, remediation, exceptions, and best‑practice reference of a test case. The output is colorized and wrapped to fit the current console width. | +| [func printTestList(testIDs []string)](#printtestlist) | Outputs a neatly bordered table listing each supplied test ID. | +| [func showInfo(cmd *cobra.Command, _ []string) error](#showinfo) | Retrieves test case identifiers that match a label expression, optionally prints a list of them, or shows a formatted information box for each matched test case. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Builds and returns a configured `*cobra.Command` that displays information about test cases. It sets up persistent flags, enforces required flags, and handles error reporting. + +#### 1) Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and returns a configured `*cobra.Command` that displays information about test cases. It sets up persistent flags, enforces required flags, and handles error reporting. 
| +| **Parameters** | None | +| **Return value** | A pointer to the constructed `cobra.Command`. Returns `nil` if flag registration fails. | +| **Key dependencies** | • `infoCmd.PersistentFlags().StringP`, `BoolP` – define flags
• `infoCmd.MarkPersistentFlagRequired` – makes “test-label” mandatory
• `fmt.Fprintf(os.Stderr, …)` – error output on failure | +| **Side effects** | Registers persistent command‑line flags and writes to standard error if flag marking fails. No other global state is modified. | +| **How it fits the package** | Provides the CLI entry point for the *info* sub‑command; used by `newRootCmd` to add the command to the root command tree. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create infoCmd"] --> B["Add StringP flag “test-label”"] + B --> C["Add BoolP flag “list”"] + C --> D["MarkPersistentFlagRequired(test-label)"] + D --> E{"Error?"} + E -- Yes --> F["Print error to stderr"] + E -- No --> G["Return infoCmd"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_StringP + func_NewCommand --> func_PersistentFlags + func_NewCommand --> func_BoolP + func_NewCommand --> func_MarkPersistentFlagRequired + func_NewCommand --> fmt_Fprintf +``` + +#### 5) Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_newRootCmd --> func_NewCommand +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info" +) + +func main() { + cmd := info.NewCommand() + if cmd == nil { + return // handle error if needed + } + _ = cmd.Execute() // run the command (in real usage, part of a larger CLI) +} +``` + +--- + +## Local Functions + +### adjustLineMaxWidth + +**adjustLineMaxWidth** - Determines the current terminal width and, if it is smaller than the pre‑set maximum line length plus padding, reduces `lineMaxWidth` so that output boxes fit comfortably. 
+ +#### Signature (Go) + +```go +func adjustLineMaxWidth() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines the current terminal width and, if it is smaller than the pre‑set maximum line length plus padding, reduces `lineMaxWidth` so that output boxes fit comfortably. | +| **Parameters** | None | +| **Return value** | None (void) | +| **Key dependencies** | *Calls*
• `golang.org/x/term.IsTerminal(0)` – checks if stdout is a terminal.
• `golang.org/x/term.GetSize(0)` – retrieves the current terminal width. | +| **Side effects** | Mutates the package‑level variable `lineMaxWidth` based on terminal size; may early‑return without changes if not a terminal or an error occurs. No I/O beyond querying terminal properties. | +| **How it fits the package** | Adjusts formatting parameters before printing test case information boxes in `showInfo`. Ensures that box content does not exceed the visible terminal width, improving readability on smaller terminals. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Is stdout a terminal?"} + B -- Yes --> C["Get terminal size"] + B -- No --> D["Exit early"] + C --> E{"Error retrieving size?"} + E -- Yes --> D + E -- No --> F{"Width < lineMaxWidth+linePadding?"} + F -- Yes --> G["Set lineMaxWidth = width - linePadding"] + F -- No --> H["No change"] + G --> I["End"] + H --> I +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_adjustLineMaxWidth --> term.IsTerminal + func_adjustLineMaxWidth --> term.GetSize +``` + +#### Functions calling `adjustLineMaxWidth` (Mermaid) + +```mermaid +graph TD + func_showInfo --> func_adjustLineMaxWidth +``` + +#### Usage example (Go) + +```go +// Minimal example invoking adjustLineMaxWidth +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info" +) + +func main() { + info.adjustLineMaxWidth() +} +``` + +--- + +### getMatchingTestIDs + +**getMatchingTestIDs** - Builds a list of internal test‑case IDs whose labels satisfy the supplied expression. + +#### Signature (Go) + +```go +func getMatchingTestIDs(labelExpr string) ([]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a list of internal test‑case IDs whose labels satisfy the supplied expression. | +| **Parameters** | `labelExpr` – A label filter expressed as a string (e.g., `"all"`, `"security,network"`). 
| +| **Return value** | `[]string` – Matching test‑case identifiers; `error` if initialization or filtering fails. | +| **Key dependencies** | • `checksdb.InitLabelsExprEvaluator(labelExpr)`
• `certsuite.LoadInternalChecksDB()`
• `checksdb.FilterCheckIDs()` | +| **Side effects** | • Configures the global label evaluator used by `checksdb`.
• Loads all internal check definitions into memory. No I/O or concurrency beyond those functions. | +| **How it fits the package** | Used by command‑line utilities to filter and display test cases based on user‑supplied labels. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Initialize label evaluator"} + B -- success --> C["Load internal checks DB"] + C --> D{"Filter check IDs"} + D -- success --> E["Return test IDs"] + E --> F["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getMatchingTestIDs --> func_InitLabelsExprEvaluator + func_getMatchingTestIDs --> func_LoadInternalChecksDB + func_getMatchingTestIDs --> func_FilterCheckIDs +``` + +#### Functions calling `getMatchingTestIDs` (Mermaid) + +```mermaid +graph TD + func_showInfo --> func_getMatchingTestIDs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getMatchingTestIDs +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info" +) + +func main() { + testIDs, err := info.getMatchingTestIDs("security") + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + for _, id := range testIDs { + fmt.Println(id) + } +} +``` + +--- + +--- + +### getTestDescriptionsFromTestIDs + +**getTestDescriptionsFromTestIDs** - Looks up each provided test‑case ID in the global `identifiers.Catalog` and collects the corresponding `claim.TestCaseDescription` objects. + +#### Signature (Go) + +```go +func getTestDescriptionsFromTestIDs([]string) []claim.TestCaseDescription +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Looks up each provided test‑case ID in the global `identifiers.Catalog` and collects the corresponding `claim.TestCaseDescription` objects. 
| +| **Parameters** | `testIDs []string – list of test case identifiers to resolve` | +| **Return value** | `[]claim.TestCaseDescription – ordered slice of descriptions matching the input IDs; empty if none found` | +| **Key dependencies** | • `identifiers.Catalog` (map of catalog entries)
• `append` function | +| **Side effects** | No state mutation or I/O. Purely functional. | +| **How it fits the package** | Supplies detailed test‑case information to higher‑level functions such as `showInfo`, enabling user‑friendly output and further processing. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over testIDs"} + B --> C{"For each ID, search identifiers.Catalog"} + C -->|"Match found"| D["Append description to result"] + C -->|"No match"| E["Continue next ID"] + D --> F["Return result slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getTestDescriptionsFromTestIDs --> identifiers.Catalog + func_getTestDescriptionsFromTestIDs --> func_append +``` + +#### Functions calling `getTestDescriptionsFromTestIDs` (Mermaid) + +```mermaid +graph TD + func_showInfo --> func_getTestDescriptionsFromTestIDs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getTestDescriptionsFromTestIDs +testIDs := []string{"TC001", "TC002"} +descriptions := getTestDescriptionsFromTestIDs(testIDs) + +for _, d := range descriptions { + fmt.Printf("ID: %s, Name: %s\n", d.ID, d.Name) +} +``` + +--- + +### printTestCaseInfoBox + +**printTestCaseInfoBox** - Renders a visually distinct box in the terminal containing the identifier, description, remediation, exceptions, and best‑practice reference of a test case. The output is colorized and wrapped to fit the current console width. 
| +| **Parameters** | `testCase *claim.TestCaseDescription` – pointer to a struct that holds all textual fields for a test case (identifier, description, remediation, etc.). | +| **Return value** | None; prints directly to standard output. | +| **Key dependencies** | • `strings.Repeat` – creates border lines.
• `fmt.Println`, `fmt.Printf` – output functions.
• `cli.LineColor`, `cli.LineAlignCenter`, `cli.LineAlignLeft` – text styling and alignment helpers.
• `cli.WrapLines` – splits long paragraphs into width‑constrained slices. | +| **Side effects** | Writes to `os.Stdout`; relies on global variables `lineMaxWidth` and `linePadding`. No state mutation beyond console output. | +| **How it fits the package** | Part of the `info` command suite; called by `showInfo` to present detailed information about each selected test case in a user‑friendly format. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create border string"] --> B["Print top border"] + B --> C["Print identifier line (centered, cyan)"] + C --> D["Print descriptor header (DESCRIPTION, green)"] + D --> E["Wrap and print description lines left‑aligned"] + E --> F["Print remediation header (REMEDIATION, green)"] + F --> G["Wrap and print remediation lines left‑aligned"] + G --> H["Print exceptions header (EXCEPTIONS, green)"] + H --> I["Wrap and print exception lines left‑aligned"] + I --> J["Print best‑practice header (BEST PRACTICES REFERENCE, green)"] + J --> K["Wrap and print best‑practice lines left‑aligned"] + K --> L["Print bottom border"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printTestCaseInfoBox --> strings.Repeat + func_printTestCaseInfoBox --> fmt.Println + func_printTestCaseInfoBox --> fmt.Printf + func_printTestCaseInfoBox --> cli.LineColor + func_printTestCaseInfoBox --> cli.LineAlignCenter + func_printTestCaseInfoBox --> cli.LineAlignLeft + func_printTestCaseInfoBox --> cli.WrapLines +``` + +#### Functions calling `printTestCaseInfoBox` (Mermaid) + +```mermaid +graph TD + func_showInfo --> func_printTestCaseInfoBox +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printTestCaseInfoBox +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/claim" +) + +func main() { + desc := &claim.TestCaseDescription{ + Identifier: claim.Identifier{Id: "TC-001"}, + Description: "This test verifies X.", + Remediation: "Ensure Y is configured.", + 
ExceptionProcess: "Exceptions apply when Z.", + BestPracticeReference: "See RFC 1234 for details.", + } + printTestCaseInfoBox(desc) +} +``` + +--- + +### printTestList + +**printTestList** - Outputs a neatly bordered table listing each supplied test ID. + +#### Signature (Go) + +```go +func printTestList(testIDs []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Outputs a neatly bordered table listing each supplied test ID. | +| **Parameters** | `testIDs []string` – slice of test case identifiers to display. | +| **Return value** | None (void). | +| **Key dependencies** | • `fmt.Println` – prints horizontal separators and the header/footer.
• `fmt.Printf` – formats each ID within the table width. | +| **Side effects** | Writes directly to standard output; no state mutation or concurrency. | +| **How it fits the package** | Utility helper used by `showInfo` when the user requests a list of available test cases (`--list`). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Print top border"] + B --> C["Print header row"] + C --> D["Print bottom border"] + D --> E{"For each testID"} + E -->|"Loop"| F["Print formatted ID line"] + F --> E + E --> G["Print closing border"] + G --> H["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printTestList --> fmt_Println + func_printTestList --> fmt_Printf +``` + +#### Functions calling `printTestList` (Mermaid) + +```mermaid +graph TD + func_showInfo --> func_printTestList +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printTestList +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/info" +) + +func main() { + testIDs := []string{"testA", "testB", "testC"} + info.printTestList(testIDs) +} +``` + +--- + +--- + +### showInfo + +**showInfo** - Retrieves test case identifiers that match a label expression, optionally prints a list of them, or shows a formatted information box for each matched test case. + +#### Signature (Go) + +```go +func showInfo(cmd *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Retrieves test case identifiers that match a label expression, optionally prints a list of them, or shows a formatted information box for each matched test case. | +| **Parameters** | `cmd *cobra.Command` – command context; `_ []string` – positional arguments (ignored). | +| **Return value** | `error` – returned if any lookup or formatting step fails. | +| **Key dependencies** | • `cmd.Flags().GetString("test-label")`
• `cmd.Flags().GetBool("list")`
• `getMatchingTestIDs(labelExpr string)`
• `printTestList([]string)`
• `getTestDescriptionsFromTestIDs([]string)`
• `adjustLineMaxWidth()`
• `printTestCaseInfoBox(*claim.TestCaseDescription)` | +| **Side effects** | • Reads command flags.
• Writes formatted output to standard output.
• Calls helper functions that may access global state (e.g., `certsuite.LoadInternalChecksDB()`). | +| **How it fits the package** | Acts as the main execution routine for the `info` sub‑command of the CertSuite CLI, orchestrating user input parsing, data retrieval, and presentation. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Parse flags: test-label & list"] --> B{"Retrieve matching IDs"} + B --> C["getMatchingTestIDs"] + C --> D["Check error"] + D -->|"error"| E["Return error"] + D -->|"ok"| F["IDs found?"] + F -->|"no"| G["Return “no match” error"] + F -->|"yes"| H{"list flag set?"} + H -->|"yes"| I["printTestList"] + I --> J["Exit"] + H -->|"no"| K["getTestDescriptionsFromTestIDs"] + K --> L["Check length"] + L -->|"zero"| M["Return “no match” error"] + L -->|"non‑zero"| N["adjustLineMaxWidth"] + N --> O{"Print each test case"} + O --> P["printTestCaseInfoBox for each"] + P --> Q["Finish"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_showInfo --> func_getMatchingTestIDs + func_showInfo --> fmt_Errorf + func_showInfo --> printTestList + func_showInfo --> getTestDescriptionsFromTestIDs + func_showInfo --> adjustLineMaxWidth + func_showInfo --> printTestCaseInfoBox +``` + +#### Functions calling `showInfo` + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking showInfo +package main + +import ( + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{} + // Assume flags are set up elsewhere + if err := showInfo(cmd, nil); err != nil { + fmt.Println("Error:", err) + } +} +``` + +--- diff --git a/docs/cmd/certsuite/main.md b/docs/cmd/certsuite/main.md new file mode 100644 index 000000000..abb924806 --- /dev/null +++ b/docs/cmd/certsuite/main.md @@ -0,0 +1,162 @@ +# Package main + +**Path**: `cmd/certsuite` + +## Table of Contents + +- [Overview](#overview) +- [Local Functions](#local-functions) + - [main](#main) + - [newRootCmd](#newrootcmd) + +## Overview + +Provides the entry point for the Certsuite CLI by creating a root cobra command and executing it. + +### Key Features + +- Initializes a cobra.Command that aggregates all sub‑commands (claim, generate, check, run, info, version, upload). +- Handles execution errors by logging via internal/log and exiting with non‑zero status. + +### Design Notes + +- Errors are surfaced through a logger before terminating to aid debugging. +- The root command is built lazily in newRootCmd to keep initialization logic separate from execution. + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func main()](#main) | Initializes the root command and starts the CLI execution loop, handling errors by logging them and exiting with a non‑zero status. | +| [func newRootCmd() *cobra.Command](#newrootcmd) | Builds and returns a *cobra.Command that serves as the entry point for the Certsuite CLI, wiring all sub‑commands together. | + +## Local Functions + +### main + +**main** - Initializes the root command and starts the CLI execution loop, handling errors by logging them and exiting with a non‑zero status. 
+
+#### Signature (Go)
+
+```go
+func main()
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Initializes the root command and starts the CLI execution loop, handling errors by logging them and exiting with a non‑zero status. |
+| **Parameters** | None |
+| **Return value** | None |
+| **Key dependencies** | `newRootCmd() *cobra.Command` – builds the command tree.
`rootCmd.Execute()` – runs the CLI.
`log.Error(msg string, args ...any)` – logs any execution error.
`os.Exit(code int)` – terminates the process on failure. | +| **Side effects** | Creates a command hierarchy in memory; may write to standard output or error streams via Cobra and log package; exits the program if an error occurs. | +| **How it fits the package** | Serves as the program’s main function, orchestrating the startup sequence for the `certsuite` command line tool. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + func_main --> func_newRootCmd + func_main --> rootCmd_Execute + func_main --> log_Error + func_main --> os_Exit +``` + +#### Function dependencies (Mermaid) + +The `main` function calls the following functions: + +```mermaid +graph TD + func_main --> func_newRootCmd + func_main --> func_Execute + func_main --> func_Error + func_main --> func_Exit +``` + +#### Functions calling `main` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking main() +func main() { + // The program starts automatically when executed; no explicit call needed. +} +``` + +--- + +### newRootCmd + +**newRootCmd** - Builds and returns a *cobra.Command that serves as the entry point for the Certsuite CLI, wiring all sub‑commands together. + +#### Signature (Go) + +```go +func()(*cobra.Command) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and returns a *cobra.Command that serves as the entry point for the Certsuite CLI, wiring all sub‑commands together. | +| **Parameters** | None | +| **Return value** | `*cobra.Command` – fully configured root command with child commands added. | +| **Key dependencies** | • Calls `AddCommand` on the root command to attach sub‑commands.
• Invokes `NewCommand()` from the following packages:
- `claim`
- `generate`
- `check`
- `run`
- `info`
- `version`
- `upload` | +| **Side effects** | No external I/O; only mutates the internal state of the root command by adding child commands. | +| **How it fits the package** | This function is used in `main()` to create the command tree that `cobra.Command.Execute()` will run, enabling the CLI functionality. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Create cobra.Command"] --> B["Set Use and Short"] + B --> C["Add claim sub‑command"] + C --> D["Add generate sub‑command"] + D --> E["Add check sub‑command"] + E --> F["Add run sub‑command"] + F --> G["Add info sub‑command"] + G --> H["Add version sub‑command"] + H --> I["Add upload sub‑command"] + I --> J["Return &rootCmd"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_newRootCmd --> cobra.AddCommand + func_newRootCmd --> claim.NewCommand + func_newRootCmd --> generate.NewCommand + func_newRootCmd --> check.NewCommand + func_newRootCmd --> run.NewCommand + func_newRootCmd --> info.NewCommand + func_newRootCmd --> version.NewCommand + func_newRootCmd --> upload.NewCommand +``` + +#### Functions calling `newRootCmd` + +```mermaid +graph TD + main --> func_newRootCmd +``` + +#### Usage example (Go) + +```go +// Minimal example invoking newRootCmd +func main() { + root := newRootCmd() + if err := root.Execute(); err != nil { + log.Fatal(err) + } +} +``` + +--- diff --git a/docs/cmd/certsuite/pkg/claim/claim.md b/docs/cmd/certsuite/pkg/claim/claim.md new file mode 100644 index 000000000..1fcbc79ef --- /dev/null +++ b/docs/cmd/certsuite/pkg/claim/claim.md @@ -0,0 +1,321 @@ +# Package claim + +**Path**: `cmd/certsuite/pkg/claim` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [Configurations](#configurations) + - [Nodes](#nodes) + - [Schema](#schema) + - [TestCaseID](#testcaseid) + - [TestCaseRawResult](#testcaserawresult) + - [TestCaseResult](#testcaseresult) + - [TestOperator](#testoperator) +- [Exported Functions](#exported-functions) + - 
[CheckVersion](#checkversion)
+  - [Parse](#parse)
+
+## Overview
+
+Defines the claim file schema types and helper functions to validate a claim's format version and to parse claim JSON files into memory.
+
+### Structs Summary

+| Name | Purpose |
+|------|----------|
+| [**Configurations**](#configurations) | Claim configuration data, abnormal events and test operators |
+| [**Nodes**](#nodes) | Container for collected Kubernetes node information |
+| [**Schema**](#schema) | Top‑level structure of a claim file |
+| [**TestCaseID**](#testcaseid) | Unique identifier of a test case within a suite |
+| [**TestCaseRawResult**](#testcaserawresult) | Name/status pair for a raw test case result |
+| [**TestCaseResult**](#testcaseresult) | Detailed outcome data for a single test case |
+| [**TestOperator**](#testoperator) | Identifies a test operator by namespace, name and version |
+
+### Exported Functions Summary
+
+| Name | Purpose |
+|------|----------|
+| [func CheckVersion(version string) error](#checkversion) | Ensures that the supplied claim file version matches the single supported semantic‑version used by the application. |
+| [func Parse(filePath string) (*Schema, error)](#parse) | Reads the specified file, unmarshals its JSON content into a `Schema` struct and returns it. |
+
+## Structs
+
+### Configurations
+
+
+**Purpose**: Aggregates the configuration data recorded in a claim: the runtime configuration, any abnormal events observed during the run, and the operators under test.
+
+**Fields**:
+
+| Field | Type | Description |
+|-------|------|--------------|
+| `Config` | `interface{}` | Opaque runtime configuration captured in the claim. |
+| `AbnormalEvents` | `[]interface{}` | Abnormal events recorded during the run. |
+| `TestOperators` | `[]TestOperator` | Operators involved in the test run. |
+
+---
+
+### Nodes
+
+A container for collected information about Kubernetes nodes.
+
+#### Fields
+
+| Field | Type | Description |
+|-------------|-----------|-------------|
+| NodesSummary | interface{} | Summary data about the node (e.g., status, resource usage). Populated from JSON field `nodeSummary`. |
+| CniNetworks | interface{} | Information on CNI plugins installed on the node. Corresponds to JSON field `cniPlugins`. |
+| NodesHwInfo | interface{} | Hardware details of the node such as CPU and memory specifications. Maps to JSON field `nodesHwInfo`. 
| +| CsiDriver | interface{} | Details about CSI drivers present on the node, linked with JSON field `csiDriver`. | + +#### Purpose + +The `Nodes` struct aggregates diverse data collected from a Kubernetes cluster’s nodes. Each field holds opaque data (represented as `interface{}`) that can be marshaled to or unmarshaled from JSON. This structure is used when reporting node‑level metrics, validating infrastructure compliance, or passing node information between components of the certsuite tool. + +#### Related functions + +No methods are defined directly on this struct. It is primarily a data holder used by other packages in the `claim` module. + +--- + +### Schema + +Represents the top‑level structure of a claim file, containing configurations, node information, test results and version metadata. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| Claim | struct{Configurations; Nodes Nodes; Results TestSuiteResults; Versions officialClaimScheme.Versions} | Encapsulates the core data of a claim. `Configurations` holds runtime settings; `Nodes` lists target nodes; `Results` stores test outcomes; `Versions` records schema and component versions. | + +#### Purpose + +The `Schema` struct models the JSON payload expected by the certsuite tool when loading or generating claim files. It groups all relevant information under a single `claim` key, enabling straightforward marshaling/unmarshaling with Go’s `encoding/json`. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| Parse | Reads a file from disk, unmarshals its JSON contents into a `Schema`, and returns the populated struct or an error. | + +--- + +### TestCaseID + +Represents a unique identifier for an individual test case within a certification suite, including its suite association and any tagging information. 
+ +#### Fields + +| Field | Type | Description | +|-------|--------|-------------| +| `ID` | string | Unique test‑case identifier (e.g., `"TC001"`). | +| `Suite` | string | Name of the certification suite that contains this test case. | +| `Tags` | string | Comma‑separated list of tags applied to the test case for filtering or categorization purposes. | + +#### Purpose + +The `TestCaseID` struct encapsulates metadata required to reference and locate a specific test case across different suites and tooling contexts. It is typically used when generating reports, mapping results back to source tests, or applying filters based on suite membership or tags. + +#### Related functions (none) + +--- + +### TestCaseRawResult + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Name` | `string` | Field documentation | +| `Status` | `string` | Field documentation | + +--- + +### TestCaseResult + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `CapturedTestOutput` | `string` | Field documentation | +| `Duration` | `int` | Field documentation | +| `FailureLineContent` | `string` | Field documentation | +| `SkipReason` | `string` | Field documentation | +| `TestID` | `struct{ID string; Suite string; Tags string}` | Field documentation | +| `CatalogInfo` | `struct{BestPracticeReference string; Description string; ExceptionProcess string; Remediation string}` | Field documentation | +| `CategoryClassification` | `map[string]string` | Field documentation | +| `EndTime` | `string` | Field documentation | +| `FailureLocation` | `string` | Field documentation | +| `CheckDetails` | `string` | Field documentation | +| `StartTime` | `string` | Field documentation | +| `State` | `string` | Field documentation | + +--- + +### TestOperator + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Namespace` | `string` | Field documentation | +| 
`Version` | `string` | Field documentation | +| `Name` | `string` | Field documentation | + +--- + +## Exported Functions + +### CheckVersion + +**CheckVersion** - Ensures that the supplied claim file version matches the single supported semantic‑version used by the application. + +#### 1) Signature (Go) + +```go +func CheckVersion(version string) error +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that the supplied claim file version matches the single supported semantic‑version used by the application. | +| **Parameters** | `version` (string) – The version string extracted from a claim file. | +| **Return value** | `error` – Nil if the version is valid and supported; otherwise an error describing the mismatch or parsing failure. | +| **Key dependencies** | • `semver.NewVersion` (github.com/Masterminds/semver)
• `fmt.Errorf` (standard library) | +| **Side effects** | None – purely functional. | +| **How it fits the package** | Acts as a guard for all claim‑processing routines, preventing downstream logic from operating on unsupported formats. | + +#### 3) Internal workflow + +```mermaid +flowchart TD + A["Parse input string"] --> B{"Is valid semver?"} + B -- no --> C["Return error claim file version is not valid"] + B -- yes --> D["Create supportedSemVersion"] + D --> E{"Matches supported?"} + E -- no --> F["Return error claim format version is not supported"] + E -- yes --> G["Return nil"] +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_CheckVersion --> func_NewVersion + func_CheckVersion --> fmt_Errorf + func_CheckVersion --> func_Compare +``` + +#### 5) Functions calling `CheckVersion` + +```mermaid +graph TD + func_dumpCsv --> func_CheckVersion + func_showFailures --> func_CheckVersion +``` + +#### 6) Usage example + +```go +// Minimal example invoking CheckVersion +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim" +) + +func main() { + if err := claim.CheckVersion("v1.0.0"); err != nil { + fmt.Printf("Unsupported claim format: %v\n", err) + return + } + fmt.Println("Claim version is supported.") +} +``` + +--- + +--- + +### Parse + +**Parse** - Reads the specified file, unmarshals its JSON content into a `Schema` struct and returns it. + +Parses a JSON claim file into an in‑memory `Schema`. + +```go +func Parse(filePath string) (*Schema, error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads the specified file, unmarshals its JSON content into a `Schema` struct and returns it. | +| **Parameters** | `filePath string – path to the claim file` | +| **Return value** | `(*Schema, error) – parsed schema or an error describing what went wrong` | +| **Key dependencies** | • `os.ReadFile`
• `encoding/json.Unmarshal`
• `fmt.Errorf` (twice for different failure contexts) | +| **Side effects** | None beyond reading the file; no global state is mutated. | +| **How it fits the package** | Core helper used by various command‑line subcommands to load claim data before further processing or validation. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Read file bytes from `filePath`"] --> B["Unmarshal JSON into Schema"] + B --> C{"Success?"} + C -- yes --> D["Return &Schema, nil"] + C -- no --> E["Return nil, error via fmt.Errorf"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Parse --> os_ReadFile + func_Parse --> encoding_json_Unmarshal + func_Parse --> fmt_Errorf +``` + +#### Functions calling `Parse` + +```mermaid +graph TD + func_dumpCsv --> func_Parse + func_showFailures --> func_Parse +``` + +#### Usage example + +```go +// Minimal example invoking Parse +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/pkg/claim" +) + +func main() { + schema, err := claim.Parse("path/to/claim.json") + if err != nil { + fmt.Printf("Failed to parse claim: %v\n", err) + return + } + fmt.Printf("Parsed claim: %+v\n", schema) +} +``` + +--- diff --git a/docs/cmd/certsuite/run/run.md b/docs/cmd/certsuite/run/run.md new file mode 100644 index 000000000..0c73215a4 --- /dev/null +++ b/docs/cmd/certsuite/run/run.md @@ -0,0 +1,287 @@ +# Package run + +**Path**: `cmd/certsuite/run` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [initTestParamsFromFlags](#inittestparamsfromflags) + - [runTestSuite](#runtestsuite) + +## Overview + +The package implements the `run` sub‑command of certsuite. It parses command‑line flags, prepares test parameters, optionally starts an HTTP server for remote execution, and runs the certification tests locally. 
+ +### Key Features + +- Configures a cobra.Command with persistent flags for output directory, timeout, and various test options +- Initializes shared TestParameters from flag values and ensures output directories exist +- Supports both stand‑alone test execution and server mode via webserver.StartServer + +### Design Notes + +- Relies on cobra’s PersistentFlags to share options across subcommands; errors during init abort the run with a fatal log +- Test parameters are stored in a singleton accessed by configuration.GetTestParameters, which may be shared across packages +- When running in server mode the command starts an HTTP listener and blocks until shutdown, otherwise it runs tests synchronously + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Creates and configures a `cobra.Command` for the `run` sub‑command, adding all required persistent flags. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func initTestParamsFromFlags(cmd *cobra.Command) error](#inittestparamsfromflags) | Reads CLI flag values into the shared `TestParameters` structure and prepares the output directory and timeout value. | +| [func runTestSuite(cmd *cobra.Command, _ []string) error](#runtestsuite) | Initializes test parameters from CLI flags and runs the suite either in server mode or as a stand‑alone process. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Creates and configures a `cobra.Command` for the `run` sub‑command, adding all required persistent flags. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates and configures a `cobra.Command` for the `run` sub‑command, adding all required persistent flags. | +| **Parameters** | none | +| **Return value** | A pointer to the configured `*cobra.Command`. 
|
+| **Key dependencies** | Calls various flag‑registration helpers on `runCmd.PersistentFlags()` such as `StringP`, `Bool`, and `String`. |
+| **Side effects** | Mutates the global `runCmd` by attaching flags; returns a command object used in the CLI tree. |
+| **How it fits the package** | Provides the executable entry point for running tests, exposing configuration options to users of the certsuite CLI. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Configure Flags"}
+    B --> C["StringP: output-dir"]
+    B --> D["StringP: label-filter"]
+    B --> E["String: timeout"]
+    B --> F["StringP: config-file"]
+    B --> G["StringP: kubeconfig"]
+    B --> H["Bool: server-mode"]
+    B --> I["Bool: omit-artifacts-zip-file"]
+    B --> J["String: log-level"]
+    B --> K["String: offline-db"]
+    B --> L["String: preflight-dockerconfig"]
+    B --> M["Bool: intrusive"]
+    B --> N["Bool: allow-preflight-insecure"]
+    B --> O["Bool: include-web-files"]
+    B --> P["Bool: enable-data-collection"]
+    B --> Q["Bool: create-xml-junit-file"]
+    B --> R["String: certsuite-probe-image"]
+    B --> S["String: daemonset-cpu-req"]
+    B --> T["String: daemonset-cpu-lim"]
+    B --> U["String: daemonset-mem-req"]
+    B --> V["String: daemonset-mem-lim"]
+    B --> W["Bool: sanitize-claim"]
+    B --> X["Bool: allow-non-running"]
+    B --> Y["String: connect-api-key"]
+    B --> Z["String: connect-project-id"]
+    B --> AA["String: connect-api-base-url"]
+    B --> AB["String: connect-api-proxy-url"]
+    B --> AC["String: connect-api-proxy-port"]
+    AC --> RET["Return runCmd"]
+```
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_NewCommand --> func_StringP
+    func_NewCommand --> func_PersistentFlags
+    func_NewCommand --> func_Bool
+    func_NewCommand --> func_String
+```
+
+#### Functions calling `NewCommand` (Mermaid)
+
+```mermaid
+graph TD
+    func_newRootCmd --> func_NewCommand
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking NewCommand to obtain the command tree. 
+cmd := run.NewCommand() +if err := cmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) +} +``` + +--- + +## Local Functions + +### initTestParamsFromFlags + +**initTestParamsFromFlags** - Reads CLI flag values into the shared `TestParameters` structure and prepares the output directory and timeout value. + +#### Signature (Go) + +```go +func initTestParamsFromFlags(cmd *cobra.Command) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads CLI flag values into the shared `TestParameters` structure and prepares the output directory and timeout value. | +| **Parameters** | `cmd *cobra.Command` – Cobra command from which flags are read. | +| **Return value** | `error` – non‑nil if a required operation (e.g., creating the output folder or parsing the timeout) fails. | +| **Key dependencies** | • `configuration.GetTestParameters()`
• `cmd.Flags().GetString`, `GetBool`
• `os.Stat`, `os.MkdirAll`
• `fmt.Errorf`, `fmt.Fprintf`
• `time.ParseDuration` | +| **Side effects** | Modifies the global test‑parameter instance; creates a directory if missing; writes error messages to standard error when timeout parsing fails. | +| **How it fits the package** | Called by `runTestSuite` during command execution to set up runtime configuration before launching either the server or standalone test runner. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Get shared TestParameters"] + B --> C{"Read flags"} + C --> D["Set OutputDir, LabelsFilter, ServerMode, …"] + D --> E{"Check output directory"} + E -- exists --> F{"Parse timeout flag"} + E -- missing --> G["Create directory with 0755 perms"] + G --> F + F --> H{"Timeout parse success?"} + H -- yes --> I["Set testParams.Timeout"] + H -- no --> J["Print error to stderr, use default"] + J --> I + I --> K["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_initTestParamsFromFlags --> func_GetTestParameters + func_initTestParamsFromFlags --> func_GetString + func_initTestParamsFromFlags --> func_GetBool + func_initTestParamsFromFlags --> func_Stat + func_initTestParamsFromFlags --> func_MkdirAll + func_initTestParamsFromFlags --> func_Errorf + func_initTestParamsFromFlags --> func_ParseDuration + func_initTestParamsFromFlags --> func_Fprintf +``` + +#### Functions calling `initTestParamsFromFlags` (Mermaid) + +```mermaid +graph TD + func_runTestSuite --> func_initTestParamsFromFlags +``` + +#### Usage example (Go) + +```go +// Minimal example invoking initTestParamsFromFlags +package main + +import ( + "github.com/spf13/cobra" + cmd "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/run" +) + +func main() { + root := &cobra.Command{Use: "certsuite"} + if err := cmd.initTestParamsFromFlags(root); err != nil { + panic(err) + } +} +``` + +--- + +--- + +### runTestSuite + +**runTestSuite** - Initializes test parameters from CLI flags and runs the suite either in server 
mode or as a stand‑alone process. + +#### Signature (Go) + +```go +func runTestSuite(cmd *cobra.Command, _ []string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Initializes test parameters from CLI flags and runs the suite either in server mode or as a stand‑alone process. | +| **Parameters** | `cmd *cobra.Command` – command instance containing flag values; `_ []string` – unused arguments (ignored). | +| **Return value** | `error` – always nil; fatal errors are handled via `log.Fatal`. | +| **Key dependencies** | - `initTestParamsFromFlags(cmd)`
- `configuration.GetTestParameters()`
- `webserver.StartServer(outputDir)`
- `certsuite.Startup()`, `certsuite.Shutdown()`, `certsuite.Run(labelsFilter, outputFolder)` | +| **Side effects** | • Parses and stores flags into global test parameters.
• Starts an HTTP server if `ServerMode` is true.
• Invokes the core run logic which performs checks, generates reports, and may interact with external APIs.
• Logs information and exits on fatal errors. | +| **How it fits the package** | Entry point for the `certsuite run` command; orchestrates configuration, mode selection, and execution flow. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["initTestParamsFromFlags"] + B -- success --> C{"ServerMode?"} + C -- true --> D["webserver.StartServer"] + C -- false --> E["certsuite.Startup"] + E --> F["certsuite.Run"] + F --> G["certsuite.Shutdown (deferred)"] + G --> H["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_runTestSuite --> initTestParamsFromFlags + func_runTestSuite --> configuration.GetTestParameters + func_runTestSuite --> webserver.StartServer + func_runTestSuite --> certsuite.Startup + func_runTestSuite --> certsuite.Shutdown + func_runTestSuite --> certsuite.Run +``` + +#### Functions calling `runTestSuite` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking runTestSuite +package main + +import ( + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{} + // Flags would be added here in real usage. 
+ if err := runTestSuite(cmd, nil); err != nil { + panic(err) + } +} +``` + +--- + +--- diff --git a/docs/cmd/certsuite/upload/results_spreadsheet/resultsspreadsheet.md b/docs/cmd/certsuite/upload/results_spreadsheet/resultsspreadsheet.md new file mode 100644 index 000000000..e933ec179 --- /dev/null +++ b/docs/cmd/certsuite/upload/results_spreadsheet/resultsspreadsheet.md @@ -0,0 +1,1497 @@ +# Package resultsspreadsheet + +**Path**: `cmd/certsuite/upload/results_spreadsheet` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [CreateSheetsAndDriveServices](#createsheetsanddriveservices) + - [GetHeaderIndicesByColumnNames](#getheaderindicesbycolumnnames) + - [GetHeadersFromSheet](#getheadersfromsheet) + - [GetHeadersFromValueRange](#getheadersfromvaluerange) + - [GetSheetIDByName](#getsheetidbyname) + - [MoveSpreadSheetToFolder](#movespreadsheettofolder) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [addBasicFilterToSpreadSheet](#addbasicfiltertospreadsheet) + - [addDescendingSortFilterToSheet](#adddescendingsortfiltertosheet) + - [addFilterByFailedAndMandatoryToSheet](#addfilterbyfailedandmandatorytosheet) + - [createConclusionsSheet](#createconclusionssheet) + - [createDriveFolder](#createdrivefolder) + - [createRawResultsSheet](#createrawresultssheet) + - [createSingleWorkloadRawResultsSheet](#createsingleworkloadrawresultssheet) + - [createSingleWorkloadRawResultsSpreadSheet](#createsingleworkloadrawresultsspreadsheet) + - [extractFolderIDFromURL](#extractfolderidfromurl) + - [generateResultsSpreadSheet](#generateresultsspreadsheet) + - [prepareRecordsForSpreadSheet](#preparerecordsforspreadsheet) + - [readCSV](#readcsv) + +## Overview + +Creates a Google Sheets spreadsheet that aggregates raw test results and derives high‑level conclusions for each workload, then uploads the sheet to a specified Google Drive folder. 
+ +### Key Features + +- Transforms CSV test output into a formatted Google Sheet with headers and truncation logic +- Automates filtering and sorting (e.g., failed/mandatory tests, descending order) on every tab +- Generates per‑workload result sheets and a consolidated conclusions sheet, then moves the file to a Drive folder + +### Design Notes + +- Uses Sheets API v4 and Drive API v3 with OAuth credentials supplied via a JSON key file +- All filtering and sorting are applied through batch update requests for efficiency +- The package assumes the CSV follows a fixed schema; missing columns trigger an error + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CreateSheetsAndDriveServices(credentials string) (*sheets.Service, *drive.Service, error)](#createsheetsanddriveservices) | Instantiates authenticated Google Sheets and Google Drive service clients using the supplied credentials file. | +| [func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error)](#getheaderindicesbycolumnnames) | Returns the zero‑based positions of each requested column name within a slice of header strings. If any name is absent, an error is returned. | +| [func GetHeadersFromSheet(sheet *sheets.Sheet) []string](#getheadersfromsheet) | Returns a slice containing the string values of all cells in the first row (header row) of the supplied sheet. | +| [func GetHeadersFromValueRange(sheetsValues *sheets.ValueRange) []string](#getheadersfromvaluerange) | Returns the column header names from the first row of a Google Sheets value range, converting each cell to its string representation. | +| [func GetSheetIDByName(spreadsheet *sheets.Spreadsheet, name string) (int64, error)](#getsheetidbyname) | Looks up a sheet’s internal ID within a Google Sheets spreadsheet using the sheet’s title. 
| +| [func MoveSpreadSheetToFolder(srv *drive.Service, folder *drive.File, spreadsheet *sheets.Spreadsheet) error](#movespreadsheettofolder) | Relocates an existing spreadsheet to a target Drive folder by updating its parent references. | +| [func NewCommand() *cobra.Command](#newcommand) | Builds and configures the `uploadResultSpreadSheetCmd` Cobra command, defining its flags and marking required ones. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func addBasicFilterToSpreadSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet) error](#addbasicfiltertospreadsheet) | Iterates over all sheets within the provided `spreadsheet` and sets a basic filter covering each sheet’s full range. The function ensures that every tab has filtering enabled, enabling later sort or filter operations. | +| [func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error](#adddescendingsortfiltertosheet) | Sorts a specified column (`colName`) in descending order on the given sheet (`sheetName`) of a Google Sheets document. | +| [func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error](#addfilterbyfailedandmandatorytosheet) | Applies a basic filter to the specified sheet so that only rows where the *State* column equals `"failed"` and the *Mandatory/Optional* column equals `"Mandatory"` are displayed. | +| [func createConclusionsSheet( sheetsService *sheets.Service, driveService *drive.Service, rawResultsSheet *sheets.Sheet, mainResultsFolderID string, ) (*sheets.Sheet, error)](#createconclusionssheet) | Builds a new Google Sheets tab that lists each unique workload from the raw results, including category, version, OCP version, and a hyperlink to a dedicated results spreadsheet. 
| +| [func createDriveFolder(srv *drive.Service, folderName, parentFolderID string) (*drive.File, error)](#createdrivefolder) | Creates a sub‑folder under the specified `parentFolderID` with the name `folderName`. If a folder of that name already exists in the parent, an error is returned. | +| [func createRawResultsSheet(fp string) (*sheets.Sheet, error)](#createrawresultssheet) | Reads a CSV file and converts its contents into a `*sheets.Sheet` suitable for inclusion in a Google Spreadsheet. | +| [func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Sheet, error)](#createsingleworkloadrawresultssheet) | Builds a new sheet containing only the rows that belong to `workloadName`. The resulting sheet keeps all original columns from `rawResultsSheet` and prefixes two extra columns: “Owner/TechLead Conclusion” and “Next Step Actions”. | +| [func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error)](#createsingleworkloadrawresultsspreadsheet) | Builds a new Google Sheets file containing only the rows that belong to `workloadName`, adds two extra columns for conclusions and next‑step AI, applies filtering on failed/mandatory tests, moves the sheet into the specified Drive folder, and returns the created spreadsheet. | +| [func extractFolderIDFromURL(u string) (string, error)](#extractfolderidfromurl) | Parses a Google Drive shareable URL and returns the last path segment, which is the folder ID. | +| [func generateResultsSpreadSheet()](#generateresultsspreadsheet) | Orchestrates the creation of a spreadsheet that stores raw test output and derived conclusions, uploads it to Google Drive, and applies filtering/sorting. 
| +| [func prepareRecordsForSpreadSheet(records [][]string) []*sheets.RowData](#preparerecordsforspreadsheet) | Transforms a 2‑D slice of string values (`records`) into a slice of `*sheets.RowData` suitable for populating a Google Sheets spreadsheet. It normalises cell content by truncating overly long strings, replacing line breaks with spaces, and ensuring empty cells contain a single space to avoid layout issues. | +| [func readCSV(string) ([][]string, error)](#readcsv) | Loads the contents of a CSV file located at the given path and returns all rows as a two‑dimensional slice of strings. | + +## Exported Functions + +### CreateSheetsAndDriveServices + +**CreateSheetsAndDriveServices** - Instantiates authenticated Google Sheets and Google Drive service clients using the supplied credentials file. + +```go +func CreateSheetsAndDriveServices(credentials string) (*sheets.Service, *drive.Service, error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates authenticated Google Sheets and Google Drive service clients using the supplied credentials file. | +| **Parameters** | `credentials string` – Path to a JSON credentials file for a Google Cloud Service Account. | +| **Return value** | `<*sheets.Service, *drive.Service, error>` – The Sheets client, the Drive client, and an error if creation fails. | +| **Key dependencies** | • `context.TODO()`
• `sheets.NewService` with `option.WithCredentialsFile(credentials)`
• `drive.NewService` with `option.WithCredentialsFile(credentials)`
• `fmt.Errorf` for error wrapping | +| **Side effects** | None; the function only creates and returns service objects. | +| **How it fits the package** | Provides a single entry point to obtain authenticated clients needed for generating test‑result spreadsheets in the *resultsspreadsheet* package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Create context with TODO()"] + B --> C["Instantiate Sheets service"] + C --> D{"Sheets creation OK?"} + D -- Yes --> E["Instantiate Drive service"] + E --> F{"Drive creation OK?"} + F -- Yes --> G["Return services"] + D -- No --> H["Wrap and return error"] + F -- No --> H +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CreateSheetsAndDriveServices --> context_TODO + func_CreateSheetsAndDriveServices --> sheets_NewService + func_CreateSheetsAndDriveServices --> drive_NewService + func_CreateSheetsAndDriveServices --> fmt_Errorf +``` + +#### Functions calling `CreateSheetsAndDriveServices` (Mermaid) + +```mermaid +graph TD + generateResultsSpreadSheet --> CreateSheetsAndDriveServices +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CreateSheetsAndDriveServices +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet" +) + +func main() { + credsPath := "/path/to/credentials.json" + + sheetSvc, driveSvc, err := resultsspreadsheet.CreateSheetsAndDriveServices(credsPath) + if err != nil { + log.Fatalf("Failed to create services: %v", err) + } + + // Use sheetSvc and driveSvc for further operations... + _ = sheetSvc + _ = driveSvc +} +``` + +--- + +### GetHeaderIndicesByColumnNames + +**GetHeaderIndicesByColumnNames** - Returns the zero‑based positions of each requested column name within a slice of header strings. If any name is absent, an error is returned. 
+ +#### Signature (Go) + +```go +func GetHeaderIndicesByColumnNames(headers, names []string) ([]int, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the zero‑based positions of each requested column name within a slice of header strings. If any name is absent, an error is returned. | +| **Parameters** | `headers []string` – list of all column headers.
`names []string` – column names to locate. | +| **Return value** | `([]int, error)` – slice containing the index for each requested name or an error if a name cannot be found. | +| **Key dependencies** | *`fmt.Errorf` – constructs descriptive errors.
* Built‑in `append`. | +| **Side effects** | None; pure function with no state mutation or I/O. | +| **How it fits the package** | Utility for mapping human‑readable header names to spreadsheet indices, used by higher‑level sheet manipulation functions (e.g., sorting, filtering). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over requested names"} + B -->|"name found in headers"| C["Append index"] + B -->|"not found"| D["Return error"] + C --> E["Continue to next name"] + E --> B + D --> F["End with error"] + C --> G["All names processed"] --> H["Return indices, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetHeaderIndicesByColumnNames --> fmt_Errorf +``` + +#### Functions calling `GetHeaderIndicesByColumnNames` + +```mermaid +graph TD + func_addDescendingSortFilterToSheet --> func_GetHeaderIndicesByColumnNames + func_addFilterByFailedAndMandatoryToSheet --> func_GetHeaderIndicesByColumnNames + func_createConclusionsSheet --> func_GetHeaderIndicesByColumnNames + func_createSingleWorkloadRawResultsSheet --> func_GetHeaderIndicesByColumnNames +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetHeaderIndicesByColumnNames +package main + +import ( + "fmt" + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet" +) + +func main() { + headers := []string{"Name", "Age", "Country"} + names := []string{"Age", "Country"} + + indices, err := resultsspreadsheet.GetHeaderIndicesByColumnNames(headers, names) + if err != nil { + log.Fatalf("lookup failed: %v", err) + } + fmt.Printf("Indices for %v are %v\n", names, indices) // Output: Indices for [Age Country] are [1 2] +} +``` + +--- + +### GetHeadersFromSheet + +**GetHeadersFromSheet** - Returns a slice containing the string values of all cells in the first row (header row) of the supplied sheet. 
+
+#### Signature (Go)
+
+```go
+func GetHeadersFromSheet(sheet *sheets.Sheet) []string
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Returns a slice containing the string values of all cells in the first row (header row) of the supplied sheet. |
+| **Parameters** | `sheet` – pointer to a `sheets.Sheet` object from the Google Sheets API; represents the spreadsheet data structure. |
+| **Return value** | `[]string` – ordered list of header names extracted from the first row. |
+| **Key dependencies** | • `google.golang.org/api/sheets/v4` (for the `Sheet`, `RowData`, `CellData`, and `ExtendedValue` types)
• Go standard library (`append`) |
+| **Side effects** | None; purely functional, no mutation of input or external state. |
+| **How it fits the package** | Provides a helper to retrieve header names used by other functions (e.g., mapping column indices, constructing new sheets). It centralizes header extraction logic for consistency across spreadsheet operations. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Iterate over sheet.Data[0].RowData[0].Values"}
+    B --> C["Append *val.UserEnteredValue.StringValue to headers"]
+    C --> D["End, return headers"]
+```
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_GetHeadersFromSheet --> append
+```
+
+#### Functions calling `GetHeadersFromSheet` (Mermaid)
+
+```mermaid
+graph TD
+    func_createConclusionsSheet --> func_GetHeadersFromSheet
+    func_createSingleWorkloadRawResultsSheet --> func_GetHeadersFromSheet
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking GetHeadersFromSheet
+package main
+
+import (
+	"fmt"
+	sheets "google.golang.org/api/sheets/v4"
+)
+
+func main() {
+	// Assume rawSheet is obtained from Google Sheets API elsewhere.
+	var rawSheet *sheets.Sheet
+
+	headers := GetHeadersFromSheet(rawSheet)
+	fmt.Println("Column headers:", headers)
+}
+```
+
+---
+
+---
+
+### GetHeadersFromValueRange
+
+**GetHeadersFromValueRange** - Returns the column header names from the first row of a Google Sheets value range, converting each cell to its string representation.
+
+#### 1. Signature (Go)
+
+```go
+func GetHeadersFromValueRange(sheetsValues *sheets.ValueRange) []string
+```
+
+#### 2. Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Returns the column header names from the first row of a Google Sheets value range, converting each cell to its string representation. |
+| **Parameters** | `sheetsValues *sheets.ValueRange` – the raw values returned by the Sheets API (`sheets.Values.Get`). 
| +| **Return value** | `[]string` – an ordered slice containing the header names as strings. | +| **Key dependencies** | • `fmt.Sprint` (to convert cell values to string)
• `append` built‑in for building the slice | +| **Side effects** | None; purely functional and side‑effect free. | +| **How it fits the package** | Provides a reusable helper for other spreadsheet manipulation functions that need header metadata, such as filtering or sorting operations in `results_spreadsheet`. | + +#### 3. Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Check first row"} + B -->|"Exists"| C["Iterate over cells"] + C --> D["Convert each cell to string"] + D --> E["Append to headers slice"] + E --> F["Return headers"] +``` + +#### 4. Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetHeadersFromValueRange --> fmt_Sprint + func_GetHeadersFromValueRange --> builtin_append +``` + +#### 5. Functions calling `GetHeadersFromValueRange` (Mermaid) + +```mermaid +graph TD + func_addDescendingSortFilterToSheet --> GetHeadersFromValueRange + func_addFilterByFailedAndMandatoryToSheet --> GetHeadersFromValueRange +``` + +#### 6. Usage example (Go) + +```go +// Minimal example invoking GetHeadersFromValueRange +package main + +import ( + "fmt" + sheets "google.golang.org/api/sheets/v4" +) + +func main() { + // Assume we already fetched values from a sheet + var values *sheets.ValueRange + // ... populate values ... + + headers := GetHeadersFromValueRange(values) + fmt.Println("Sheet headers:", headers) +} +``` + +--- + +--- + +### GetSheetIDByName + +**GetSheetIDByName** - Looks up a sheet’s internal ID within a Google Sheets spreadsheet using the sheet’s title. + +#### Signature (Go) + +```go +func GetSheetIDByName(spreadsheet *sheets.Spreadsheet, name string) (int64, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Looks up a sheet’s internal ID within a Google Sheets spreadsheet using the sheet’s title. 
| +| **Parameters** | `spreadsheet *sheets.Spreadsheet` – parsed spreadsheet data; `name string` – desired sheet title | +| **Return value** | `int64` – the matching sheet’s ID; `error` – nil on success or a descriptive error if not found | +| **Key dependencies** | - `fmt.Errorf` for error construction
- Iteration over `spreadsheet.Sheets` |
+| **Side effects** | None (pure function) |
+| **How it fits the package** | Enables other utilities to reference sheets by ID when constructing API requests (e.g., sorting or filtering). |
+
+#### Internal workflow
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Iterate sheets"}
+    B --> C["Check title match"]
+    C -->|"Match"| D["Return SheetId, nil"]
+    C -->|"No Match"| E["Continue loop"]
+    E -->|"End of list"| F["Return -1, error"]
+```
+
+#### Function dependencies
+
+```mermaid
+graph TD
+    func_GetSheetIDByName --> fmt.Errorf
+```
+
+#### Functions calling `GetSheetIDByName`
+
+```mermaid
+graph TD
+    func_addDescendingSortFilterToSheet --> func_GetSheetIDByName
+    func_addFilterByFailedAndMandatoryToSheet --> func_GetSheetIDByName
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking GetSheetIDByName
+import (
+	"fmt"
+	sheets "google.golang.org/api/sheets/v4"
+)
+
+func main() {
+	// Assume srv is an authorized *sheets.Service, initialized elsewhere,
+	// and a spreadsheet has been fetched.
+	var srv *sheets.Service
+	ss, err := srv.Spreadsheets.Get("spreadsheetId").Do()
+	if err != nil {
+		panic(err)
+	}
+
+	sheetID, err := GetSheetIDByName(ss, "Results")
+	if err != nil {
+		fmt.Printf("Error: %v\n", err)
+		return
+	}
+	fmt.Printf("Sheet ID for 'Results': %d\n", sheetID)
+}
+```
+
+---
+
+### MoveSpreadSheetToFolder
+
+**MoveSpreadSheetToFolder** - Relocates an existing spreadsheet to a target Drive folder by updating its parent references.
+
+```go
+func MoveSpreadSheetToFolder(srv *drive.Service, folder *drive.File, spreadsheet *sheets.Spreadsheet) error
+```
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Relocates an existing spreadsheet to a target Drive folder by updating its parent references. |
+| **Parameters** | `srv` – Google Drive service; `folder` – destination folder file metadata; `spreadsheet` – spreadsheet to move. 
| +| **Return value** | `error` – non‑nil if any API call fails or the file cannot be relocated. | +| **Key dependencies** | • `srv.Files.Get(...).Fields("parents").Do()`
• `srv.Files.Update(...).AddParents(folder.Id)`
• `updateCall.RemoveParents(parent)`
• `log.Fatalf` for fatal errors |
+| **Side effects** | Calls Google Drive API to modify file parents; logs and exits on failure. |
+| **How it fits the package** | Used after a spreadsheet is created to place it under the appropriate folder in the Drive hierarchy. |
+
+#### Internal workflow
+
+```mermaid
+flowchart TD
+    A["Get current parents of spreadsheet"] --> B["Store old parent IDs"]
+    B --> C["Create update call and add new folder ID"]
+    C --> D{"Has old parents?"}
+    D -- Yes --> E["Remove each old parent from update call"]
+    D -- No --> F["Skip removal"]
+    E --> G["Execute update call"]
+    F --> G
+    G --> H["Return nil on success, log fatal on error"]
+```
+
+#### Function dependencies
+
+```mermaid
+graph TD
+    func_MoveSpreadSheetToFolder --> func_Do
+    func_MoveSpreadSheetToFolder --> func_Fields
+    func_MoveSpreadSheetToFolder --> func_Get
+    func_MoveSpreadSheetToFolder --> log_Fatalf
+    func_MoveSpreadSheetToFolder --> func_append
+    func_MoveSpreadSheetToFolder --> func_Update
+    func_MoveSpreadSheetToFolder --> func_AddParents
+    func_MoveSpreadSheetToFolder --> func_len
+    func_MoveSpreadSheetToFolder --> func_RemoveParents
+```
+
+#### Functions calling `MoveSpreadSheetToFolder`
+
+```mermaid
+graph TD
+    func_createSingleWorkloadRawResultsSpreadSheet --> func_MoveSpreadSheetToFolder
+    func_generateResultsSpreadSheet --> func_MoveSpreadSheetToFolder
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking MoveSpreadSheetToFolder
+import (
+	"google.golang.org/api/drive/v3"
+	"google.golang.org/api/sheets/v4"
+)
+
+func example() error {
+	// Assume srv, folder, and spreadsheet are already initialized. 
+ var srv *drive.Service // Google Drive service + var folder *drive.File // Destination folder metadata + var spreadsheet *sheets.Spreadsheet // Spreadsheet to move + + return MoveSpreadSheetToFolder(srv, folder, spreadsheet) +} +``` + +--- + +### NewCommand + +**NewCommand** - Builds and configures the `uploadResultSpreadSheetCmd` Cobra command, defining its flags and marking required ones. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and configures the `uploadResultSpreadSheetCmd` Cobra command, defining its flags and marking required ones. | +| **Parameters** | None | +| **Return value** | A fully configured `*cobra.Command` ready to be added to a parent command. | +| **Key dependencies** | • `uploadResultSpreadSheetCmd.Flags().StringVarP` – defines string flags.
• `uploadResultSpreadSheetCmd.MarkFlagRequired` – enforces required flags.
• `log.Fatalf` – logs fatal errors when flag validation fails. | +| **Side effects** | Modifies the global `uploadResultSpreadSheetCmd` variable by adding flags; may terminate the program via `log.Fatalf`. | +| **How it fits the package** | Provides the CLI entry point for uploading spreadsheets, used by the top‑level upload command. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Define flag: results-file"] + B --> C["Define flag: dest-url"] + C --> D["Define flag: version"] + D --> E["Define flag: credentials"] + E --> F["Mark results-file required"] + F --> G{"Error?"} + G -- Yes --> H["log.Fatalf, return nil"] + G -- No --> I["Mark dest-url required"] + I --> J{"Error?"} + J -- Yes --> K["log.Fatalf, return nil"] + J -- No --> L["Return uploadResultSpreadSheetCmd"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_StringVarP + func_NewCommand --> func_Flags + func_NewCommand --> func_MarkFlagRequired + func_NewCommand --> log.Fatalf +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + upload.NewCommand --> func_NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet" +) + +func main() { + cmd := results_spreadsheet.NewCommand() + // Normally you would add this to a parent command and execute. + _ = cmd +} +``` + +--- + +## Local Functions + +### addBasicFilterToSpreadSheet + +**addBasicFilterToSpreadSheet** - Iterates over all sheets within the provided `spreadsheet` and sets a basic filter covering each sheet’s full range. The function ensures that every tab has filtering enabled, enabling later sort or filter operations. 
+ +#### Signature (Go) + +```go +func addBasicFilterToSpreadSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all sheets within the provided `spreadsheet` and sets a basic filter covering each sheet’s full range. The function ensures that every tab has filtering enabled, enabling later sort or filter operations. | +| **Parameters** | `srv *sheets.Service –` client for Google Sheets API;
`spreadsheet *sheets.Spreadsheet –` the spreadsheet to modify | +| **Return value** | `error –` non‑nil if any API call fails | +| **Key dependencies** | • Calls `append` on a slice of `*sheets.Request`
• Builds `SetBasicFilterRequest` and `BasicFilter` structs
• Invokes `srv.Spreadsheets.BatchUpdate(...).Do()` to apply changes | +| **Side effects** | Modifies the spreadsheet stored in Google Sheets by adding filters; performs a network request to the API. No local state is altered. | +| **How it fits the package** | Part of the results‑spreadsheet utility, this helper finalises the sheet layout before the application of more specific sort or filter rules (e.g., `addDescendingSortFilterToSheet`). It is called from `generateResultsSpreadSheet`. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"For each sheet in spreadsheet"} + B --> C["Create SetBasicFilterRequest"] + C --> D["Append to requests slice"] + B --> E{"End loop"} + E --> F["BatchUpdate API call"] + F --> G{"Success?"} + G -- Yes --> H["Return nil"] + G -- No --> I["Return error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_addBasicFilterToSpreadSheet --> func_append + func_addBasicFilterToSpreadSheet --> func_Do + func_addBasicFilterToSpreadSheet --> func_BatchUpdate +``` + +#### Functions calling `addBasicFilterToSpreadSheet` + +```mermaid +graph TD + func_generateResultsSpreadSheet --> func_addBasicFilterToSpreadSheet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking addBasicFilterToSpreadSheet +srv, _ := sheets.NewService(ctx) +spreadsheet, _ := srv.Spreadsheets.Get("SPREADSHEET_ID").Do() +if err := addBasicFilterToSpreadSheet(srv, spreadsheet); err != nil { + log.Fatalf("Failed to apply basic filters: %v", err) +} +``` + +--- + +--- + +### addDescendingSortFilterToSheet + +**addDescendingSortFilterToSheet** - Sorts a specified column (`colName`) in descending order on the given sheet (`sheetName`) of a Google Sheets document. 
+ +#### Signature (Go) + +```go +func addDescendingSortFilterToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName, colName string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sorts a specified column (`colName`) in descending order on the given sheet (`sheetName`) of a Google Sheets document. | +| **Parameters** | `srv *sheets.Service` – client for interacting with Google Sheets API.
`spreadsheet *sheets.Spreadsheet` – target spreadsheet object.
`sheetName string` – name of the sheet to sort.
`colName string` – header name of the column to sort by. | +| **Return value** | `error` – non‑nil if any step fails (e.g., API call errors, missing headers). | +| **Key dependencies** | • `srv.Spreadsheets.Values.Get(...).Do()`
• `GetHeadersFromValueRange`
• `GetHeaderIndicesByColumnNames`
• `GetSheetIDByName`
• `srv.Spreadsheets.BatchUpdate(...).Do()` | +| **Side effects** | Mutates the spreadsheet on Google Sheets by applying a sort operation; no local state changes. | +| **How it fits the package** | Used during result‑sheet generation to ensure the conclusion sheet is sorted by category before being presented to users. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get sheet values"] --> B["Extract headers"] + B --> C["Find header index for colName"] + C --> D["Retrieve sheet ID"] + D --> E["Create SortRange request"] + E --> F["BatchUpdate spreadsheet with sort"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_addDescendingSortFilterToSheet --> func_GetHeadersFromValueRange + func_addDescendingSortFilterToSheet --> func_GetHeaderIndicesByColumnNames + func_addDescendingSortFilterToSheet --> func_GetSheetIDByName + func_addDescendingSortFilterToSheet --> srv.Spreadsheets.Values.Get + func_addDescendingSortFilterToSheet --> srv.Spreadsheets.BatchUpdate +``` + +#### Functions calling `addDescendingSortFilterToSheet` (Mermaid) + +```mermaid +graph TD + func_generateResultsSpreadSheet --> func_addDescendingSortFilterToSheet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking addDescendingSortFilterToSheet +srv, _, err := CreateSheetsAndDriveServices(credentials) +if err != nil { + log.Fatalf("Unable to create services: %v", err) +} + +spreadsheet, err := srv.Spreadsheets.Get(spreadsheetID).Do() +if err != nil { + log.Fatalf("Unable to retrieve spreadsheet: %v", err) +} + +if err = addDescendingSortFilterToSheet(srv, spreadsheet, "Conclusion", "Category"); err != nil { + log.Fatalf("Failed to apply descending sort filter: %v", err) +} +``` + +--- + +### addFilterByFailedAndMandatoryToSheet + +**addFilterByFailedAndMandatoryToSheet** - Applies a basic filter to the specified sheet so that only rows where the *State* column equals `"failed"` and the *Mandatory/Optional* column equals `"Mandatory"` are 
displayed. + +#### Signature (Go) + +```go +func addFilterByFailedAndMandatoryToSheet(srv *sheets.Service, spreadsheet *sheets.Spreadsheet, sheetName string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Applies a basic filter to the specified sheet so that only rows where the *State* column equals `"failed"` and the *Mandatory/Optional* column equals `"Mandatory"` are displayed. | +| **Parameters** | `srv` – Google Sheets API service; `spreadsheet` – target spreadsheet object; `sheetName` – name of the sheet to filter. | +| **Return value** | `error` – non‑nil if any API call or header lookup fails. | +| **Key dependencies** | - `srv.Spreadsheets.Values.Get`
- `GetHeadersFromValueRange`
- `GetHeaderIndicesByColumnNames`
- `GetSheetIDByName`
- `srv.Spreadsheets.BatchUpdate` | +| **Side effects** | Sends a batch update request to Google Sheets, modifying the sheet’s filter settings. No local state is changed. | +| **How it fits the package** | Used during spreadsheet creation to pre‑configure the results view for end users, ensuring only relevant rows are visible by default. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Retrieve sheet values"] --> B["Extract headers"] + B --> C["Find column indices for State and Mandatory/Optional"] + C --> D["Get sheet ID by name"] + D --> E["Build filter request"] + E --> F["Send batch update to Sheets API"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_addFilterByFailedAndMandatoryToSheet --> func_Do + func_addFilterByFailedAndMandatoryToSheet --> func_Get + func_addFilterByFailedAndMandatoryToSheet --> func_Errorf + func_addFilterByFailedAndMandatoryToSheet --> func_GetHeadersFromValueRange + func_addFilterByFailedAndMandatoryToSheet --> func_GetHeaderIndicesByColumnNames + func_addFilterByFailedAndMandatoryToSheet --> func_GetSheetIDByName + func_addFilterByFailedAndMandatoryToSheet --> func_BatchUpdate +``` + +#### Functions calling `addFilterByFailedAndMandatoryToSheet` (Mermaid) + +```mermaid +graph TD + func_createSingleWorkloadRawResultsSpreadSheet --> func_addFilterByFailedAndMandatoryToSheet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking addFilterByFailedAndMandatoryToSheet +package main + +import ( + "log" + + sheets "google.golang.org/api/sheets/v4" +) + +func main() { + // Assume srv is an authenticated *sheets.Service and spreadsheet has been created. 
+ var srv *sheets.Service // initialized elsewhere + var spreadsheet *sheets.Spreadsheet // previously created + sheetName := "results" + + if err := addFilterByFailedAndMandatoryToSheet(srv, spreadsheet, sheetName); err != nil { + log.Fatalf("failed to apply filter: %v", err) + } +} +``` + +--- + +### createConclusionsSheet + +**createConclusionsSheet** - Builds a new Google Sheets tab that lists each unique workload from the raw results, including category, version, OCP version, and a hyperlink to a dedicated results spreadsheet. + +#### Signature (Go) + +```go +func createConclusionsSheet( + sheetsService *sheets.Service, + driveService *drive.Service, + rawResultsSheet *sheets.Sheet, + mainResultsFolderID string, +) (*sheets.Sheet, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a new Google Sheets tab that lists each unique workload from the raw results, including category, version, OCP version, and a hyperlink to a dedicated results spreadsheet. | +| **Parameters** | `sheetsService *sheets.Service` – client for Sheets API.
`driveService *drive.Service` – client for Drive API.
`rawResultsSheet *sheets.Sheet` – sheet containing raw test data.
`mainResultsFolderID string` – ID of the parent folder where results will be stored. | +| **Return value** | `(*sheets.Sheet, error)` – the constructed conclusion sheet or an error if processing fails. | +| **Key dependencies** | • `createDriveFolder` – creates a sub‑folder for per‑workload spreadsheets.
• `GetHeadersFromSheet`, `GetHeaderIndicesByColumnNames` – locate columns in raw data.
• `createSingleWorkloadRawResultsSpreadSheet` – generates individual workload result files and returns their URLs. | +| **Side effects** | • Creates a Drive folder “Results Per Workload”.
• Calls Sheets API to build the conclusion sheet (but does not upload it).
• Generates hyperlinks that reference newly created spreadsheets. | +| **How it fits the package** | Part of the spreadsheet generation pipeline; invoked by `generateResultsSpreadSheet` after creating the raw results sheet, and before assembling the final workbook. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Create “Results Per Workload” folder"] + B --> C["Read headers from rawResultsSheet"] + C --> D["Get indices for workload name/type/version"] + D --> E{"Validate data exists"} + E -- No --> F["Return error: no workloads"] + E -- Yes --> G["Iterate rows to gather unique workload names"] + G --> H["For each unique name"] + H --> I["Build row values (category, version, OCP, name)"] + I --> J["Create single workload results spreadsheet"] + J --> K["Insert hyperlink formula into Results column"] + K --> L["Append row to conclusionsSheetRows"] + L --> M["Repeat for next workload"] + M --> N["Assemble final Sheet object with headers and rows"] + N --> O["Return conclusionSheet"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + createConclusionsSheet --> createDriveFolder + createConclusionsSheet --> GetHeadersFromSheet + createConclusionsSheet --> GetHeaderIndicesByColumnNames + createConclusionsSheet --> createSingleWorkloadRawResultsSpreadSheet +``` + +#### Functions calling `createConclusionsSheet` (Mermaid) + +```mermaid +graph TD + generateResultsSpreadSheet --> createConclusionsSheet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createConclusionsSheet +package main + +import ( + "log" + "github.com/google/go-github/v50/github" // placeholder for actual imports + resultsspreadsheet "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet" +) + +func main() { + // Assume sheetsService, driveService are already authenticated clients. 
+ var sheetsService *sheets.Service // initialized elsewhere + var driveService *drive.Service // initialized elsewhere + + rawSheet := &sheets.Sheet{ /* populated with raw test data */ } + parentFolderID := "root-folder-id" + + conclusionSheet, err := resultsspreadsheet.CreateConclusionsSheet( + sheetsService, + driveService, + rawSheet, + parentFolderID, + ) + if err != nil { + log.Fatalf("Failed to create conclusions sheet: %v", err) + } + + // conclusionSheet can now be added to a spreadsheet or further processed. +} +``` + +*Note:* The example omits authentication and actual data population for brevity. + +--- + +### createDriveFolder + +**createDriveFolder** - Creates a sub‑folder under the specified `parentFolderID` with the name `folderName`. If a folder of that name already exists in the parent, an error is returned. + +#### Signature (Go) + +```go +func createDriveFolder(srv *drive.Service, folderName, parentFolderID string) (*drive.File, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a sub‑folder under the specified `parentFolderID` with the name `folderName`. If a folder of that name already exists in the parent, an error is returned. | +| **Parameters** | *srv – Google Drive service client
*folderName – desired folder title
*parentFolderID – ID of the parent folder | +| **Return value** | *drive.File – metadata of the created folder (or `nil` on error)
error – description if creation or lookup fails | +| **Key dependencies** | • `fmt.Sprintf`, `fmt.Errorf`
• `srv.Files.List()`, `.Q()`, `.Fields()`
• `srv.Files.Create()`
• `len(files.Files)` check | +| **Side effects** | Network calls to Google Drive API: listing existing files and creating a new folder. No local state is mutated. | +| **How it fits the package** | Utility used by higher‑level spreadsheet generation functions to organize results into folders on Drive before uploading sheets. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Build File metadata"] --> B["Construct query string"] + B --> C["List files in parent folder"] + C --> D{"Folder exists?"} + D -- Yes --> E["Return error “folder already exists”"] + D -- No --> F["Create new folder"] + F --> G["Return created folder"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_createDriveFolder --> fmt.Sprintf + func_createDriveFolder --> fmt.Errorf + func_createDriveFolder --> srv.Files.List + func_createDriveFolder --> Q + func_createDriveFolder --> Fields + func_createDriveFolder --> Do + func_createDriveFolder --> len + func_createDriveFolder --> srv.Files.Create +``` + +#### Functions calling `createDriveFolder` (Mermaid) + +```mermaid +graph TD + func_createConclusionsSheet --> func_createDriveFolder + func_generateResultsSpreadSheet --> func_createDriveFolder +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createDriveFolder +package main + +import ( + "log" + + "google.golang.org/api/drive/v3" +) + +func main() { + // Assume srv is an authorized *drive.Service. 
+ var srv *drive.Service // placeholder for actual service creation + + parentID := "root" // ID of the folder under which to create + folderName := "Results" // Desired sub‑folder name + + folder, err := createDriveFolder(srv, folderName, parentID) + if err != nil { + log.Fatalf("Failed to create folder: %v", err) + } + log.Printf("Created folder ID: %s", folder.Id) +} +``` + +--- + +### createRawResultsSheet + +**createRawResultsSheet** - Reads a CSV file and converts its contents into a `*sheets.Sheet` suitable for inclusion in a Google Spreadsheet. + +#### Signature (Go) + +```go +func createRawResultsSheet(fp string) (*sheets.Sheet, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a CSV file and converts its contents into a `*sheets.Sheet` suitable for inclusion in a Google Spreadsheet. | +| **Parameters** | `fp string – Path to the CSV file to be processed.` | +| **Return value** | `(*sheets.Sheet, error) – The constructed sheet or an error if reading/conversion fails.` | +| **Key dependencies** | • `readCSV` – Loads raw CSV records.
• `prepareRecordsForSpreadSheet` – Formats records into Google Sheets row data.
• `fmt.Errorf` – Wraps errors. | +| **Side effects** | No global state changes; only I/O occurs when opening the file via `readCSV`. | +| **How it fits the package** | Provides the raw results sheet that later functions (e.g., `createConclusionsSheet`) consume to build a full test‑results spreadsheet. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Read CSV"} + B --> C{"Error?"} + C -- Yes --> D["Return error"] + C -- No --> E["Prepare rows"] + E --> F["Build Sheet struct"] + F --> G["Return sheet"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_createRawResultsSheet --> func_readCSV + func_createRawResultsSheet --> func_prepareRecordsForSpreadSheet + func_createRawResultsSheet --> fmt.Errorf +``` + +#### Functions calling `createRawResultsSheet` + +```mermaid +graph TD + func_generateResultsSpreadSheet --> func_createRawResultsSheet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createRawResultsSheet +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet" +) + +func main() { + sheet, err := resultsspreadsheet.createRawResultsSheet("results.csv") + if err != nil { + log.Fatalf("Failed to create raw results sheet: %v", err) + } + // sheet can now be added to a spreadsheet via the Google Sheets API + _ = sheet +} +``` + +--- + +### createSingleWorkloadRawResultsSheet + +**createSingleWorkloadRawResultsSheet** - Builds a new sheet containing only the rows that belong to `workloadName`. The resulting sheet keeps all original columns from `rawResultsSheet` and prefixes two extra columns: “Owner/TechLead Conclusion” and “Next Step Actions”. 
+ +#### Signature (Go) + +```go +func createSingleWorkloadRawResultsSheet(rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Sheet, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a new sheet containing only the rows that belong to `workloadName`. The resulting sheet keeps all original columns from `rawResultsSheet` and prefixes two extra columns: “Owner/TechLead Conclusion” and “Next Step Actions”. | +| **Parameters** | `rawResultsSheet *sheets.Sheet` – source spreadsheet with multiple workloads.
`workloadName string` – name of the workload to filter. | +| **Return value** | `(*sheets.Sheet, error)` – the filtered sheet or an error if the workload cannot be found or headers are missing. | +| **Key dependencies** | • `stringToPointer` (twice) – helper for converting strings to pointers.
• `append` – building new row slices.
• `GetHeadersFromSheet` – extracts header names from a sheet.
• `GetHeaderIndicesByColumnNames` – finds the column index of “CNFName”.
• `fmt.Errorf` – error formatting. | +| **Side effects** | No global state is mutated; only the returned sheet is constructed. | +| **How it fits the package** | Used by `createSingleWorkloadRawResultsSpreadSheet` to produce a per‑workload results spreadsheet before uploading to Google Sheets. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Create header row with two new columns"} + B --> C["Append original headers from raw sheet"] + C --> D["Get index of CNFName"] + D --> E{"Iterate over data rows"} + E -->|"Row matches workload"| F["Prepend empty cells for new columns"] + E -->|"Row does not match"| G["Skip row"] + F --> H["Add to filteredRows"] + G --> H + H --> I["Build sheets.Sheet struct"] + I --> J["Return sheet, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_createSingleWorkloadRawResultsSheet --> func_stringToPointer + func_createSingleWorkloadRawResultsSheet --> func_GetHeadersFromSheet + func_createSingleWorkloadRawResultsSheet --> func_GetHeaderIndicesByColumnNames + func_createSingleWorkloadRawResultsSheet --> fmt_Errorf +``` + +#### Functions calling `createSingleWorkloadRawResultsSheet` (Mermaid) + +```mermaid +graph TD + func_createSingleWorkloadRawResultsSpreadSheet --> func_createSingleWorkloadRawResultsSheet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createSingleWorkloadRawResultsSheet +rawSheet := &sheets.Sheet{ /* populate with data containing multiple workloads */ } +workloadName := "example-workload" + +filtered, err := createSingleWorkloadRawResultsSheet(rawSheet, workloadName) +if err != nil { + log.Fatalf("Failed to filter sheet: %v", err) +} + +// filtered now contains only rows for 'example-workload' plus two new columns. 
+``` + +--- + +### createSingleWorkloadRawResultsSpreadSheet + +**createSingleWorkloadRawResultsSpreadSheet** - Builds a new Google Sheets file containing only the rows that belong to `workloadName`, adds two extra columns for conclusions and next‑step AI, applies filtering on failed/mandatory tests, moves the sheet into the specified Drive folder, and returns the created spreadsheet. + +```go +func createSingleWorkloadRawResultsSpreadSheet(sheetService *sheets.Service, driveService *drive.Service, folder *drive.File, rawResultsSheet *sheets.Sheet, workloadName string) (*sheets.Spreadsheet, error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a new Google Sheets file containing only the rows that belong to `workloadName`, adds two extra columns for conclusions and next‑step AI, applies filtering on failed/mandatory tests, moves the sheet into the specified Drive folder, and returns the created spreadsheet. | +| **Parameters** | `sheetService *sheets.Service` – Google Sheets API client
`driveService *drive.Service` – Google Drive API client
`folder *drive.File` – Destination folder in Drive
`rawResultsSheet *sheets.Sheet` – Sheet with all raw test results
`workloadName string` – Name of the workload to isolate | +| **Return value** | `(*sheets.Spreadsheet, error)` – The created spreadsheet or an error. | +| **Key dependencies** | • `createSingleWorkloadRawResultsSheet` – prepares sheet data.
• `addFilterByFailedAndMandatoryToSheet` – applies a filter on the sheet.
• `MoveSpreadSheetToFolder` – relocates the file in Drive.
• Standard library: `fmt`, `log`. | +| **Side effects** | Creates a new spreadsheet via Sheets API, updates its properties, adds filter requests, and moves the file in Drive. Logs creation message to stdout. | +| **How it fits the package** | This helper is used by `createConclusionsSheet` to generate individual result files for each workload before linking them in the main conclusions sheet. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Call createSingleWorkloadRawResultsSheet"] --> B["Receive sheet data"] + B --> C["Build Spreadsheet object with title and sheet"] + C --> D["Create spreadsheet via Sheets API"] + D --> E["Apply filter on “results” sheet"] + E --> F["Move spreadsheet into Drive folder"] + F --> G["Log creation message"] + G --> H["Return created spreadsheet or error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_createSingleWorkloadRawResultsSpreadSheet --> func_createSingleWorkloadRawResultsSheet + func_createSingleWorkloadRawResultsSpreadSheet --> func_addFilterByFailedAndMandatoryToSheet + func_createSingleWorkloadRawResultsSpreadSheet --> func_MoveSpreadSheetToFolder +``` + +#### Functions calling `createSingleWorkloadRawResultsSpreadSheet` + +```mermaid +graph TD + func_createConclusionsSheet --> func_createSingleWorkloadRawResultsSpreadSheet +``` + +#### Usage example + +```go +// Minimal example invoking createSingleWorkloadRawResultsSpreadSheet +package main + +import ( + "log" + + sheets "google.golang.org/api/sheets/v4" + drive "google.golang.org/api/drive/v3" + + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload/results_spreadsheet" +) + +func main() { + // Assume srvSheets, srvDrive are initialized *sheets.Service and *drive.Service + var srvSheets *sheets.Service + var srvDrive *drive.Service + + // Folder where the new spreadsheet should be placed + folder := &drive.File{Id: "folder-id"} + + // Raw results sheet obtained elsewhere + rawSheet := &sheets.Sheet{} + + workloadName := "my-workload" + + ss, err := results_spreadsheet.createSingleWorkloadRawResultsSpreadSheet(srvSheets, srvDrive, folder, rawSheet, workloadName) + if err != nil { + log.Fatalf("Failed to create spreadsheet: %v", err) + } + log.Printf("Spreadsheet created: %s\n", ss.SpreadsheetUrl) +} +``` + +--- + +### extractFolderIDFromURL + +**extractFolderIDFromURL** - Parses a Google Drive shareable URL and returns the last path segment, which is the folder ID. + +Extracts the folder identifier from a Google Drive URL. + +#### Signature (Go) + +```go +func extractFolderIDFromURL(u string) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a Google Drive shareable URL and returns the last path segment, which is the folder ID. | +| **Parameters** | `u string` – the full URL to parse. | +| **Return value** | `string` – the extracted folder ID; `error` if URL parsing fails. | +| **Key dependencies** | • `net/url.Parse`
• `strings.Split`
• Built‑in `len` function | +| **Side effects** | None (pure function). | +| **How it fits the package** | Utility for locating a root folder in Google Drive before creating or moving spreadsheets. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive URL string"] --> B["Parse URL with net/url.Parse"] + B --> C{"Parsing success?"} + C -- No --> D["Return empty string and error"] + C -- Yes --> E["Split parsedURL.Path by /"] + E --> F["Select last segment as folder ID"] + F --> G["Return folder ID, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_extractFolderIDFromURL --> func_Parse(net/url) + func_extractFolderIDFromURL --> func_Split(strings) + func_extractFolderIDFromURL --> len(built‑in) +``` + +#### Functions calling `extractFolderIDFromURL` (Mermaid) + +```mermaid +graph TD + func_generateResultsSpreadSheet --> func_extractFolderIDFromURL +``` + +#### Usage example (Go) + +```go +// Minimal example invoking extractFolderIDFromURL +url := "https://drive.google.com/drive/folders/1a2B3cD4e5F6g7H8i9J0" +folderID, err := extractFolderIDFromURL(url) +if err != nil { + log.Fatalf("failed to get folder ID: %v", err) +} +fmt.Println("Folder ID:", folderID) +``` + +--- + +### generateResultsSpreadSheet + +**generateResultsSpreadSheet** - Orchestrates the creation of a spreadsheet that stores raw test output and derived conclusions, uploads it to Google Drive, and applies filtering/sorting. + +#### Signature (Go) + +```go +func generateResultsSpreadSheet() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Orchestrates the creation of a spreadsheet that stores raw test output and derived conclusions, uploads it to Google Drive, and applies filtering/sorting. | +| **Parameters** | None | +| **Return value** | None – errors are handled by logging fatal messages which terminate the program. 
| +| **Key dependencies** | • `CreateSheetsAndDriveServices` – obtains Sheets & Drive clients
• `extractFolderIDFromURL` – parses root folder ID from a URL
• `createDriveFolder` – creates destination folders in Drive
• `createRawResultsSheet` – builds the raw data sheet from CSV
• `createConclusionsSheet` – builds the summary sheet
• `MoveSpreadSheetToFolder` – relocates the spreadsheet into the target folder
• `addBasicFilterToSpreadSheet`, `addDescendingSortFilterToSheet` – apply UI filters | +| **Side effects** | • Creates Google Drive folders and a spreadsheet.
• Logs progress and fatal errors to standard output.
• Writes no local files; all operations are network‑based. | +| **How it fits the package** | Acts as the entry point for the `resultsspreadsheet` command, wiring together lower‑level helpers to produce a ready‑to‑share artifact. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Create Sheets & Drive services"] + B --> C["Parse root folder ID"] + C --> D["Create main results folder"] + D --> E["Generate raw results sheet"] + E --> F["Generate conclusions sheet"] + F --> G["Build spreadsheet object"] + G --> H["Upload spreadsheet to Google Sheets"] + H --> I["Move spreadsheet into Drive folder"] + I --> J["Apply basic filter"] + J --> K["Apply descending sort on “Category” column"] + K --> L["Print success URL"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_generateResultsSpreadSheet --> CreateSheetsAndDriveServices + func_generateResultsSpreadSheet --> extractFolderIDFromURL + func_generateResultsSpreadSheet --> createDriveFolder + func_generateResultsSpreadSheet --> createRawResultsSheet + func_generateResultsSpreadSheet --> createConclusionsSheet + func_generateResultsSpreadSheet --> MoveSpreadSheetToFolder + func_generateResultsSpreadSheet --> addBasicFilterToSpreadSheet + func_generateResultsSpreadSheet --> addDescendingSortFilterToSheet +``` + +#### Functions calling `generateResultsSpreadSheet` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// The function has no exported interface; it is invoked internally when running the command. +// A minimal example would simply call it from a main routine after setting required globals. +func main() { + // Assume credentials, rootFolderURL, resultsFilePath, ocpVersion are set globally. 
+ generateResultsSpreadSheet() +} +``` + +--- + +--- + +### prepareRecordsForSpreadSheet + +**prepareRecordsForSpreadSheet** - Transforms a 2‑D slice of string values (`records`) into a slice of `*sheets.RowData` suitable for populating a Google Sheets spreadsheet. It normalises cell content by truncating overly long strings, replacing line breaks with spaces, and ensuring empty cells contain a single space to avoid layout issues. + +#### Signature (Go) + +```go +func prepareRecordsForSpreadSheet(records [][]string) []*sheets.RowData +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a 2‑D slice of string values (`records`) into a slice of `*sheets.RowData` suitable for populating a Google Sheets spreadsheet. It normalises cell content by truncating overly long strings, replacing line breaks with spaces, and ensuring empty cells contain a single space to avoid layout issues. | +| **Parameters** | `records [][]string – raw CSV rows (each row is a slice of column values)` | +| **Return value** | `[]*sheets.RowData – formatted rows ready for the Sheets API` | +| **Key dependencies** | • `len` (builtin)
• `strings.ReplaceAll` (twice per cell)
• `append` (for building slices) | +| **Side effects** | No external I/O or state mutation; purely functional transformation. | +| **How it fits the package** | Used by `createRawResultsSheet` to prepare data before creating a raw results sheet in the spreadsheet upload process. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate rows"} + B --> C["Create empty rowData slice"] + B --> D["Iterate columns"] + D --> E{"Check length > limit"} + E -- Yes --> F["Truncate to cellContentLimit"] + E -- No --> G["Keep original"] + D --> H{"Empty string?"} + H -- Yes --> I["val ="] + H -- No --> J["val = column value"] + D --> K["Replace \r\n and \n with space"] + K --> L["rowData append CellData(val)"] + C --> M["rows append RowData(rowData)"] + B --> N{"All rows processed"} + N --> O["Return rows"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_prepareRecordsForSpreadSheet --> len + func_prepareRecordsForSpreadSheet --> strings.ReplaceAll + func_prepareRecordsForSpreadSheet --> append +``` + +#### Functions calling `prepareRecordsForSpreadSheet` + +```mermaid +graph TD + createRawResultsSheet --> func_prepareRecordsForSpreadSheet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking prepareRecordsForSpreadSheet +records := [][]string{ + {"Name", "Age"}, + {"Alice", "30"}, + {"Bob", ""}, +} +rows := prepareRecordsForSpreadSheet(records) +// rows can now be used to populate a Google Sheets GridData structure. +``` + +--- + +### readCSV + +**readCSV** - Loads the contents of a CSV file located at the given path and returns all rows as a two‑dimensional slice of strings. + +#### Signature (Go) + +```go +func readCSV(string) ([][]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Loads the contents of a CSV file located at the given path and returns all rows as a two‑dimensional slice of strings. 
| +| **Parameters** | `fp string` – File system path to the CSV file. | +| **Return value** | `[][]string` – Slice of records (each record is a slice of fields).
`error` – Error encountered during opening, reading, or parsing the file. | +| **Key dependencies** | • `os.Open` – opens the file.
• `file.Close()` – ensures the file descriptor is released.
• `csv.NewReader` – creates a CSV reader for the file.
• `reader.ReadAll` – parses all records from the file. | +| **Side effects** | • Opens and subsequently closes the specified file.
• No modification of global state or external systems. | +| **How it fits the package** | Utility function used by higher‑level spreadsheet generation routines to ingest raw test results stored in CSV format. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Open file at fp"] --> B{"Error?"} + B -- No --> C["Create csv.Reader"] + C --> D["ReadAll records"] + D --> E{"Error?"} + E -- No --> F["Return records, nil"] + B -- Yes --> G["Return nil, err"] + E -- Yes --> H["Return nil, err"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_readCSV --> os.Open + func_readCSV --> file.Close + func_readCSV --> csv.NewReader + func_readCSV --> reader.ReadAll +``` + +#### Functions calling `readCSV` (Mermaid) + +```mermaid +graph TD + func_createRawResultsSheet --> func_readCSV +``` + +#### Usage example (Go) + +```go +// Minimal example invoking readCSV +records, err := readCSV("/path/to/results.csv") +if err != nil { + log.Fatalf("failed to read CSV: %v", err) +} +fmt.Printf("Read %d rows\n", len(records)) +``` + +--- diff --git a/docs/cmd/certsuite/upload/upload.md b/docs/cmd/certsuite/upload/upload.md new file mode 100644 index 000000000..76d56abbd --- /dev/null +++ b/docs/cmd/certsuite/upload/upload.md @@ -0,0 +1,97 @@ +# Package upload + +**Path**: `cmd/certsuite/upload` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) + +## Overview + +The `upload` package supplies a Cobra command that serves as the root for uploading results spreadsheets to cloud storage within the certsuite CLI. + +### Key Features + +- Provides an exported `NewCommand` function that creates the top‑level upload command and aggregates sub‑commands for handling spreadsheet uploads. +- Integrates with the `results_spreadsheet` subpackage to perform the actual upload logic. +- Uses Cobra conventions, enabling easy extension and consistent CLI behavior. 
+ +### Design Notes + +- Relies on Cobra’s command hierarchy; only one exported constructor (`NewCommand`) is provided. +- The package does not expose any structs or interfaces, keeping the API surface minimal. +- Best practice: invoke `upload.NewCommand()` from the main command setup to register the upload functionality. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Constructs and returns a Cobra command representing the `upload` sub‑command of certsuite. It aggregates sub‑commands that handle uploading results spreadsheets to cloud storage. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Constructs and returns a Cobra command representing the `upload` sub‑command of certsuite. It aggregates sub‑commands that handle uploading results spreadsheets to cloud storage. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs and returns a Cobra command representing the `upload` sub‑command of certsuite. It aggregates sub‑commands that handle uploading results spreadsheets to cloud storage. | +| **Parameters** | None | +| **Return value** | A pointer to a configured `*cobra.Command`. The returned command contains child commands for specific upload operations (e.g., spreadsheet uploads). | +| **Key dependencies** | • Calls `upload.AddCommand(resultsspreadsheet.NewCommand())`
• Relies on the Cobra library (`github.com/spf13/cobra`) to create and manage sub‑commands. | +| **Side effects** | No global state mutation; only creates a new command hierarchy in memory. | +| **How it fits the package** | This function is the entry point for the `upload` feature, exposing it as part of the top‑level certsuite CLI. It registers its child commands so that users can run `certsuite upload ...`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start: NewCommand"] --> B{"Create root upload command"} + B --> C["Add sub-command: resultsspreadsheet.NewCommand()"] + C --> D["Return configured upload command"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCommand --> func_AddCommand + func_NewCommand --> func_resultsspreadsheet_NewCommand +``` + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_newRootCmd --> func_NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand to obtain the upload command. +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/upload" +) + +func main() { + uploadCmd := upload.NewCommand() + // uploadCmd can now be added to a root Cobra command or executed directly. +} +``` + +--- diff --git a/docs/cmd/certsuite/version/version.md b/docs/cmd/certsuite/version/version.md new file mode 100644 index 000000000..eec1a9b2e --- /dev/null +++ b/docs/cmd/certsuite/version/version.md @@ -0,0 +1,154 @@ +# Package version + +**Path**: `cmd/certsuite/version` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [NewCommand](#newcommand) +- [Local Functions](#local-functions) + - [showVersion](#showversion) + +## Overview + +Provides the `certsuite version` CLI subcommand that outputs the binary’s Git‑based build information and the supported claim file format version. 
+ +### Key Features + +- Creates a pre‑configured Cobra command for displaying version details +- Utilises the external versions package to obtain the Git commit hash and claim format + +### Design Notes + +- The command is constructed via an exported factory function (`NewCommand`) rather than being defined globally +- It depends on the `github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions` package for version strings +- Users should invoke `NewCommand()` during root command initialization to register the subcommand + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewCommand() *cobra.Command](#newcommand) | Returns a pre‑configured `*cobra.Command` that implements the `certsuite version` CLI command. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func showVersion(cmd *cobra.Command, _ []string) error](#showversion) | Displays the binary’s Git‑based version string and the supported claim file format version. | + +## Exported Functions + +### NewCommand + +**NewCommand** - Returns a pre‑configured `*cobra.Command` that implements the `certsuite version` CLI command. + +#### Signature (Go) + +```go +func NewCommand() *cobra.Command +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a pre‑configured `*cobra.Command` that implements the `certsuite version` CLI command. | +| **Parameters** | None | +| **Return value** | A pointer to a `cobra.Command` instance (`versionCmd`) that is defined elsewhere in this package. | +| **Key dependencies** | *Uses the global variable `versionCmd` from the same package.
* Relies on the Cobra library for command handling. | +| **Side effects** | None – simply returns an existing object; no state changes or I/O occur within the function itself. | +| **How it fits the package** | Provides a public factory used by the root command builder (`newRootCmd`) to register the `version` sub‑command in the CLI application. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + func_NewCommand --> versionCmd +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `NewCommand` (Mermaid) + +```mermaid +graph TD + func_newRootCmd --> func_NewCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCommand +cmd := version.NewCommand() +fmt.Println(cmd.Use) // prints "version" +``` + +--- + +## Local Functions + +### showVersion + +**showVersion** - Displays the binary’s Git‑based version string and the supported claim file format version. + +Prints the current Certsuite and Claim file version information to standard output. + +```go +func showVersion(cmd *cobra.Command, _ []string) error +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Displays the binary’s Git‑based version string and the supported claim file format version. | +| **Parameters** | `cmd *cobra.Command` – command context (unused); `_ []string` – positional arguments (ignored). | +| **Return value** | `error` – always `nil`; the function is intended for side‑effects only. | +| **Key dependencies** | • `fmt.Printf` to write to stdout.
• `github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions.GitVersion()` for the build metadata.
• `github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions.ClaimFormatVersion` constant. | +| **Side effects** | Writes two formatted lines to standard output; no state mutation or concurrency. | +| **How it fits the package** | Used by the CLI’s “version” sub‑command to expose build information to users. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Print Certsuite version"} + B --> C["Call fmt.Printf with GitVersion()"] + C --> D{"Print Claim file version"} + D --> E["Call fmt.Printf with ClaimFormatVersion"] + E --> F["Return nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_showVersion --> fmt_Printf + func_showVersion --> versions_GitVersion + func_showVersion --> versions_ClaimFormatVersion +``` + +#### Functions calling `showVersion` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking showVersion +package main + +import ( + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{} + if err := showVersion(cmd, nil); err != nil { + panic(err) + } +} +``` + +--- diff --git a/docs/internal/cli/cli.md b/docs/internal/cli/cli.md new file mode 100644 index 000000000..83d8fc192 --- /dev/null +++ b/docs/internal/cli/cli.md @@ -0,0 +1,1386 @@ +# Package cli + +**Path**: `internal/cli` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) +- [Exported Functions](#exported-functions) + - [LineAlignCenter](#linealigncenter) + - [LineAlignLeft](#linealignleft) + - [LineColor](#linecolor) + - [PrintBanner](#printbanner) + - [PrintCheckAborted](#printcheckaborted) + - [PrintCheckErrored](#printcheckerrored) + - [PrintCheckFailed](#printcheckfailed) + - [PrintCheckPassed](#printcheckpassed) + - [PrintCheckRunning](#printcheckrunning) + - [PrintCheckSkipped](#printcheckskipped) + - [PrintResultsTable](#printresultstable) + - [WrapLines](#wraplines) + - 
[cliCheckLogSniffer.Write](#clichecklogsniffer.write) +- [Local Functions](#local-functions) + - [cropLogLine](#croplogline) + - [getTerminalWidth](#getterminalwidth) + - [isTTY](#istty) + - [printRunningCheckLine](#printrunningcheckline) + - [stopCheckLineGoroutine](#stopchecklinegoroutine) + - [updateRunningCheckLine](#updaterunningcheckline) + +## Overview + +The `cli` package supplies terminal‑centric utilities for the Certsuite tool, including a startup banner, ANSI colour handling, dynamic progress lines during check execution, and formatted output of test case summaries. + +### Key Features + +- ANSI colour helpers (e.g., LineColor) to wrap text safely in terminals +- A background goroutine that updates a single line with elapsed time and recent log messages for running checks +- Convenience functions to print the status of each check – passed, failed, skipped, aborted, or errored + +### Design Notes + +- Uses `golang.org/x/term` to detect TTY and adjust output accordingly; non‑TTY streams suppress dynamic updates +- A dedicated channel (`checkLoggerChan`) feeds log snippets to the running‑line goroutine, with a stop channel (`stopChan`) signalling termination +- The package keeps state minimal – no global mutexes – relying on Go channels for safe concurrency + +### Structs Summary + +| Name | Purpose | +|------|----------| + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LineAlignCenter(s string, w int) string](#linealigncenter) | Produces a new string of length `w` with the input string `s` horizontally centered. Padding is added on both sides using spaces. | +| [func LineAlignLeft(s string, w int) string](#linealignleft) | Returns the input string padded with spaces on the right so that its total length equals `w`. If the string is longer than `w`, it is returned unchanged. 
| +| [func LineColor(s, color string) string](#linecolor) | Wraps a given string with a specified ANSI color code and appends the reset code so that subsequent terminal output is unaffected. | +| [func()()](#printbanner) | Outputs a static banner string (`banner`) to `stdout` using `fmt.Print`. | +| [func(string, string)()](#printcheckaborted) | Signals that a compliance check has been aborted and displays the check name along with an optional reason. | +| [func PrintCheckErrored(checkName string)](#printcheckerrored) | Signals that a specific check has finished with an error, stops any ongoing line‑printing goroutine, and outputs a formatted message to the console. | +| [func PrintCheckFailed(checkName string)()](#printcheckfailed) | Terminates the background line‑printing goroutine and outputs a formatted failure message for the given check. | +| [func PrintCheckPassed(checkName string)](#printcheckpassed) | Displays a “passed” status for the given check name, prefixed by a clear‑line escape code and a pass tag. It also signals any running line‑progress goroutine to terminate. | +| [func PrintCheckRunning(checkName string) ()](#printcheckrunning) | Outputs the initial “running” message for a check and launches a background goroutine that periodically updates this line with timing or log information. | +| [func PrintCheckSkipped(checkName, reason string)](#printcheckskipped) | Reports that a check was skipped and displays the skip reason. | +| [func PrintResultsTable(results map[string][]int)](#printresultstable) | Outputs a human‑readable table summarizing the number of passed, failed, and skipped checks per test suite. | +| [func WrapLines(text string, maxWidth int) []string](#wraplines) | Splits an arbitrary text block into multiple lines so that each line does not exceed `maxWidth` characters. It preserves existing line breaks and wraps long words by splitting on whitespace. 
| +| [func (c *cliCheckLogSniffer) Write(p []byte) (n int, err error)](#clichecklogsniffer.write) | Provides an `io.Write` implementation that forwards log messages to a channel when running in a terminal, otherwise silently discards them. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func cropLogLine(line string, maxAvailableWidth int) string](#croplogline) | Removes new‑line characters from the supplied `line` and truncates it so that its length does not exceed `maxAvailableWidth`. | +| [func() int](#getterminalwidth) | Obtains the width of the terminal window in columns, allowing other functions to format output appropriately. | +| [func isTTY() bool](#istty) | Returns true if the program’s stdin is a terminal (tty/ptty), false otherwise. | +| [func printRunningCheckLine(checkName string, startTime time.Time, logLine string) ()](#printrunningcheckline) | Renders a line to the terminal showing a running check’s name, elapsed time and, if space permits, its most recent log message. It handles both TTY (interactive) and non‑TTY environments. | +| [func stopCheckLineGoroutine()](#stopchecklinegoroutine) | Signals the goroutine that prints a moving “checking…” line to terminate and disables the channel used for this purpose. | +| [func(string, <-chan bool)()](#updaterunningcheckline) | Continuously refreshes a status line in the terminal to show elapsed time and the most recent log message for an ongoing check. | + +## Structs + +## Exported Functions + +### LineAlignCenter + +**LineAlignCenter** - Produces a new string of length `w` with the input string `s` horizontally centered. Padding is added on both sides using spaces. + +#### Signature + +```go +func LineAlignCenter(s string, w int) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a new string of length `w` with the input string `s` horizontally centered. Padding is added on both sides using spaces. 
| +| **Parameters** | `s string` – text to center.
`w int` – desired total width (including padding). | +| **Return value** | A formatted string of length `w` containing `s` centered. | +| **Key dependencies** | • `fmt.Sprintf` from the standard library
• Built‑in function `len` to compute string length | +| **Side effects** | None; pure function with no state mutation or I/O. | +| **How it fits the package** | Utility used by the CLI for formatting output boxes, ensuring labels and titles appear centered in terminal tables. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Input string `s`"] --> B{"Calculate padding"} + B --> C["Center position: (w+len(s))/2"] + C --> D["Format right‑justified string with width `-w`"] + D --> E["Return padded, centered string"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_LineAlignCenter --> fmt.Sprintf + func_LineAlignCenter --> len +``` + +#### Functions calling `LineAlignCenter` + +```mermaid +graph TD + printTestCaseInfoBox --> func_LineAlignCenter +``` + +#### Usage example + +```go +// Minimal example invoking LineAlignCenter +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + centered := cli.LineAlignCenter("HELLO", 20) + fmt.Println("|" + centered + "|") // prints: | HELLO | +} +``` + +--- + +### LineAlignLeft + +**LineAlignLeft** - Returns the input string padded with spaces on the right so that its total length equals `w`. If the string is longer than `w`, it is returned unchanged. + +#### Signature (Go) + +```go +func LineAlignLeft(s string, w int) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the input string padded with spaces on the right so that its total length equals `w`. If the string is longer than `w`, it is returned unchanged. | +| **Parameters** | `s` – the string to align.
`w` – desired width of the output. | +| **Return value** | A new string left‑aligned to width `w`. | +| **Key dependencies** | * `fmt.Sprintf` – used for formatting with a negative field width. | +| **Side effects** | None. The function is pure; it does not modify global state or perform I/O. | +| **How it fits the package** | Utility for console output in the CLI, ensuring consistent column widths when printing tables or boxes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Input string s & width w"] --> B["Call fmt.Sprintf with %[1]*s and -w"] + B --> C["Return padded string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LineAlignLeft --> func_Sprintf(fmt) +``` + +#### Functions calling `LineAlignLeft` (Mermaid) + +```mermaid +graph TD + func_printTestCaseInfoBox --> func_LineAlignLeft +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LineAlignLeft +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + s := "Example text" + width := 20 + leftAligned := cli.LineAlignLeft(s, width) + fmt.Printf("'%s'\n", leftAligned) // Output: 'Example text ' +} +``` + +--- + +### LineColor + +**LineColor** - Wraps a given string with a specified ANSI color code and appends the reset code so that subsequent terminal output is unaffected. + +Adds an ANSI color escape sequence to a string and resets the terminal formatting afterwards. + +```go +func LineColor(s, color string) string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Wraps a given string with a specified ANSI color code and appends the reset code so that subsequent terminal output is unaffected. | +| **Parameters** | `s` – the text to colour; `color` – an ANSI escape sequence such as `cli.Cyan` or `cli.Green`. | +| **Return value** | The coloured string: ``. | +| **Key dependencies** | Uses the package‑level constant `Reset` (the ANSI reset code). 
| +| **Side effects** | Pure function; no state mutation, I/O, or concurrency. | +| **How it fits the package** | Provides a lightweight helper for colouring CLI output used throughout the `cli` package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Return"} + B --> C[" + s + Reset"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `LineColor` + +```mermaid +graph TD + func_printTestCaseInfoBox --> func_LineColor +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LineColor +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + text := cli.LineColor("Hello, world!", cli.Green) + fmt.Println(text) // Prints the string in green followed by a reset +} +``` + +--- + +### PrintBanner + +**PrintBanner** - Outputs a static banner string (`banner`) to `stdout` using `fmt.Print`. + +The function displays a pre‑defined ASCII banner to standard output when the Certsuite CLI starts up. + +--- + +#### Signature (Go) + +```go +func()() +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Outputs a static banner string (`banner`) to `stdout` using `fmt.Print`. | +| **Parameters** | None. | +| **Return value** | None. | +| **Key dependencies** | • Calls `fmt.Print(banner)` from the standard library.
• Relies on the package‑level variable `banner`. | +| **Side effects** | Writes to `stdout`; no state mutation beyond I/O. | +| **How it fits the package** | Provides a visual cue when Certsuite starts, called during the `Startup` routine in the top‑level package. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph PrintBanner + A["PrintBanner"] --> B["fmt.Print(banner)"] + end +``` + +--- + +#### Function dependencies + +```mermaid +graph TD + func_PrintBanner --> fmt.Print +``` + +--- + +#### Functions calling `PrintBanner` + +```mermaid +graph TD + func_Startup --> func_PrintBanner +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking PrintBanner +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + cli.PrintBanner() +} +``` + +--- + +### PrintCheckAborted + +**PrintCheckAborted** - Signals that a compliance check has been aborted and displays the check name along with an optional reason. + +#### Signature (Go) + +```go +func(string, string)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Signals that a compliance check has been aborted and displays the check name along with an optional reason. | +| **Parameters** | `checkName string` – Identifier of the check;
`reason string` – Explanation for the abortion (may be empty). | +| **Return value** | None (void). | +| **Key dependencies** | • Calls `stopCheckLineGoroutine()` to halt any ongoing line‑printing goroutine.
• Uses `fmt.Print` to output a formatted message. | +| **Side effects** | Stops the check‑line goroutine, writes text to standard output, and may alter global channel state (`stopChan`). | +| **How it fits the package** | Part of the internal CLI utilities that report check results; invoked when a check terminates prematurely. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Stop line goroutine"} + B --> C["Call stopCheckLineGoroutine()"] + C --> D["Print aborted message"] + D --> E["fmt.Print output"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_PrintCheckAborted --> func_stopCheckLineGoroutine + func_PrintCheckAborted --> fmt.Print +``` + +#### Functions calling `PrintCheckAborted` + +```mermaid +graph TD + printCheckResult --> PrintCheckAborted +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PrintCheckAborted +func main() { + cli.PrintCheckAborted("example-check", "timeout reached") +} +``` + +--- + +### PrintCheckErrored + +**PrintCheckErrored** - Signals that a specific check has finished with an error, stops any ongoing line‑printing goroutine, and outputs a formatted message to the console. + +#### Signature (Go) + +```go +func PrintCheckErrored(checkName string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Signals that a specific check has finished with an error, stops any ongoing line‑printing goroutine, and outputs a formatted message to the console. | +| **Parameters** | `checkName` string – Identifier of the check that failed. | +| **Return value** | None | +| **Key dependencies** | • Calls `stopCheckLineGoroutine()` to terminate any spinner or progress line.
• Uses `fmt.Print` (via `Print`) to write the error message. | +| **Side effects** | • Stops a goroutine that updates the console line.
• Writes an error status line to standard output. | +| **How it fits the package** | Part of the CLI helper set for displaying check results; called by the checks database when a check ends in `CheckResultError`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["PrintCheckErrored"] --> B["stopCheckLineGoroutine"] + A --> C["fmt.Print(ClearLineCode + ' [ ' + CheckResultTagError + ' ] ' + checkName + '\\n')"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_PrintCheckErrored --> func_stopCheckLineGoroutine + func_PrintCheckErrored --> fmt.Print +``` + +#### Functions calling `PrintCheckErrored` (Mermaid) + +```mermaid +graph TD + pkg_checksdb.printCheckResult --> func_PrintCheckErrored +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PrintCheckErrored +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + cli.PrintCheckErrored("example-check") +} +``` + +--- + +### PrintCheckFailed + +**PrintCheckFailed** - Terminates the background line‑printing goroutine and outputs a formatted failure message for the given check. + +#### Signature (Go) + +```go +func PrintCheckFailed(checkName string)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Terminates the background line‑printing goroutine and outputs a formatted failure message for the given check. | +| **Parameters** | `checkName` – the identifier of the failed check (string). | +| **Return value** | None (`void`). | +| **Key dependencies** | • `stopCheckLineGoroutine()` – stops the auxiliary goroutine that updates the progress line.
• `fmt.Print()` – writes the final failure message to standard output. | +| **Side effects** | • Sends a stop signal via `stopChan` and clears it.
• Prints a new line containing `[ ✘ ] ` with ANSI clear‑line control codes. | +| **How it fits the package** | Part of the CLI printing utilities; invoked by the checks database layer when a check result is marked as failed. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["PrintCheckFailed"] --> B{"Stop goroutine"} + B --> C["stopCheckLineGoroutine()"] + A --> D["Print failure line"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_PrintCheckFailed --> func_stopCheckLineGoroutine + func_PrintCheckFailed --> fmt_Print +``` + +#### Functions calling `PrintCheckFailed` (Mermaid) + +```mermaid +graph TD + func_printCheckResult --> func_PrintCheckFailed +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PrintCheckFailed +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + cli.PrintCheckFailed("KubeletRunning") +} +``` + +--- + +### PrintCheckPassed + +**PrintCheckPassed** - Displays a “passed” status for the given check name, prefixed by a clear‑line escape code and a pass tag. It also signals any running line‑progress goroutine to terminate. + +Prints a formatted message indicating that a check has passed and stops the line‑progress goroutine. + +--- + +#### Signature (Go) + +```go +func PrintCheckPassed(checkName string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Displays a “passed” status for the given check name, prefixed by a clear‑line escape code and a pass tag. It also signals any running line‑progress goroutine to terminate. | +| **Parameters** | `checkName string` – Identifier of the check that has passed. | +| **Return value** | None. | +| **Key dependencies** | • `stopCheckLineGoroutine()` – stops the background progress indicator.
• `fmt.Print` – writes the formatted output to stdout. | +| **Side effects** | • Sends a stop signal on the `stopChan` channel, then clears it.
• Prints to standard output (console). | +| **How it fits the package** | Part of the internal CLI utilities that render check results; invoked by higher‑level logic when a check finishes successfully. | + +--- + +#### Internal workflow + +```mermaid +flowchart TD + A["PrintCheckPassed"] --> B{"stopCheckLineGoroutine()"} + B --> C["Send true on stopChan"] + C --> D["Set stopChan = nil"] + A --> E["fmt.Print(formatted message)"] +``` + +--- + +#### Function dependencies + +```mermaid +graph TD + func_PrintCheckPassed --> func_stopCheckLineGoroutine + func_PrintCheckPassed --> fmt_Print +``` + +--- + +#### Functions calling `PrintCheckPassed` + +```mermaid +graph TD + func_printCheckResult --> func_PrintCheckPassed +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking PrintCheckPassed +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + cli.PrintCheckPassed("example-check-id") +} +``` + +--- + +### PrintCheckRunning + +**PrintCheckRunning** - Outputs the initial “running” message for a check and launches a background goroutine that periodically updates this line with timing or log information. + +#### Signature (Go) + +```go +func PrintCheckRunning(checkName string) () +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Outputs the initial “running” message for a check and launches a background goroutine that periodically updates this line with timing or log information. | +| **Parameters** | `checkName string` – Identifier of the check to display. | +| **Return value** | None (void). | +| **Key dependencies** | • `make` – creates two channels (`stopChan`, `checkLoggerChan`).
• `isTTY()` – determines if output is a terminal.
• `fmt.Print` – writes the initial line to stdout.
• `updateRunningCheckLine` – background goroutine that refreshes the running line. | +| **Side effects** | • Creates global channels (`stopChan`, `checkLoggerChan`).
• Writes directly to standard output.
• Spawns a goroutine that may run until signaled via `stopChan`. | +| **How it fits the package** | Part of the CLI helper functions that provide user‑facing status updates during check execution. It is invoked by `Check.Run` before the actual check logic begins. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start PrintCheckRunning"] --> B{"Create Channels"} + B --> C["Build line string"] + C --> D{"TTY?"} + D -- yes --> E["Print line"] + D -- no --> F["Append newline, Print line"] + E --> G["Launch updateRunningCheckLine goroutine"] + F --> G +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_PrintCheckRunning --> make + func_PrintCheckRunning --> isTTY + func_PrintCheckRunning --> fmt.Print + func_PrintCheckRunning --> func_updateRunningCheckLine +``` + +#### Functions calling `PrintCheckRunning` (Mermaid) + +```mermaid +graph TD + func_Check_Run --> func_PrintCheckRunning +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PrintCheckRunning +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + cli.PrintCheckRunning("example-check") + // The function returns immediately; the status line will be updated by a goroutine. +} +``` + +--- + +### PrintCheckSkipped + +**PrintCheckSkipped** - Reports that a check was skipped and displays the skip reason. + +Prints a message indicating that a check has been skipped along with the reason. + +--- + +#### Signature (Go) + +```go +func PrintCheckSkipped(checkName, reason string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reports that a check was skipped and displays the skip reason. | +| **Parameters** | `checkName` – identifier of the check; `reason` – explanation for skipping. | +| **Return value** | None (void). | +| **Key dependencies** | • Calls `stopCheckLineGoroutine()` to terminate any line‑printing goroutine.
• Uses `fmt.Print` with ANSI escape codes (`ClearLineCode`, `CheckResultTagSkip`). | +| **Side effects** | • Emits output to standard out.
• Stops a goroutine that prints progress lines. | +| **How it fits the package** | Part of the CLI helper set; invoked by higher‑level check reporting logic when a check is skipped. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Check skip reason?"} + B --> C["Stop line goroutine"] + C --> D["Print SKIP + checkName + (reason)"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_PrintCheckSkipped --> func_stopCheckLineGoroutine + func_PrintCheckSkipped --> fmt.Print +``` + +--- + +#### Functions calling `PrintCheckSkipped` (Mermaid) + +```mermaid +graph TD + func_printCheckResult --> func_PrintCheckSkipped +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking PrintCheckSkipped +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + cli.PrintCheckSkipped("check-123", "no relevant objects found") +} +``` + +--- + +### PrintResultsTable + +**PrintResultsTable** - Outputs a human‑readable table summarizing the number of passed, failed, and skipped checks per test suite. + +#### Signature (Go) + +```go +func PrintResultsTable(results map[string][]int) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Outputs a human‑readable table summarizing the number of passed, failed, and skipped checks per test suite. | +| **Parameters** | `results` – mapping from suite name to a slice containing `[passed, failed, skipped]` counts. | +| **Return value** | None (void). | +| **Key dependencies** | *`fmt.Printf` – for formatted lines.
* `fmt.Println` – for separator lines. | +| **Side effects** | Writes directly to standard output; no state changes within the package. | +| **How it fits the package** | Used by the CLI layer after all checks have run to present a concise results table to the user. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Print empty line"] + B --> C["Print header separator"] + C --> D["Print column titles"] + D --> E["Print header separator"] + E --> F{"Iterate over results"} + F --> G["Print suite row"] + G --> H["Print separator"] + H --> I["End of loop"] + I --> J["Print empty line"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_PrintResultsTable --> fmt.Printf + func_PrintResultsTable --> fmt.Println +``` + +#### Functions calling `PrintResultsTable` (Mermaid) + +```mermaid +graph TD + func_RunChecks --> func_PrintResultsTable +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PrintResultsTable +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + // Example result set: suite → [passed, failed, skipped] + results := map[string][]int{ + "Network Checks": {12, 1, 0}, + "Security Checks": {9, 3, 2}, + } + + cli.PrintResultsTable(results) +} +``` + +--- + +### WrapLines + +**WrapLines** - Splits an arbitrary text block into multiple lines so that each line does not exceed `maxWidth` characters. It preserves existing line breaks and wraps long words by splitting on whitespace. + +#### Signature (Go) + +```go +func WrapLines(text string, maxWidth int) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Splits an arbitrary text block into multiple lines so that each line does not exceed `maxWidth` characters. It preserves existing line breaks and wraps long words by splitting on whitespace. | +| **Parameters** | `text string – the raw text to wrap`
`maxWidth int` – maximum allowed width per line | +| **Return value** | `[]string` – slice of wrapped lines | +| **Key dependencies** | • `strings.Split`
• `make`
• `len` (multiple calls)
• `append` (multiple calls)
• `strings.Fields` | +| **Side effects** | None. The function is pure; it only reads its arguments and returns a new slice. | +| **How it fits the package** | Used by CLI utilities to format long descriptions, remediation steps, or other multiline strings for display in a terminal table. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Split input on `\n`"} + B --> C{"For each line"} + C --> D{"Line length ≤ maxWidth?"} + D -- Yes --> E["Append original line to result"] + D -- No --> F{"Tokenize with Fields()"} + F --> G{"Iterate words"} + G --> H{"Current line + word + space ≤ maxWidth?"} + H -- Yes --> I["Add word to currentLine"] + H -- No --> J["Append currentLine, start new line with word"] + J --> G + G --> K["After loop: append last currentLine"] + K --> C + E --> C + C --> L["Return result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_WrapLines --> strings.Split + func_WrapLines --> make + func_WrapLines --> len + func_WrapLines --> append + func_WrapLines --> strings.Fields +``` + +#### Functions calling `WrapLines` (Mermaid) + +```mermaid +graph TD + func_printTestCaseInfoBox --> func_WrapLines +``` + +#### Usage example (Go) + +```go +// Minimal example invoking WrapLines +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + text := "This is a long line that will be wrapped into multiple lines of maximum width." + lines := cli.WrapLines(text, 20) + for _, l := range lines { + fmt.Println(l) + } +} +``` + +--- + +### cliCheckLogSniffer.Write + +**Write** - Provides an `io.Write` implementation that forwards log messages to a channel when running in a terminal, otherwise silently discards them. 
+

#### Signature (Go)

```go
func (c *cliCheckLogSniffer) Write(p []byte) (n int, err error)
```

#### Summary Table

| Aspect | Details |
|--------|---------|
| **Purpose** | Provides an `io.Writer` implementation that forwards log messages to a channel when running in a terminal, otherwise silently discards them. |
| **Parameters** | `p []byte` – the data to write (log message) |
| **Return value** | `n int` – number of bytes processed (always len(p));
`err error` – always nil | +| **Key dependencies** | • `isTTY()` – checks terminal status
• `len(p)` – byte length calculation
• `string(p)` – conversion to string for channel transmission | +| **Side effects** | Writes a string representation of `p` into the global `checkLoggerChan` if it is ready; otherwise does nothing. Does not block the caller. No state changes on `c`. | +| **How it fits the package** | Enables the CLI component to capture and forward log output from the `slog` logger to a dedicated channel used by the checks subsystem, while avoiding I/O overhead in non‑TTY environments. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph CheckWriter["cliCheckLogSniffer.Write"] + A["Start"] --> B{"isTTY?"} + B -- No --> C["Return len(p), nil"] + B -- Yes --> D["Attempt to send string(p) on checkLoggerChan (non‑blocking)"] + D --> E["Return len(p), nil"] + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_cliCheckLogSniffer.Write --> func_isTTY + func_cliCheckLogSniffer.Write --> func_len + func_cliCheckLogSniffer.Write --> func_string +``` + +#### Functions calling `cliCheckLogSniffer.Write` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking cliCheckLogSniffer.Write +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + var sniffer cli.CliCheckLogSniffer // assume exported constructor or zero value + data := []byte("example log message") + n, err := sniffer.Write(data) + if err != nil { + fmt.Println("write error:", err) + } + fmt.Printf("Wrote %d bytes\n", n) +} +``` + +--- + +## Local Functions + +### cropLogLine + +**cropLogLine** - Removes new‑line characters from the supplied `line` and truncates it so that its length does not exceed `maxAvailableWidth`. 
+ +#### Signature (Go) + +```go +func cropLogLine(line string, maxAvailableWidth int) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Removes new‑line characters from the supplied `line` and truncates it so that its length does not exceed `maxAvailableWidth`. | +| **Parameters** | `line` (string) – raw log output;
`maxAvailableWidth` (int) – maximum number of printable characters allowed. | +| **Return value** | A sanitized string that fits within the specified width. | +| **Key dependencies** | • `strings.ReplaceAll` (to strip line feeds)
• Built‑in `len` function |
| **Side effects** | None; purely functional. |
| **How it fits the package** | Used by `printRunningCheckLine` to display a concise, single‑line log fragment when terminal width is limited. |

#### Internal workflow (Mermaid)

```mermaid
flowchart TD
    A(Start) --> B{"Remove newlines"}
    B --> C["filteredLine := strings.ReplaceAll(line,\\n, )"]
    C --> D{"Length check"}
    D -->|"len(filteredLine)>maxAvailableWidth"| E["Return first maxAvailableWidth chars"]
    D -->|"else"| F["Return filteredLine"]
    E --> G(End)
    F --> G
```

#### Function dependencies (Mermaid)

```mermaid
graph TD
    func_cropLogLine --> func_ReplaceAll
    func_cropLogLine --> func_len
```

#### Functions calling `cropLogLine` (Mermaid)

```mermaid
graph TD
    func_printRunningCheckLine --> func_cropLogLine
```

#### Usage example (Go)

```go
// Minimal example invoking cropLogLine
package main

import (
    "fmt"
)

func main() {
    log := "This is a very long log line that might not fit."
    cropped := cropLogLine(log, 30)
    fmt.Println(cropped) // Output: "This is a very long log line t"
}
```

---

### getTerminalWidth

**getTerminalWidth** - Obtains the width of the terminal window in columns, allowing other functions to format output appropriately.

#### Signature (Go)

```go
func getTerminalWidth() int
```

#### Summary Table

| Aspect | Details |
|--------|---------|
| **Purpose** | Obtains the width of the terminal window in columns, allowing other functions to format output appropriately. |
| **Parameters** | None |
| **Return value** | `int` – number of columns available in the current terminal; returns zero if an error occurs during size retrieval (implicitly handled by `term.GetSize`). |
| **Key dependencies** | • `golang.org/x/term.GetSize(int)`
• `os.Stdin.Fd()`
• Type conversion to `int` | +| **Side effects** | No state mutation; performs a system call to query terminal size. | +| **How it fits the package** | Used by CLI formatting helpers (e.g., `printRunningCheckLine`) to adapt output width when running in an interactive terminal. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve FD"} + B --> C["Convert FD to int"] + C --> D["Call term.GetSize(width, height)"] + D --> E["Return width"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getTerminalWidth --> golang.org/x/term.GetSize + func_getTerminalWidth --> int + func_getTerminalWidth --> os.Stdin.Fd +``` + +#### Functions calling `getTerminalWidth` + +```mermaid +graph TD + func_printRunningCheckLine --> func_getTerminalWidth +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getTerminalWidth +func main() { + width := getTerminalWidth() + fmt.Printf("Current terminal width: %d columns\n", width) +} +``` + +--- + +### isTTY + +**isTTY** - Returns true if the program’s stdin is a terminal (tty/ptty), false otherwise. + +Detects whether the current process’s standard input is attached to a terminal. + +```go +func isTTY() bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns true if the program’s stdin is a terminal (tty/ptty), false otherwise. | +| **Parameters** | None | +| **Return value** | `bool` – `true` when stdin is a terminal, `false` otherwise. | +| **Key dependencies** | *`golang.org/x/term.IsTerminal`* – checks terminal status.
*`os.Stdin.Fd()`* – obtains file descriptor of stdin.
*`int` conversion* – converts the file descriptor to an integer for the API call. | +| **Side effects** | None – purely introspective. | +| **How it fits the package** | Used by various CLI helpers to adjust output formatting (e.g., line clearing, progress updates) when running in a terminal versus non‑terminal environments. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Call os.Stdin.Fd()"] --> B["int conversion"] + B --> C["term.IsTerminal(int(os.Stdin.Fd()))"] + C --> D["Return bool"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_isTTY --> term_IsTerminal + func_isTTY --> int_conversion + func_isTTY --> os_Stdin_Fd +``` + +#### Functions calling `isTTY` + +```mermaid +graph TD + PrintCheckRunning --> isTTY + cliCheckLogSniffer_Write --> isTTY + printRunningCheckLine --> isTTY + updateRunningCheckLine --> isTTY +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isTTY +package main + +import ( + "fmt" +) + +func main() { + if isTTY() { + fmt.Println("Running in a terminal") + } else { + fmt.Println("Not running in a terminal") + } +} +``` + +--- + +### printRunningCheckLine + +**printRunningCheckLine** - Renders a line to the terminal showing a running check’s name, elapsed time and, if space permits, its most recent log message. It handles both TTY (interactive) and non‑TTY environments. + +```go +func printRunningCheckLine(checkName string, startTime time.Time, logLine string) () +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Renders a line to the terminal showing a running check’s name, elapsed time and, if space permits, its most recent log message. It handles both TTY (interactive) and non‑TTY environments. | +| **Parameters** | `checkName string` – Identifier of the check.
`startTime time.Time` – Time when the check started.
`logLine string` – Latest log entry produced by the check. | +| **Return value** | None (`()`); the function writes directly to standard output. | +| **Key dependencies** | • `time.Since`, `Round(time.Second)`
• `isTTY()`, `getTerminalWidth()`
• `cropLogLine(line, width)`
• `fmt.Print` and terminal control codes (`ClearLineCode`) | +| **Side effects** | Prints to `os.Stdout`; may clear the current line before printing. Does not modify any external state. | +| **How it fits the package** | Used by `updateRunningCheckLine` to refresh the CLI display while a check is executing, providing real‑time feedback to the user. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"isTTY?"} + B -- No --> C["Print line + newline"] + B -- Yes --> D["Compute elapsedTime"] + D --> E["Build base line"] + E --> F{"logLine non‑empty & space available?"} + F -- No --> G["Print cleared line with base"] + F -- Yes --> H["Crop logLine"] + H --> I["Append cropped logLine to base"] + I --> G +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printRunningCheckLine --> Round + func_printRunningCheckLine --> time.Since + func_printRunningCheckLine --> fmt.Print + func_printRunningCheckLine --> isTTY + func_printRunningCheckLine --> getTerminalWidth + func_printRunningCheckLine --> len + func_printRunningCheckLine --> cropLogLine +``` + +#### Functions calling `printRunningCheckLine` (Mermaid) + +```mermaid +graph TD + updateRunningCheckLine --> printRunningCheckLine +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printRunningCheckLine +package main + +import ( + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/cli" +) + +func main() { + start := time.Now() + checkName := "ValidateCertificates" + logMsg := "Checking certificate chain..." + cli.printRunningCheckLine(checkName, start, logMsg) +} +``` + +--- + +### stopCheckLineGoroutine + +**stopCheckLineGoroutine** - Signals the goroutine that prints a moving “checking…” line to terminate and disables the channel used for this purpose. 
+ +#### Signature (Go) + +```go +func stopCheckLineGoroutine() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Signals the goroutine that prints a moving “checking…” line to terminate and disables the channel used for this purpose. | +| **Parameters** | *none* | +| **Return value** | *none* | +| **Key dependencies** | - Global variable `stopChan` (a `chan bool`)
- Standard Go channel operations | +| **Side effects** | Sends a `true` value on `stopChan`, then sets the global to `nil`, preventing further sends and allowing the goroutine to exit. | +| **How it fits the package** | Invoked by various `PrintCheck*` helper functions to cleanly stop the animated line when a check finishes or is aborted. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"stopChan == nil?"} + B -- Yes --> C["Return"] + B -- No --> D["Send true on stopChan"] + D --> E["Set stopChan = nil"] + E --> F["End"] +``` + +#### Function dependencies (Mermaid) + +None – this function calls no other functions in the package. + +```mermaid +graph TD + func_stopCheckLineGoroutine +``` + +#### Functions calling `stopCheckLineGoroutine` (Mermaid) + +```mermaid +graph TD + func_PrintCheckAborted --> func_stopCheckLineGoroutine + func_PrintCheckErrored --> func_stopCheckLineGoroutine + func_PrintCheckFailed --> func_stopCheckLineGoroutine + func_PrintCheckPassed --> func_stopCheckLineGoroutine + func_PrintCheckSkipped --> func_stopCheckLineGoroutine +``` + +#### Usage example (Go) + +```go +// The function is unexported; it is called internally by the CLI package. +// A direct call would look like this if we were inside the same package: + +func example() { + // ... some check logic ... + stopCheckLineGoroutine() // stops the progress line goroutine +} +``` + +--- + +### updateRunningCheckLine + +**updateRunningCheckLine** - Continuously refreshes a status line in the terminal to show elapsed time and the most recent log message for an ongoing check. + +#### Signature (Go) + +```go +func updateRunningCheckLine(checkName string, stopChan <-chan bool) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Continuously refreshes a status line in the terminal to show elapsed time and the most recent log message for an ongoing check. | +| **Parameters** | `checkName string` – name of the check being executed.<br/>
`stopChan <-chan bool` – channel used to signal termination of the update loop. | +| **Return value** | None (the function blocks until a stop signal is received). | +| **Key dependencies** | • `time.Now()`
• `isTTY()`
• `time.NewTicker()`
• `printRunningCheckLine(checkName, startTime, lastCheckLogLine)`
• Global channel `checkLoggerChan` (receives log lines) | +| **Side effects** | • Writes to standard output via `fmt.Print` inside `printRunningCheckLine`.
• Stops the ticker and exits on receiving a value from `stopChan`. | +| **How it fits the package** | Used by `PrintCheckRunning` to provide live feedback during long-running checks. It keeps the terminal line updated without cluttering output when not in a TTY. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + start["Start"] --> init{"Initialize"} + init --> timer["Ticker created"] + loop{"Select"} + loop --> case1{"Timer tick"} + case1 --> update1["printRunningCheckLine with last line"] + loop --> case2{"New log line"} + case2 --> update2["Update lastCheckLogLine"] + update2 --> update1 + loop --> case3{"Stop signal"} + case3 --> stop["Stop ticker & return"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_updateRunningCheckLine --> time.Now + func_updateRunningCheckLine --> isTTY + func_updateRunningCheckLine --> time.NewTicker + func_updateRunningCheckLine --> printRunningCheckLine + func_updateRunningCheckLine --> checkLoggerChan +``` + +#### Functions calling `updateRunningCheckLine` (Mermaid) + +```mermaid +graph TD + PrintCheckRunning --> func_updateRunningCheckLine +``` + +#### Usage example (Go) + +```go +// Minimal example invoking updateRunningCheckLine +func main() { + stop := make(chan bool) + + // Start the updater in a goroutine + go updateRunningCheckLine("Example Check", stop) + + // Simulate some work and send log lines + time.Sleep(3 * time.Second) + checkLoggerChan <- "Still working..." 
+ + // Finish after 5 seconds + time.Sleep(2 * time.Second) + stop <- true +} +``` + +--- diff --git a/docs/internal/clientsholder/clientsholder.md b/docs/internal/clientsholder/clientsholder.md new file mode 100644 index 000000000..e48ce0ace --- /dev/null +++ b/docs/internal/clientsholder/clientsholder.md @@ -0,0 +1,1487 @@ +# Package clientsholder + +**Path**: `internal/clientsholder` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [ClientsHolder](#clientsholder) + - [CommandMock](#commandmock) + - [Context](#context) +- [Interfaces](#interfaces) + - [Command](#command) +- [Exported Functions](#exported-functions) + - [ClearTestClientsHolder](#cleartestclientsholder) + - [ClientsHolder.ExecCommandContainer](#clientsholder.execcommandcontainer) + - [CommandMock.ExecCommandContainer](#commandmock.execcommandcontainer) + - [CommandMock.ExecCommandContainerCalls](#commandmock.execcommandcontainercalls) + - [Context.GetContainerName](#context.getcontainername) + - [Context.GetNamespace](#context.getnamespace) + - [Context.GetPodName](#context.getpodname) + - [GetClientConfigFromRestConfig](#getclientconfigfromrestconfig) + - [GetClientsHolder](#getclientsholder) + - [GetNewClientsHolder](#getnewclientsholder) + - [GetTestClientsHolder](#gettestclientsholder) + - [NewContext](#newcontext) + - [SetTestClientGroupResources](#settestclientgroupresources) + - [SetTestK8sClientsHolder](#settestk8sclientsholder) + - [SetTestK8sDynamicClientsHolder](#settestk8sdynamicclientsholder) + - [SetupFakeOlmClient](#setupfakeolmclient) +- [Local Functions](#local-functions) + - [createByteArrayKubeConfig](#createbytearraykubeconfig) + - [getClusterRestConfig](#getclusterrestconfig) + - [newClientsHolder](#newclientsholder) + +## Overview + +Provides a central holder for all Kubernetes and OpenShift client interfaces, utilities for executing commands inside pod containers, and helpers to create fake clients for unit testing. 
+ +### Key Features + +- Aggregates dynamic, core, API‑extension, OLM, and networking clients into a single struct that can be lazily initialized from kubeconfig or in‑cluster config +- Executes shell commands inside a specified container via the Kubernetes remote‑command API, returning stdout and error streams +- Offers functions to construct fully populated fake client sets for isolated testing of client interactions + +### Design Notes + +- ClientsHolder is lazily instantiated; GetClientsHolder creates it on first call and caches it for reuse +- Thread safety is ensured with mutexes around command mocks and holder initialization; however, the holder itself is not concurrent‑write safe after creation +- When running tests, SetTestK8sClientsHolder or NewTestClientsHolder should be used to inject fake clients; real cluster interactions are avoided + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**ClientsHolder**](#clientsholder) | Holds all Kubernetes client interfaces and related configuration | +| [**CommandMock**](#commandmock) | One-line purpose | +| [**Context**](#context) | One-line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func ClearTestClientsHolder()](#cleartestclientsholder) | Resets the global clients holder by clearing its Kubernetes client reference and marking it as not ready. | +| [func (clientsholder *ClientsHolder) ExecCommandContainer(ctx Context, command string) (stdout, stderr string, err error)](#clientsholder.execcommandcontainer) | Runs a shell command inside the container identified by `ctx` and captures its standard output and error streams. | +| [func (mock *CommandMock) ExecCommandContainer(context Context, s string) (string, string, error)](#commandmock.execcommandcontainer) | Stores invocation details of `ExecCommandContainer` and delegates execution to the supplied mock function. 
| +| [func (mock *CommandMock) ExecCommandContainerCalls() []struct { Context Context S string }](#commandmock.execcommandcontainercalls) | Returns a slice containing all recorded calls to `ExecCommandContainer` made on the mock instance. Each entry holds the execution context and the command string supplied during that call. | +| [func (c *Context) GetContainerName() string](#context.getcontainername) | Returns the name of the container associated with the current execution context. This value is used when executing commands inside a pod via Kubernetes API calls. | +| [func (c *Context) GetNamespace() string](#context.getnamespace) | Returns the Kubernetes namespace associated with the receiver `Context`. This value is used by other client‑side functions to target API calls within a specific namespace. | +| [func (c *Context) GetPodName() string](#context.getpodname) | Returns the pod name associated with the context. | +| [func GetClientConfigFromRestConfig(restConfig *rest.Config) *clientcmdapi.Config](#getclientconfigfromrestconfig) | Builds a `clientcmdapi.Config` object that represents a kubeconfig file, derived from an existing `*rest.Config`. This allows code that expects a kubeconfig to use the in‑cluster configuration. | +| [func GetClientsHolder(filenames ...string) *ClientsHolder](#getclientsholder) | Returns a global, lazily‑initialized `*ClientsHolder` that aggregates Kubernetes API clients. If the holder is not yet ready, it is created via `newClientsHolder`. | +| [func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder](#getnewclientsholder) | Instantiates and returns a `*ClientsHolder` populated with Kubernetes clients based on the supplied kubeconfig file. If creation fails, it terminates the process via logging. 
| +| [func GetTestClientsHolder(k8sMockObjects []runtime.Object) *ClientsHolder](#gettestclientsholder) | Builds a `ClientsHolder` populated with fake Kubernetes clients that expose only the supplied runtime objects, facilitating isolated unit testing. | +| [func NewContext(namespace, podName, containerName string) Context](#newcontext) | Builds and returns a `Context` value that encapsulates the namespace, pod name, and container name needed for subsequent command executions against a probe pod. | +| [func SetTestClientGroupResources(groupResources []*metav1.APIResourceList)](#settestclientgroupresources) | Stores a slice of API resource lists into the package’s client holder, enabling test clients to reference available Kubernetes group resources. | +| [func SetTestK8sClientsHolder(kubernetes.Interface)](#settestk8sclientsholder) | Stores the supplied Kubernetes client in the package‑wide `clientsHolder` and marks it as ready, enabling test code to use a fake or mock client. | +| [func SetTestK8sDynamicClientsHolder(dynamicClient dynamic.Interface)](#settestk8sdynamicclientsholder) | Sets the internal `DynamicClient` of the global `clientsHolder` to a supplied test client and marks it as ready. | +| [func SetupFakeOlmClient(olmMockObjects []runtime.Object)](#setupfakeolmclient) | Replaces the real Operator‑Lifecycle‑Manager client in `clientsHolder` with a fake client that serves the supplied mock objects, enabling unit tests to exercise OLM interactions without a live cluster. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func createByteArrayKubeConfig(kubeConfig *clientcmdapi.Config) ([]byte, error)](#createbytearraykubeconfig) | Serialises a `*clientcmdapi.Config` into YAML‑encoded bytes for use as an in‑memory kubeconfig. | +| [func getClusterRestConfig(filenames ...string) (*rest.Config, error)](#getclusterrestconfig) | Determines the appropriate `*rest.Config` for connecting to a Kubernetes cluster. 
If running inside a pod it uses the in‑cluster service account; otherwise it merges one or more kubeconfig files supplied via `filenames`. The function also generates an in‑memory byte slice of the resulting kubeconfig for downstream use. | +| [func newClientsHolder(filenames ...string) (*ClientsHolder, error)](#newclientsholder) | Builds a `ClientsHolder` struct that contains all required Kubernetes and OpenShift clientsets, discovery data, scaling utilities, and networking clients. It determines whether the code is running inside or outside a cluster by obtaining an appropriate `rest.Config`. | + +## Structs + +### ClientsHolder + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `RestConfig` | `*rest.Config` | REST configuration used by all clients; includes server URL, authentication, and timeout. | +| `DynamicClient` | `dynamic.Interface` | Client for dynamic/unstructured Kubernetes resources. | +| `ScalingClient` | `scale.ScalesGetter` | Client to retrieve scale subresources (e.g., HorizontalPodAutoscaler). | +| `APIExtClient` | `apiextv1.Interface` | Client for CustomResourceDefinition and other API extensions. | +| `OlmClient` | `olmClient.Interface` | Operator Lifecycle Manager client for managing operators. | +| `OlmPkgClient` | `olmpkgclient.PackagesV1Interface` | OLM package client for handling operator packages. | +| `OcpClient` | `clientconfigv1.ConfigV1Interface` | OpenShift configuration client (e.g., config.openshift.io). | +| `K8sClient` | `kubernetes.Interface` | Core Kubernetes client covering all standard APIs. | +| `K8sNetworkingClient` | `networkingv1.NetworkingV1Interface` | Client for core networking resources (Services, Ingresses, etc.). | +| `CNCFNetworkingClient` | `cncfNetworkAttachmentv1.Interface` | Client for CNCF NetworkAttachmentDefinition CRD. | +| `DiscoveryClient` | `discovery.DiscoveryInterface` | Client to discover API groups and resources in the cluster. 
| +| `MachineCfg` | `ocpMachine.Interface` | OpenShift machine configuration client. | +| `KubeConfig` | `[]byte` | Raw kubeconfig file contents; used when constructing clients from a config file. | +| `ready` | `bool` | Indicates whether the holder has been fully initialized. | +| `GroupResources` | `[]*metav1.APIResourceList` | Cached list of all API resources known to the cluster. | +| `ApiserverClient` | `apiserverscheme.Interface` | Client for API server scheme registration (used by OLM). | + +#### Purpose + +`ClientsHolder` aggregates a collection of Kubernetes and OpenShift client interfaces, along with configuration data such as `RestConfig`, `KubeConfig`, and discovery information. It serves as a central access point for performing operations across multiple APIs—ranging from core Kubernetes objects to custom resources and operator lifecycle management—within the CertSuite testing framework. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `ExecCommandContainer` | Executes an arbitrary shell command inside a specified pod container using the holder’s `K8sClient`. | +| `GetClientsHolder` | Returns a singleton `ClientsHolder`; creates it from kubeconfig files if not already ready. | +| `GetNewClientsHolder` | Creates a fresh `ClientsHolder` based on a given kubeconfig file path, logging fatal errors on failure. | +| `GetTestClientsHolder` | Builds a mock `ClientsHolder` populated with fake clients for unit testing, filtering supplied objects by type. | +| `newClientsHolder` | Internal constructor that initializes all client interfaces from the provided kubeconfig files and sets up discovery and scaling support. | + +--- + +--- + +### CommandMock + +| Field | Type | Description | +|-------|------|-------------| +| `ExecCommandContainerFunc` | `func(context Context, s string) (string, string, error)` | Function that will be called when the mock’s `ExecCommandContainer` method is invoked. 
It allows tests to supply custom behavior for this method. | +| `calls` | `struct { ExecCommandContainer []struct{Context Context; S string} }` | Internal storage of every call made to `ExecCommandContainer`. Each entry records the arguments supplied during that call. | +| `lockExecCommandContainer` | `sync.RWMutex` | Read‑write mutex guarding concurrent access to `calls.ExecCommandContainer`. | + +#### Purpose + +`CommandMock` provides a test double for the `Command` interface used in the client holder package. By assigning a custom function to `ExecCommandContainerFunc`, tests can simulate various responses from the underlying command execution logic without invoking real commands. The struct also records each invocation so that tests can assert call counts and argument values via helper methods. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `ExecCommandContainer(context Context, s string) (string, string, error)` | Implements the mocked method; it stores the call details and forwards execution to `ExecCommandContainerFunc`. | +| `ExecCommandContainerCalls() []struct{Context Context; S string}` | Returns a snapshot of all recorded calls to `ExecCommandContainer`, enabling assertions on call history. | + +--- + +### Context + +Represents the execution context for a command that runs inside a Kubernetes pod container: the target namespace, pod name, and container name. + +| Field | Type | Description | +|-------|------|-------------| +| `namespace` | `string` | Namespace in which the pod resides. Must be a valid K8s namespace identifier. | +| `podName` | `string` | Name of the pod to target. Must match an existing pod within the specified namespace. | +| `containerName` | `string` | Name of the container inside the pod where the command will execute. 
| + +#### Purpose + +The `Context` struct encapsulates all information required by client code to locate a specific container in Kubernetes and perform operations such as executing shell commands. It is created via `NewContext(namespace, podName, containerName string)` and accessed through its getter methods (`GetNamespace`, `GetPodName`, `GetContainerName`). The struct is passed to functions that interact with the Kubernetes API (e.g., `ClientsHolder.ExecCommandContainer`) or used in mock implementations for testing. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewContext(namespace, podName, containerName string) Context` | Constructs a new `Context` instance. | +| `GetNamespace() string` | Returns the namespace stored in the context. | +| `GetPodName() string` | Returns the pod name stored in the context. | +| `GetContainerName() string` | Returns the container name stored in the context. | +| `ClientsHolder.ExecCommandContainer(ctx Context, command string)` | Executes a shell command inside the container specified by `ctx`. | +| `CommandMock.ExecCommandContainer(context Context, s string)` | Mocked implementation for testing that records calls with the provided context. | + +--- + +--- + +## Interfaces + +### Command + + +**Purpose**: go:generate moq -out command_moq.go . Command + +**Methods**: + +| Method | Description | +|--------|--------------| +| `ExecCommandContainer` | Method documentation | + +--- + +## Exported Functions + +### ClearTestClientsHolder + +**ClearTestClientsHolder** - Resets the global clients holder by clearing its Kubernetes client reference and marking it as not ready. + +#### 1) Signature (Go) + +```go +func ClearTestClientsHolder() +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Resets the global clients holder by clearing its Kubernetes client reference and marking it as not ready. 
| +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | `clientsHolder.K8sClient`, `clientsHolder.ready` | +| **Side effects** | Mutates package‑level state: sets the K8s client to `nil` and the readiness flag to `false`. | +| **How it fits the package** | Provides a clean‑up routine used during testing or reinitialisation to ensure subsequent tests start with a pristine client holder. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["ClearTestClientsHolder"] --> B["Set clientsHolder.K8sClient = nil"] + A --> C["Set clientsHolder.ready = false"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 5) Functions calling `ClearTestClientsHolder` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking ClearTestClientsHolder +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" +) + +func main() { + // Assume clients have been configured earlier... + clientsholder.ClearTestClientsHolder() +} +``` + +--- + +### ClientsHolder.ExecCommandContainer + +**ExecCommandContainer** - Runs a shell command inside the container identified by `ctx` and captures its standard output and error streams. + +#### Signature (Go) + +```go +func (clientsholder *ClientsHolder) ExecCommandContainer(ctx Context, command string) (stdout, stderr string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs a shell command inside the container identified by `ctx` and captures its standard output and error streams. | +| **Parameters** | `clientsholder *ClientsHolder` – receiver providing Kubernetes client configuration.
`ctx Context` – contains namespace, pod name, and container name.
`command string` – shell command to execute. | +| **Return value** | `stdout string` – captured standard output.
`stderr string` – captured standard error.
`err error` – execution or transport error (nil if successful). | +| **Key dependencies** | *`log.Debug`, `log.Error` for logging.
* `Context.GetNamespace()`, `.GetPodName()`, `.GetContainerName()` to build request.
*`strings.Join` for command string formatting.
* Kubernetes client (`clientsholder.K8sClient.CoreV1().RESTClient()`) and related builders (`Post`, `Namespace`, `Resource`, `Name`, `SubResource`, `VersionedParams`).
* `remotecommand.NewSPDYExecutor` and `exec.StreamWithContext` for running the command. | +| **Side effects** | No global state mutation; performs network I/O to the Kubernetes API server and streams data over SPDY. Logs debug and error information via the internal logger. | +| **How it fits the package** | Provides low‑level container interaction functionality used by higher‑level tests or tooling that needs to inspect pod behavior directly from Go code. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Build command array"] --> B["Log debug info"] + B --> C["Create REST request with pod exec options"] + C --> D["Instantiate SPDY executor"] + D --> E{"Executor creation ok?"} + E -- Yes --> F["Stream execution results"] + F --> G["Assign stdout/stderr buffers"] + E -- No --> H["Log error & return"] + G --> I{"Execution succeeded?"} + I -- Yes --> J["Return outputs, nil error"] + I -- No --> K["Log errors, return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + ExecCommandContainer --> DebugLog + ExecCommandContainer --> GetNamespace + ExecCommandContainer --> GetPodName + ExecCommandContainer --> GetContainerName + ExecCommandContainer --> JoinStrings + ExecCommandContainer --> RESTClientCall + ExecCommandContainer --> PostRequest + ExecCommandContainer --> NamespaceSpec + ExecCommandContainer --> ResourceSpec + ExecCommandContainer --> NameSpec + ExecCommandContainer --> SubResourceSpec + ExecCommandContainer --> VersionedParamsSpec + ExecCommandContainer --> NewSPDYExecutor + ExecCommandContainer --> StreamWithContext + ExecCommandContainer --> ErrorLog +``` + +#### Functions calling `ClientsHolder.ExecCommandContainer` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking ExecCommandContainer +ctx := Context{ + namespace: "default", + podName: "my-pod", + containerName:"app-container", +} +holder := &ClientsHolder{ /* initialized with K8s client and RestConfig */ } + +stdout, stderr, err := holder.ExecCommandContainer(ctx, "echo hello") +if err != nil { + fmt.Printf("command failed: %v\n", err) +} else { + fmt.Printf("stdout: %s\nstderr: %s\n", stdout, stderr) +} +``` + +--- + +### CommandMock.ExecCommandContainer + +**ExecCommandContainer** - Stores invocation details of `ExecCommandContainer` and delegates execution to the supplied mock function. + +#### Signature (Go) + +```go +func (mock *CommandMock) ExecCommandContainer(context Context, s string) (string, string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores invocation details of `ExecCommandContainer` and delegates execution to the supplied mock function. | +| **Parameters** | `context Context – execution context;
`
`s string – input string argument` | +| **Return value** | `(string, string, error) – values returned by the mock implementation` | +| **Key dependencies** | • `panic` – invoked if the mock function is nil
• `Lock`, `Unlock` – thread‑safe recording of calls
• `append` – adds call record to slice
• `ExecCommandContainerFunc` – user‑supplied mock logic | +| **Side effects** | Mutates `mock.calls.ExecCommandContainer` slice; acquires/releases a mutex. | +| **How it fits the package** | Part of the internal client holder mock used in tests to capture and simulate command execution. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check if ExecCommandContainerFunc is nil"] -->|"yes"| B["panic"] + A -->|"no"| C["Create callInfo struct"] + C --> D["Lock mutex"] + D --> E["Append to calls.ExecCommandContainer"] + E --> F["Unlock mutex"] + F --> G["Invoke ExecCommandContainerFunc(context, s)"] + G --> H["Return its result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CommandMock_ExecCommandContainer --> func_panic + func_CommandMock_ExecCommandContainer --> func_Lock + func_CommandMock_ExecCommandContainer --> func_append + func_CommandMock_ExecCommandContainer --> func_Unlock + func_CommandMock_ExecCommandContainer --> func_ExecCommandContainerFunc +``` + +#### Functions calling `CommandMock.ExecCommandContainer` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking CommandMock.ExecCommandContainer +mock := &clientsholder.CommandMock{ + ExecCommandContainerFunc: func(ctx clientsholder.Context, s string) (string, string, error) { + return "out", "err", nil + }, +} +ctx := clientsholder.NewContext() +out, errOut, err := mock.ExecCommandContainer(ctx, "sample") +fmt.Println(out, errOut, err) +``` + +--- + +### CommandMock.ExecCommandContainerCalls + +**ExecCommandContainerCalls** - Returns a slice containing all recorded calls to `ExecCommandContainer` made on the mock instance. Each entry holds the execution context and the command string supplied during that call. 
+ +#### Signature (Go) + +```go +func (mock *CommandMock) ExecCommandContainerCalls() []struct { + Context Context + S string +} +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice containing all recorded calls to `ExecCommandContainer` made on the mock instance. Each entry holds the execution context and the command string supplied during that call. | +| **Parameters** | *None* – operates solely on the receiver’s internal state. | +| **Return value** | A slice of structs, each with a `Context` field (type `Context`) and an `S` field (`string`). The slice is a snapshot; subsequent mock activity does not affect it. | +| **Key dependencies** | • `mock.lockExecCommandContainer.RLock()` – acquires a read lock.
• `mock.calls.ExecCommandContainer` – the stored call records.
• `mock.lockExecCommandContainer.RUnlock()` – releases the read lock. | +| **Side effects** | None beyond acquiring and releasing a read lock; the function does not modify any state. | +| **How it fits the package** | Provides test harness functionality for the `clientsholder` mock, enabling callers to assert that expected commands were executed in tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Acquire read lock"] --> B["Read calls slice"] + B --> C["Release read lock"] + C --> D["Return copy of calls"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CommandMock.ExecCommandContainerCalls --> func_RLock + func_CommandMock.ExecCommandContainerCalls --> func_RUnlock +``` + +#### Functions calling `CommandMock.ExecCommandContainerCalls` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking CommandMock.ExecCommandContainerCalls + +// Assume mock is an initialized *CommandMock instance. +calls := mock.ExecCommandContainerCalls() + +for _, c := range calls { + fmt.Printf("Context: %v, Command: %s\n", c.Context, c.S) +} +``` + +--- + +### Context.GetContainerName + +**GetContainerName** - Returns the name of the container associated with the current execution context. This value is used when executing commands inside a pod via Kubernetes API calls. + +#### 1) Signature (Go) + +```go +func (c *Context) GetContainerName() string +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the name of the container associated with the current execution context. This value is used when executing commands inside a pod via Kubernetes API calls. | +| **Parameters** | None – operates on the receiver `c *Context`. | +| **Return value** | `string` – the container name stored in the context. | +| **Key dependencies** | • Accesses the private field `containerName` of the `Context` struct. 
| +| **Side effects** | None – pure accessor; no state changes or I/O. | +| **How it fits the package** | Provides a simple getter for other functions (e.g., `ClientsHolder.ExecCommandContainer`) to obtain the container name needed for Kubernetes exec requests. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Context"] --> B{"GetContainerName"} + B --> C["Return c.containerName"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function calls no other functions; it simply returns a struct field. + +#### 5) Functions calling `Context.GetContainerName` (Mermaid) + +```mermaid +graph TD + func_ClientsHolder.ExecCommandContainer --> func_Context.GetContainerName +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Context.GetContainerName +// The containerName field is unexported, so build the Context via NewContext. +ctx := clientsholder.NewContext("default", "my-pod", "nginx") +name := ctx.GetContainerName() +fmt.Println("Container name:", name) +``` + +--- + +### Context.GetNamespace + +**GetNamespace** - Returns the Kubernetes namespace associated with the receiver `Context`. This value is used by other client‑side functions to target API calls within a specific namespace. + +#### Signature (Go) + +```go +func (c *Context) GetNamespace() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the Kubernetes namespace associated with the receiver `Context`. This value is used by other client‑side functions to target API calls within a specific namespace. | +| **Parameters** | None – operates on the method’s receiver (`c`). | +| **Return value** | `string` – the stored namespace name. | +| **Key dependencies** | *No external calls; simply accesses the struct field.* | +| **Side effects** | None – purely read‑only operation. | +| **How it fits the package** | The `clientsholder` package encapsulates interactions with Kubernetes resources. 
`Context.GetNamespace` provides a convenient accessor for the namespace used by commands such as `ExecCommandContainer`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Context object"] --> B{"Get namespace"} + B --> C["Return c.namespace"] +``` + +#### Function dependencies + +None – this function calls no other functions; it simply returns a struct field. + +#### Functions calling `Context.GetNamespace` (Mermaid) + +```mermaid +graph TD + func_ClientsHolder.ExecCommandContainer --> func_Context.GetNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Context.GetNamespace +// The namespace field is unexported, so build the Context via NewContext. +ctx := clientsholder.NewContext("production", "my-pod", "app") +ns := ctx.GetNamespace() +fmt.Println("Namespace:", ns) // Output: Namespace: production +``` + +--- + +### Context.GetPodName + +**GetPodName** - Returns the pod name associated with the context. + +Retrieves the pod name stored in a `Context`. + +#### Signature (Go) + +```go +func (c *Context) GetPodName() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the pod name associated with the context. | +| **Parameters** | None | +| **Return value** | `string` – the pod name (`c.podName`). | +| **Key dependencies** | None (simple field access). | +| **Side effects** | No state changes, no I/O. | +| **How it fits the package** | Provides a convenient accessor used by functions such as `ClientsHolder.ExecCommandContainer` to construct API requests targeting a specific pod. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> ReturnPodName["return c.podName"] +``` + +#### Function dependencies + +None – this function calls no other functions; it simply returns a struct field. 
+ +#### Functions calling `Context.GetPodName` (Mermaid) + +```mermaid +graph TD + func_ClientsHolder.ExecCommandContainer --> func_Context.GetPodName +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Context.GetPodName +ctx := &clientsholder.Context{ /* fields initialized elsewhere */ } +podName := ctx.GetPodName() +fmt.Println("Target pod:", podName) +``` + +--- + +### GetClientConfigFromRestConfig + +**GetClientConfigFromRestConfig** - Builds a `clientcmdapi.Config` object that represents a kubeconfig file, derived from an existing `*rest.Config`. This allows code that expects a kubeconfig to use the in‑cluster configuration. + +#### Signature (Go) + +```go +func GetClientConfigFromRestConfig(restConfig *rest.Config) *clientcmdapi.Config +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `clientcmdapi.Config` object that represents a kubeconfig file, derived from an existing `*rest.Config`. This allows code that expects a kubeconfig to use the in‑cluster configuration. | +| **Parameters** | `restConfig *rest.Config –` REST client configuration obtained from the cluster (e.g., via `rest.InClusterConfig()`). | +| **Return value** | `*clientcmdapi.Config –` A fully populated kubeconfig structure with one cluster, context and user entry. | +| **Key dependencies** | • `k8s.io/client-go/rest`
• `k8s.io/client-go/tools/clientcmd/api` |
+| **Side effects** | No external I/O or mutation of the passed config; purely constructs a new struct. |
+| **How it fits the package** | Provides a bridge between in‑cluster configuration and components that require kubeconfig bytes (e.g., preflight operator checks). |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B["Create Config object"]
+    B --> C{"Populate fields"}
+    C --> D["Set Kind & APIVersion"]
+    C --> E["Add default cluster with Server & CAFile"]
+    C --> F["Add default context pointing to cluster & user"]
+    C --> G["Set CurrentContext"]
+    C --> H["Add AuthInfo with BearerToken"]
+    H --> I["Return *clientcmdapi.Config"]
+```
+
+#### Function dependencies
+
+None – this function does not call any other functions in this package.
+
+```mermaid
+graph TD
+    note1["No internal calls"]
+```
+
+#### Functions calling `GetClientConfigFromRestConfig` (Mermaid)
+
+```mermaid
+graph TD
+    func_getClusterRestConfig --> func_GetClientConfigFromRestConfig
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking GetClientConfigFromRestConfig
+import (
+    "fmt"
+
+    "k8s.io/client-go/rest"
+    clientsholder "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder"
+)
+
+func main() {
+    // Assume we already have a rest.Config, e.g., from InClusterConfig()
+    restCfg, err := rest.InClusterConfig()
+    if err != nil {
+        panic(err)
+    }
+
+    kubeCfg := clientsholder.GetClientConfigFromRestConfig(restCfg)
+    fmt.Printf("Generated kubeconfig: %+v\n", kubeCfg)
+}
+```
+
+---
+
+### GetClientsHolder
+
+**GetClientsHolder** - Returns a global, lazily‑initialized `*ClientsHolder` that aggregates Kubernetes API clients. If the holder is not yet ready, it is created via `newClientsHolder`. 
+ +#### 1) Signature (Go) + +```go +func GetClientsHolder(filenames ...string) *ClientsHolder +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a global, lazily‑initialized `*ClientsHolder` that aggregates Kubernetes API clients. If the holder is not yet ready, it is created via `newClientsHolder`. | +| **Parameters** | `filenames ...string` – Optional list of kubeconfig file paths used when initializing the holder. | +| **Return value** | A pointer to a fully initialized `ClientsHolder` instance; panics (via `log.Fatal`) if initialization fails. | +| **Key dependencies** | • `newClientsHolder(filenames...)` – builds the holder.
• `log.Fatal(msg, args…)` – terminates on error. | +| **Side effects** | *May instantiate numerous Kubernetes clientsets and a discovery client.
* On failure, logs a fatal message and exits the process. | +| **How it fits the package** | Provides global access to shared clients across the Certsuite codebase, avoiding repeated construction of expensive client objects. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetClientsHolder"] --> B{"clientsHolder.ready"} + B -- true --> C["Return &clientsHolder"] + B -- false --> D["newClientsHolder(filenames...)"] + D --> E{"err"} + E -- non‑nil --> F["log.Fatal(Failed to create k8s clients holder, err: %v, err)"] + E -- nil --> G["return clientsHolder"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetClientsHolder --> func_newClientsHolder + func_GetClientsHolder --> func_log.Fatal +``` + +#### 5) Functions calling `GetClientsHolder` (Mermaid) + +```mermaid +graph TD + func_crclient.ExecCommandContainerNSEnter --> func_GetClientsHolder + func_crclient.GetContainerPidNamespace --> func_GetClientsHolder + func_crclient.GetPidsFromPidNamespace --> func_GetClientsHolder + func_autodiscover.DoAutoDiscover --> func_GetClientsHolder + func_provider.Node.IsHyperThreadNode --> func_GetClientsHolder + func_provider.Operator.SetPreflightResults --> func_GetClientsHolder + func_provider.Pod.CreatedByDeploymentConfig --> func_GetClientsHolder + func_provider.Pod.IsUsingSRIOV --> func_GetClientsHolder + func_provider.Pod.IsUsingSRIOVWithMTU --> func_GetClientsHolder + func_crclient.GetNodeProbePodContext --> func_GetClientsHolder + func_getOperatorTargetNamespaces --> func_GetClientsHolder +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetClientsHolder +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" +) + +func main() { + // Obtain the singleton holder; passes kubeconfig filenames if needed. 
+ clients := clientsholder.GetClientsHolder("~/.kube/config") + // Use the holder, e.g., list pods in default namespace + podList, err := clients.K8sClient.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + panic(err) + } + fmt.Printf("Found %d pods\n", len(podList.Items)) +} +``` + +--- + +### GetNewClientsHolder + +**GetNewClientsHolder** - Instantiates and returns a `*ClientsHolder` populated with Kubernetes clients based on the supplied kubeconfig file. If creation fails, it terminates the process via logging. + +#### Signature (Go) + +```go +func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates and returns a `*ClientsHolder` populated with Kubernetes clients based on the supplied kubeconfig file. If creation fails, it terminates the process via logging. | +| **Parameters** | `kubeconfigFile string` – Path to a kubeconfig file used for cluster configuration. | +| **Return value** | `*ClientsHolder` – Reference to the initialized client holder. | +| **Key dependencies** | • Calls `newClientsHolder(kubeconfigFile)`
• Invokes `log.Fatal()` on error | +| **Side effects** | *Fatal logs a message and exits the program if initialization fails.
* No external I/O beyond logging. | +| **How it fits the package** | Provides a public API for other packages to obtain a ready‑to‑use collection of Kubernetes client interfaces, hiding the complex setup performed by `newClientsHolder`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetNewClientsHolder"] --> B["newClientsHolder(kubeconfigFile)"] + B --> C{"error?"} + C -- yes --> D["log.Fatal(Failed to create k8s clients holder, err: %v, err)"] + C -- no --> E["return &clientsHolder"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNewClientsHolder --> func_newClientsHolder + func_GetNewClientsHolder --> func_log.Fatal +``` + +#### Functions calling `GetNewClientsHolder` (Mermaid) + +```mermaid +graph TD + func_runHandler --> func_GetNewClientsHolder +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetNewClientsHolder +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" +) + +func main() { + // Path to kubeconfig file obtained elsewhere (e.g., from env or user input) + kubeConfigPath := "/path/to/kubeconfig.yaml" + + // Obtain the client holder; program exits if initialization fails + clients := clientsholder.GetNewClientsHolder(kubeConfigPath) + + // Use clients.K8sClient, clients.DynamicClient, etc. as needed... + _ = clients +} +``` + +--- + +### GetTestClientsHolder + +**GetTestClientsHolder** - Builds a `ClientsHolder` populated with fake Kubernetes clients that expose only the supplied runtime objects, facilitating isolated unit testing. + +#### Signature (Go) + +```go +func GetTestClientsHolder(k8sMockObjects []runtime.Object) *ClientsHolder +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ClientsHolder` populated with fake Kubernetes clients that expose only the supplied runtime objects, facilitating isolated unit testing. 
| +| **Parameters** | `k8sMockObjects []runtime.Object –` a slice of pure Kubernetes API objects to be served by the mock clients. | +| **Return value** | `*ClientsHolder –` a pointer to a fully initialized holder whose internal flag is set to ready. | +| **Key dependencies** | • `k8s.io/client-go/kubernetes/fake.NewSimpleClientset`
• `k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake.NewSimpleClientset`
• `github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/fake.NewSimpleClientset` | +| **Side effects** | None external; only internal state of the returned holder is mutated. | +| **How it fits the package** | Serves as the test‑time counterpart to production client initialisation, allowing tests to inject deterministic objects without hitting a real API server. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Receive mock objects"] + Split1{"Group by type"} + BuildK8sObjects["Collect core & RBAC objects"] + BuildExtObjects["Collect CRD objects"] + BuildPlumbing["Collect NetworkAttachmentDefinition objects"] + CreateK8sClient["NewSimpleClientset(k8sClientObjects…)"] + CreateExtClient["NewSimpleClientset(k8sExtClientObjects…)"] + CreatePlumbingClient["NewSimpleClientset(k8sPlumbingObjects…)"] + SetReady["clientsHolder.ready = true"] + Return["Return &clientsHolder"] + + Start --> Split1 + Split1 --> BuildK8sObjects + Split1 --> BuildExtObjects + Split1 --> BuildPlumbing + BuildK8sObjects --> CreateK8sClient + BuildExtObjects --> CreateExtClient + BuildPlumbing --> CreatePlumbingClient + CreateK8sClient --> SetReady + CreateExtClient --> SetReady + CreatePlumbingClient --> SetReady + SetReady --> Return +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + GetTestClientsHolder --> NewSimpleClientset_k8s + GetTestClientsHolder --> NewSimpleClientset_ext + GetTestClientsHolder --> NewSimpleClientset_plumbing + + classDef func fill:#f9f,stroke:#333,stroke-width:2px; + class NewSimpleClientset_k8s,NewSimpleClientset_ext,NewSimpleClientset_plumbing func; +``` + +#### Functions calling `GetTestClientsHolder` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking GetTestClientsHolder
+package main
+
+import (
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+
+    cs "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder"
+)
+
+func main() {
+    objects := []runtime.Object{
+        &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}},
+    }
+    holder := cs.GetTestClientsHolder(objects)
+    // holder.K8sClient can now be used in tests
+}
+```
+
+---
+
+### NewContext
+
+**NewContext** - Builds and returns a `Context` value that encapsulates the namespace, pod name, and container name needed for subsequent command executions against a probe pod.
+
+#### Signature (Go)
+
+```go
+func NewContext(namespace, podName, containerName string) Context
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Builds and returns a `Context` value that encapsulates the namespace, pod name, and container name needed for subsequent command executions against a probe pod. |
+| **Parameters** | `namespace` string – Kubernetes namespace of the pod.<br/>
`podName` string – Name of the target pod.
`containerName` string – Container inside the pod to target. |
+| **Return value** | A `Context` struct containing the provided fields; used by command‑execution helpers. |
+| **Key dependencies** | None – this function only constructs a struct. |
+| **Side effects** | No state changes, no I/O. Pure constructor. |
+| **How it fits the package** | Provides the minimal data structure that higher‑level client holders use to reference probe pods when running commands via `ExecCommandContainer`. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Receive namespace"] --> B{"Construct Context"}
+    B --> C["Set .namespace"]
+    B --> D["Set .podName"]
+    B --> E["Set .containerName"]
+    E --> F["Return Context struct"]
+```
+
+#### Function dependencies (Mermaid)
+
+None – this function does not call any other functions; it only constructs and returns a struct.
+
+#### Functions calling `NewContext` (Mermaid)
+
+```mermaid
+graph TD
+    clientsholder_GetClientsHolder --> func_NewContext
+    provider_Node_IsHyperThreadNode --> func_NewContext
+    pkg_diagnostics_GetCniPlugins --> func_NewContext
+    pkg_provider_filterDPDKRunningPods --> func_NewContext
+    pkg_tests_platform_testIsSELinuxEnforcing --> func_NewContext
+    tests_accesscontrol_testOneProcessPerContainer --> func_NewContext
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking NewContext
+package main
+
+import (
+    "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder"
+)
+
+func main() {
+    ctx := clientsholder.NewContext("default", "probe-pod-1234", "probe-container")
+    // ctx can now be passed to ExecCommandContainer or similar helpers
+}
+```
+
+---
+
+---
+
+### SetTestClientGroupResources
+
+**SetTestClientGroupResources** - Stores a slice of API resource lists into the package’s client holder, enabling test clients to reference available Kubernetes group resources.
+
+Sets the group resources for the test client holder. 
+ +```go +func SetTestClientGroupResources(groupResources []*metav1.APIResourceList) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores a slice of API resource lists into the package’s client holder, enabling test clients to reference available Kubernetes group resources. | +| **Parameters** | `groupResources []*metav1.APIResourceList` – a list of resource groups and their contained API resources. | +| **Return value** | None (void). | +| **Key dependencies** | - `k8s.io/apimachinery/pkg/apis/meta/v1` for the `APIResourceList` type.
- Assignment to the package‑level variable `clientsHolder`. | +| **Side effects** | Mutates the exported field `GroupResources` of the `clientsHolder` singleton; no I/O or external communication. | +| **How it fits the package** | Provides a simple setter used during test initialization to inject mock group resource data into the client holder for subsequent API interactions. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive slice of APIResourceList"] --> B["Assign to clientsHolder.GroupResources"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `SetTestClientGroupResources` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking SetTestClientGroupResources +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func main() { + // Example resource lists (normally obtained from a discovery client). + resources := []*metav1.APIResourceList{ + &metav1.APIResourceList{GroupVersion: "v1", APIResources: []metav1.APIResource{}}, + } + + SetTestClientGroupResources(resources) +} +``` + +--- + +### SetTestK8sClientsHolder + +**SetTestK8sClientsHolder** - Stores the supplied Kubernetes client in the package‑wide `clientsHolder` and marks it as ready, enabling test code to use a fake or mock client. + +#### Signature (Go) + +```go +func SetTestK8sClientsHolder(kubernetes.Interface) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores the supplied Kubernetes client in the package‑wide `clientsHolder` and marks it as ready, enabling test code to use a fake or mock client. | +| **Parameters** | `k8sClient kubernetes.Interface –` The Kubernetes client implementation to be used during tests. | +| **Return value** | None (the function has no return values). 
| +| **Key dependencies** | - Assigns to the global variable `clientsHolder.K8sClient`.
- Sets `clientsHolder.ready = true`. | +| **Side effects** | Mutates package‑level state (`clientsHolder`). No external I/O or concurrency is involved. | +| **How it fits the package** | Provides a simple test hook for replacing the real Kubernetes client with a mock, allowing unit tests to run without needing an actual cluster. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive k8sClient"] --> B["clientsHolder.K8sClient = k8sClient"] + B --> C["clientsHolder.ready = true"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `SetTestK8sClientsHolder` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking SetTestK8sClientsHolder +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" +) + +func main() { + // Create a fake Kubernetes client for testing + fakeClient := fakeclientset.NewSimpleClientset() + + // Inject the fake client into the holder used by the package + clientsholder.SetTestK8sClientsHolder(fakeClient) + + // The rest of the test code can now use the injected client via the holder. + _ = metav1.GetOptions{} // placeholder to avoid unused import warning +} +``` + +--- + +### SetTestK8sDynamicClientsHolder + +**SetTestK8sDynamicClientsHolder** - Sets the internal `DynamicClient` of the global `clientsHolder` to a supplied test client and marks it as ready. + +#### Signature (Go) + +```go +func SetTestK8sDynamicClientsHolder(dynamicClient dynamic.Interface) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sets the internal `DynamicClient` of the global `clientsHolder` to a supplied test client and marks it as ready. 
| +| **Parameters** | *dynamicClient* `dynamic.Interface` – The mock or test dynamic client to be used by the holder. | +| **Return value** | None | +| **Key dependencies** | - `clientsHolder.DynamicClient = dynamicClient`
- `clientsHolder.ready = true` | +| **Side effects** | Mutates global state: updates `DynamicClient` and flips readiness flag; no I/O or concurrency involved. | +| **How it fits the package** | Provides a simple API for tests to inject a fake Kubernetes dynamic client into the holder, enabling isolation of test logic from real cluster interactions. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["SetTestK8sDynamicClientsHolder"] --> B{"Assign provided client"} + B --> C["clientsHolder.DynamicClient = dynamicClient"] + B --> D["clientsHolder.ready = true"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `SetTestK8sDynamicClientsHolder` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking SetTestK8sDynamicClientsHolder +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" + "k8s.io/client-go/dynamic/fake" +) + +func main() { + fakeClient := fake.NewSimpleDynamicClient(nil) + clientsholder.SetTestK8sDynamicClientsHolder(fakeClient) +} +``` + +--- + +### SetupFakeOlmClient + +**SetupFakeOlmClient** - Replaces the real Operator‑Lifecycle‑Manager client in `clientsHolder` with a fake client that serves the supplied mock objects, enabling unit tests to exercise OLM interactions without a live cluster. + +#### Signature (Go) + +```go +func SetupFakeOlmClient(olmMockObjects []runtime.Object) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Replaces the real Operator‑Lifecycle‑Manager client in `clientsHolder` with a fake client that serves the supplied mock objects, enabling unit tests to exercise OLM interactions without a live cluster. 
| +| **Parameters** | `olmMockObjects []runtime.Object –` A slice of Kubernetes runtime objects that represent the mocked state expected by OLM APIs. | +| **Return value** | None. The function mutates global state (`clientsHolder.OlmClient`). | +| **Key dependencies** | • `github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake.NewSimpleClientset` – creates a fake client pre‑loaded with the mock objects.
• `clientsHolder.OlmClient` – global holder that stores the current OLM client instance. | +| **Side effects** | Mutates the package‑level variable `clientsHolder.OlmClient`. No external I/O or concurrency is performed. | +| **How it fits the package** | Provides a convenient test helper for the `clientsholder` package, allowing other components to obtain an OLM client that behaves deterministically during unit tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + SetupFakeOlmClient --> CreateFakeClient["Create fake client with mock objects"] + CreateFakeClient --> SetGlobal["Assign to clientsHolder.OlmClient"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + SetupFakeOlmClient --> NewSimpleClientset["github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake.NewSimpleClientset"] +``` + +#### Functions calling `SetupFakeOlmClient` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking SetupFakeOlmClient +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// Create some mock objects that represent OLM resources +mockObjs := []runtime.Object{ + // ... populate with fake CRDs, ClusterServiceVersions, etc. +} + +// Override the real OLM client with a fake one for testing +SetupFakeOlmClient(mockObjs) +``` + +--- + +## Local Functions + +### createByteArrayKubeConfig + +**createByteArrayKubeConfig** - Serialises a `*clientcmdapi.Config` into YAML‑encoded bytes for use as an in‑memory kubeconfig. + +#### 1) Signature (Go) + +```go +func createByteArrayKubeConfig(kubeConfig *clientcmdapi.Config) ([]byte, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Serialises a `*clientcmdapi.Config` into YAML‑encoded bytes for use as an in‑memory kubeconfig. | +| **Parameters** | `kubeConfig *clientcmdapi.Config` – the configuration to serialise. 
| +| **Return value** | `([]byte, error)` – the YAML representation on success; a non‑nil error otherwise. | +| **Key dependencies** | • `clientcmd.Write(*kubeConfig)` (from `k8s.io/client-go/tools/clientcmd`)
• `fmt.Errorf` for wrapping errors | +| **Side effects** | No external I/O or state mutation; purely functional. | +| **How it fits the package** | Used by `getClusterRestConfig` to produce a byte array of kubeconfig data when running inside or outside a cluster, enabling downstream components that expect raw kubeconfig bytes. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call clientcmd.Write"} + B -->|"Success"| C["Return yamlBytes"] + B -->|"Error"| D["Wrap and return error"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_createByteArrayKubeConfig --> func_Write_k8s.io_client-go_tools_clientcmd + func_createByteArrayKubeConfig --> func_Errorf_fmt +``` + +#### 5) Functions calling `createByteArrayKubeConfig` (Mermaid) + +```mermaid +graph TD + func_getClusterRestConfig --> func_createByteArrayKubeConfig +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking createByteArrayKubeConfig +import ( + "k8s.io/client-go/tools/clientcmd/api" + "fmt" +) + +func main() { + // Assume we have a kubeconfig object from elsewhere + cfg := &api.Config{ + CurrentContext: "my-context", + Clusters: map[string]*api.Cluster{ + "my-cluster": {Server: "https://example.com"}, + }, + AuthInfos: map[string]*api.AuthInfo{ + "user1": {Token: "sometoken"}, + }, + } + + yamlBytes, err := createByteArrayKubeConfig(cfg) + if err != nil { + fmt.Printf("Error creating kubeconfig bytes: %v\n", err) + return + } + fmt.Printf("YAML:\n%s\n", string(yamlBytes)) +} +``` + +--- + +--- + +### getClusterRestConfig + +**getClusterRestConfig** - Determines the appropriate `*rest.Config` for connecting to a Kubernetes cluster. If running inside a pod it uses the in‑cluster service account; otherwise it merges one or more kubeconfig files supplied via `filenames`. The function also generates an in‑memory byte slice of the resulting kubeconfig for downstream use. 
+ +#### Signature (Go) + +```go +func getClusterRestConfig(filenames ...string) (*rest.Config, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines the appropriate `*rest.Config` for connecting to a Kubernetes cluster. If running inside a pod it uses the in‑cluster service account; otherwise it merges one or more kubeconfig files supplied via `filenames`. The function also generates an in‑memory byte slice of the resulting kubeconfig for downstream use. | +| **Parameters** | `filenames ...string` – Paths to one or more kubeconfig files (used only when not running inside a cluster). | +| **Return value** | `*rest.Config, error` – The Kubernetes REST configuration and an error if the configuration cannot be constructed. | +| **Key dependencies** | • `k8s.io/client-go/rest.InClusterConfig`
• `github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logger.Info`
• `GetClientConfigFromRestConfig`
• `createByteArrayKubeConfig`
• `k8s.io/client-go/tools/clientcmd.NewDefaultClientConfigLoadingRules`
• `k8s.io/client-go/tools/clientcmd.NewNonInteractiveDeferredLoadingClientConfig`
• `clientcmd.RawConfig`
• `clientcmd.ClientConfig` | +| **Side effects** | *Logs informational messages.
* Populates the global `clientsHolder.KubeConfig` with a byte slice representing the kubeconfig. | +| **How it fits the package** | This helper is used by `newClientsHolder` to obtain the REST configuration that underpins all Kubernetes client instances created in the package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Call getClusterRestConfig"] --> B{"rest.InClusterConfig() succeeds?"} + B -- yes --> C["Use in‑cluster config"] + C --> D["GetClientConfigFromRestConfig"] + D --> E["createByteArrayKubeConfig"] + E --> F["Return rest.Config"] + B -- no --> G["Log “Running outside a cluster”"] + G --> H{"filenames empty?"} + H -- yes --> I["return error: “no kubeconfig files set”"] + H -- no --> J["Create loadingRules with precedence"] + J --> K["NewNonInteractiveDeferredLoadingClientConfig"] + K --> L["kubeconfig.RawConfig()"] + L --> M["createByteArrayKubeConfig"] + M --> N["kubeconfig.ClientConfig()"] + N --> F +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + getClusterRestConfig --> rest.InClusterConfig + getClusterRestConfig --> log.Info + getClusterRestConfig --> GetClientConfigFromRestConfig + getClusterRestConfig --> createByteArrayKubeConfig + getClusterRestConfig --> clientcmd.NewDefaultClientConfigLoadingRules + getClusterRestConfig --> clientcmd.NewNonInteractiveDeferredLoadingClientConfig + getClusterRestConfig --> clientcmd.RawConfig + getClusterRestConfig --> clientcmd.ClientConfig + getClusterRestConfig --> fmt.Errorf + getClusterRestConfig --> errors.New +``` + +#### Functions calling `getClusterRestConfig` (Mermaid) + +```mermaid +graph TD + newClientsHolder --> getClusterRestConfig +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getClusterRestConfig +config, err := getClusterRestConfig("/path/to/kubeconfig.yaml") +if err != nil { + log.Fatalf("unable to obtain rest.Config: %v", err) +} +// config can now be used to create Kubernetes clients. 
+``` + +--- + +### newClientsHolder + +**newClientsHolder** - Builds a `ClientsHolder` struct that contains all required Kubernetes and OpenShift clientsets, discovery data, scaling utilities, and networking clients. It determines whether the code is running inside or outside a cluster by obtaining an appropriate `rest.Config`. + +#### 1) Signature (Go) + +```go +func newClientsHolder(filenames ...string) (*ClientsHolder, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ClientsHolder` struct that contains all required Kubernetes and OpenShift clientsets, discovery data, scaling utilities, and networking clients. It determines whether the code is running inside or outside a cluster by obtaining an appropriate `rest.Config`. | +| **Parameters** | `filenames ...string` – Optional kubeconfig file paths used when not running in‑cluster. | +| **Return value** | `(*ClientsHolder, error)` – The fully initialized holder or an error if any client could not be created. | +| **Key dependencies** | • `log.Info` (internal logging)
• `getClusterRestConfig`
• `dynamic.NewForConfig`, `apiextv1.NewForConfig`, `olmClient.NewForConfig`, `olmpkgclient.NewForConfig`, `kubernetes.NewForConfig`, `clientconfigv1.NewForConfig`, `ocpMachine.NewForConfig`, `networkingv1.NewForConfig`
• Discovery: `discovery.NewDiscoveryClientForConfig`, `ServerPreferredResources`
• Scaling: `scale.NewDiscoveryScaleKindResolver`, `restmapper.GetAPIGroupResources`, `restmapper.NewDiscoveryRESTMapper`, `scale.NewForConfig`
• CNCF networking: `cncfNetworkAttachmentv1.NewForConfig`
• API server: `apiserverscheme.NewForConfig` | +| **Side effects** | *Mutates the global `clientsHolder` variable (sets fields and marks it ready).*
*Logs informational messages.* | +| **How it fits the package** | It is the core constructor called by public getters (`GetClientsHolder`, `GetNewClientsHolder`) to lazily initialize all client interfaces needed throughout the suite. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log “Creating k8s go-clients holder.”"] + B --> C["getClusterRestConfig(filenames...)"] + C --> D["Set RestConfig.Timeout = DefaultTimeout"] + D --> E["dynamic.NewForConfig"] + E --> F["apiextv1.NewForConfig"] + F --> G["olmClient.NewForConfig"] + G --> H["olmpkgclient.NewForConfig"] + H --> I["kubernetes.NewForConfig"] + I --> J["clientconfigv1.NewForConfig"] + J --> K["ocpMachine.NewForConfig"] + K --> L["networkingv1.NewForConfig"] + L --> M["discovery.NewDiscoveryClientForConfig"] + M --> N["ServerPreferredResources()"] + N --> O["scale.NewDiscoveryScaleKindResolver"] + O --> P["restmapper.GetAPIGroupResources"] + P --> Q["restmapper.NewDiscoveryRESTMapper"] + Q --> R["scale.NewForConfig"] + R --> S["cncfNetworkAttachmentv1.NewForConfig"] + S --> T["apiserverscheme.NewForConfig"] + T --> U["Set ready = true"] + U --> V["Return &clientsHolder, nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_newClientsHolder --> log.Info + func_newClientsHolder --> getClusterRestConfig + func_newClientsHolder --> dynamic.NewForConfig + func_newClientsHolder --> apiextv1.NewForConfig + func_newClientsHolder --> olmClient.NewForConfig + func_newClientsHolder --> olmpkgclient.NewForConfig + func_newClientsHolder --> kubernetes.NewForConfig + func_newClientsHolder --> clientconfigv1.NewForConfig + func_newClientsHolder --> ocpMachine.NewForConfig + func_newClientsHolder --> networkingv1.NewForConfig + func_newClientsHolder --> discovery.NewDiscoveryClientForConfig + func_newClientsHolder --> ServerPreferredResources + func_newClientsHolder --> scale.NewDiscoveryScaleKindResolver + func_newClientsHolder --> 
restmapper.GetAPIGroupResources + func_newClientsHolder --> restmapper.NewDiscoveryRESTMapper + func_newClientsHolder --> scale.NewForConfig + func_newClientsHolder --> cncfNetworkAttachmentv1.NewForConfig + func_newClientsHolder --> apiserverscheme.NewForConfig +``` + +#### 5) Functions calling `newClientsHolder` (Mermaid) + +```mermaid +graph TD + GetClientsHolder --> newClientsHolder + GetNewClientsHolder --> newClientsHolder +``` + +#### 6) Usage example (Go) + +```go +// In the same package, invoking the constructor directly. +package clientsholder + +func demo() { + holder, err := newClientsHolder("/path/to/kubeconfig") + if err != nil { + log.Fatalf("cannot create clients: %v", err) + } + // Use holder.K8sClient or any other field as needed +} +``` + +--- + +--- diff --git a/docs/internal/crclient/crclient.md b/docs/internal/crclient/crclient.md new file mode 100644 index 000000000..d7a974a1d --- /dev/null +++ b/docs/internal/crclient/crclient.md @@ -0,0 +1,624 @@ +# Package crclient + +**Path**: `internal/crclient` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [Process](#process) +- [Exported Functions](#exported-functions) + - [ExecCommandContainerNSEnter](#execcommandcontainernsenter) + - [GetContainerPidNamespace](#getcontainerpidnamespace) + - [GetContainerProcesses](#getcontainerprocesses) + - [GetNodeProbePodContext](#getnodeprobepodcontext) + - [GetPidFromContainer](#getpidfromcontainer) + - [GetPidsFromPidNamespace](#getpidsfrompidnamespace) + - [Process.String](#process.string) + +## Overview + +The crclient package provides utilities for inspecting and manipulating container runtimes within a Kubernetes cluster. It enables querying process IDs, PID namespaces, executing commands inside containers or probe pods, and gathering runtime information needed by tests. 
+ +### Key Features + +- Executes shell commands in a container’s network namespace via nsenter using the container’s PID +- Retrieves a container’s PID and PID‑namespace identifier for CRI/Docker runtimes +- Lists all processes running inside a specific container by inspecting its PID namespace + +### Design Notes + +- Assumes probe pod runs on the same node as the target, using a clientsholder context to access it +- Handles only Docker and CRI‑based runtimes; unknown runtimes result in an error +- Best practice: use GetNodeProbePodContext first to obtain a reliable context before executing commands + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**Process**](#process) | Represents a single operating‑system process observed inside a Kubernetes container | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func ExecCommandContainerNSEnter(command string, aContainer *provider.Container) (outStr, errStr string, err error)](#execcommandcontainernsenter) | Runs an arbitrary shell command inside the network namespace of a specified container by first locating its PID and then invoking `nsenter`. The result is returned as stdout, stderr, and any execution error. | +| [func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error)](#getcontainerpidnamespace) | Determines the PID namespace identifier of a running container by querying its process ID and inspecting the namespace information. | +| [func GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error)](#getcontainerprocesses) | Returns the list of processes (pids and metadata) that are running within a specific container by querying its PID namespace. | +| [func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error)](#getnodeprobepodcontext) | Builds a `clientsholder.Context` that points to the first container of the probe pod running on the specified node. 
This context is used for executing commands inside the probe pod’s namespace. | +| [func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error)](#getpidfromcontainer) | Executes an appropriate command to obtain the PID that a given container runs under, supporting Docker and CRI‑based runtimes. | +| [func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) ([]*Process, error)](#getpidsfrompidnamespace) | Executes `ps` inside a probe pod to list all processes whose PID namespace matches `pidNamespace`, then returns them as `*Process` structs. | +| [func (p *Process) String() string](#process.string) | Creates a concise human‑readable description containing command arguments, process ID, parent PID, and PID namespace. | + +## Structs + +### Process + +Represents a single operating‑system process observed inside a Kubernetes container. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `PidNs` | `int` | The PID namespace identifier of the process; used to group processes belonging to the same container. | +| `Pid` | `int` | The numeric process ID assigned by the host kernel. | +| `PPid` | `int` | The parent process ID, indicating the direct ancestor within the same namespace. | +| `Args` | `string` | The command line used to start the process; includes executable name and arguments. | + +#### Purpose + +The `Process` struct is a lightweight data holder for information extracted from the `ps` command executed in a probe pod. It enables higher‑level functions (e.g., `GetContainerProcesses`) to return a slice of processes that belong to a specific container, facilitating diagnostics or analysis of container runtime behavior. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error)` | Retrieves all `Process` instances for the given container by first obtaining its PID namespace and then invoking `GetPidsFromPidNamespace`. | +| `GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error)` | Executes a shell command to list processes in the specified PID namespace, parses the output, and populates a slice of `Process` structs. | +| `(*Process).String() string` | Returns a human‑readable representation of a process: `"cmd: , pid: , ppid: , pidNs: "`. | + +--- + +--- + +## Exported Functions + +### ExecCommandContainerNSEnter + +**ExecCommandContainerNSEnter** - Runs an arbitrary shell command inside the network namespace of a specified container by first locating its PID and then invoking `nsenter`. The result is returned as stdout, stderr, and any execution error. + +#### Signature (Go) + +```go +func ExecCommandContainerNSEnter(command string, aContainer *provider.Container) (outStr, errStr string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs an arbitrary shell command inside the network namespace of a specified container by first locating its PID and then invoking `nsenter`. The result is returned as stdout, stderr, and any execution error. | +| **Parameters** | `command string` – the command to run within the container’s namespace.
`aContainer *provider.Container` – the target container (contains node name, UID, runtime, etc.). | +| **Return value** | `outStr string` – captured stdout.
`errStr string` – captured stderr.
`err error` – any failure during PID lookup or command execution. | +| **Key dependencies** | * `provider.GetTestEnvironment()` – loads test environment data.
* `GetNodeProbePodContext(node, env)` – obtains probe‑pod context for the node.
* `clientsholder.GetClientsHolder()` – provides Kubernetes client helpers.
* `GetPidFromContainer(container, ctx)` – determines container PID based on runtime.
* `ch.ExecCommandContainer(ctx, cmd)` – executes a command inside the pod. | +| **Side effects** | Executes external commands (`nsenter`, container‑runtime introspection). May delay due to retry logic (up to `RetryAttempts` with `RetrySleepSeconds`). No global state mutation. | +| **How it fits the package** | The function is a core utility for tests that need to inspect or manipulate container internals (e.g., networking, processes). It abstracts the complexity of namespace entry and PID resolution, providing a simple API for higher‑level test functions. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Get test env"} + B --> C["Probe pod context"] + C --> D{"Error?"} + D -- Yes --> E["Return error"] + D -- No --> F["Clients holder"] + F --> G{"Get container PID"} + G --> H{"Error?"} + H -- Yes --> I["Return error"] + H -- No --> J["Build nsenter command"] + J --> K["Retry loop"] + K --> L["ExecCommandContainer"] + L --> M{"Success?"} + M -- Yes --> N["Return output"] + M -- No --> O["Retry if attempts left"] + O --> K +``` + +#### Function dependencies + +```mermaid +graph TD + ExecCommandContainerNSEnter --> GetTestEnvironment + ExecCommandContainerNSEnter --> GetNodeProbePodContext + ExecCommandContainerNSEnter --> clientsholder.GetClientsHolder + ExecCommandContainerNSEnter --> GetPidFromContainer + ExecCommandContainerNSEnter --> ch.ExecCommandContainer +``` + +#### Functions calling `ExecCommandContainerNSEnter` + +```mermaid +graph TD + crclient_GetListeningPorts --> ExecCommandContainerNSEnter + crclient_GetSSHDaemonPort --> ExecCommandContainerNSEnter +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ExecCommandContainerNSEnter +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume we have a container object from the test environment + var container *provider.Container + + stdout, 
stderr, err := crclient.ExecCommandContainerNSEnter("ip addr show", container) + if err != nil { + fmt.Printf("command failed: %v\n", err) + return + } + fmt.Println("stdout:", stdout) + fmt.Println("stderr:", stderr) +} +``` + +--- + +### GetContainerPidNamespace + +**GetContainerPidNamespace** - Determines the PID namespace identifier of a running container by querying its process ID and inspecting the namespace information. + +#### Signature (Go) + +```go +func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines the PID namespace identifier of a running container by querying its process ID and inspecting the namespace information. | +| **Parameters** | `testContainer *provider.Container` – container metadata; `env *provider.TestEnvironment` – test environment context. | +| **Return value** | `string` – first field of `lsns` output (namespace ID); `error` if any step fails. | +| **Key dependencies** | • `GetNodeProbePodContext` to obtain the pod context for the node.
• `GetPidFromContainer` to fetch the container’s PID.
• `clientsholder.GetClientsHolder().ExecCommandContainer` to run `lsns`. | +| **Side effects** | None. The function performs read‑only queries and logs debug information via `log.Debug`. | +| **How it fits the package** | Core helper for other functions that need to identify processes inside a container’s namespace (e.g., process enumeration, scheduling checks). | + +#### Internal workflow + +```mermaid +flowchart TD + A["GetNodeProbePodContext"] --> B{"Success?"} + B -- Yes --> C["GetPidFromContainer"] + B -- No --> D["Return error"] + C --> E{"PID obtained?"} + E -- Yes --> F["lsns command via ExecCommandContainer"] + E -- No --> G["Return error"] + F --> H{"Execution success?"} + H -- Yes --> I["Parse first field of stdout"] + H -- No --> J["Return error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetContainerPidNamespace --> func_GetNodeProbePodContext + func_GetContainerPidNamespace --> func_GetPidFromContainer + func_GetContainerPidNamespace --> clientsholder.GetClientsHolder + func_GetContainerPidNamespace --> fmt.Errorf + func_GetContainerPidNamespace --> log.Debug +``` + +#### Functions calling `GetContainerPidNamespace` + +```mermaid +graph TD + func_GetContainerProcesses --> func_GetContainerPidNamespace + crclient.testSchedulingPolicyInCPUPool --> func_GetContainerPidNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetContainerPidNamespace +package main + +import ( + "fmt" + + crclient "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient" + provider "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + container := &provider.Container{ + NodeName: "node-1", + UID: "abc123", + Runtime: "docker", + } + env := &provider.TestEnvironment{} + + ns, err := crclient.GetContainerPidNamespace(container, env) + if err != nil { + fmt.Printf("Error retrieving PID namespace: %v\n", err) + return + } + fmt.Printf("PID namespace for container %s is %s\n", 
container.UID, ns) +} +``` + +--- + +### GetContainerProcesses + +**GetContainerProcesses** - Returns the list of processes (pids and metadata) that are running within a specific container by querying its PID namespace. + +#### Signature (Go) + +```go +func GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the list of processes (pids and metadata) that are running within a specific container by querying its PID namespace. | +| **Parameters** | `container` – *provider.Container*: the target container.
`env` – *provider.TestEnvironment*: test environment context used for probe pod resolution. | +| **Return value** | `[]*Process`: slice of process descriptors (pid, args, etc.).
`error`: non‑nil if PID namespace retrieval or process listing fails. | +| **Key dependencies** | • `GetContainerPidNamespace` – obtains the container’s PID namespace.
• `GetPidsFromPidNamespace` – lists processes in that namespace.
• `fmt.Errorf` for error wrapping. | +| **Side effects** | None beyond network/exec calls performed by dependent functions; no state mutation within this function itself. | +| **How it fits the package** | Provides a thin wrapper to expose container process information, used by higher‑level checks (e.g., probe policy validation). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve PID namespace"} + B -->|"Success"| C["Call GetPidsFromPidNamespace"] + B -->|"Failure"| D["Return error"] + C --> E["Return process list"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetContainerProcesses --> func_GetContainerPidNamespace + func_GetContainerProcesses --> func_GetPidsFromPidNamespace +``` + +#### Functions calling `GetContainerProcesses` + +```mermaid +graph TD + func_testRtAppsNoExecProbes --> func_GetContainerProcesses +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetContainerProcesses +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + env := provider.GetTestEnvironment() + container := &provider.Container{ + // populate fields as needed for the test + } + + processes, err := crclient.GetContainerProcesses(container, env) + if err != nil { + log.Fatalf("Failed to list container processes: %v", err) + } + fmt.Printf("Found %d processes in container\n", len(processes)) +} +``` + +--- + +### GetNodeProbePodContext + +**GetNodeProbePodContext** - Builds a `clientsholder.Context` that points to the first container of the probe pod running on the specified node. This context is used for executing commands inside the probe pod’s namespace. 
+ +#### Signature (Go) + +```go +func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `clientsholder.Context` that points to the first container of the probe pod running on the specified node. This context is used for executing commands inside the probe pod’s namespace. | +| **Parameters** | `node string –` name of the target node
`env *provider.TestEnvironment –` environment holding mapping from nodes to probe pods | +| **Return value** | `clientsholder.Context –` populated with namespace, pod name and container name.
`error –` non‑nil if no probe pod exists for the node. | +| **Key dependencies** | • `fmt.Errorf` for error formatting
• `clientsholder.NewContext` to construct the context | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a central way for other functions (e.g., command execution, PID lookup) to acquire the correct namespace context for interacting with probe pods across nodes. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Get probe pod"] + B{"probePod exists?"} + C["Return clientsholder.NewContext(...)"] + D["Return error: probe pod not found on node"] + A --> B + B -- yes --> C + B -- no --> D +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetNodeProbePodContext --> fmt_Errorf + func_GetNodeProbePodContext --> clientsholder_NewContext +``` + +#### Functions calling `GetNodeProbePodContext` + +```mermaid +graph TD + ExecCommandContainerNSEnter --> GetNodeProbePodContext + GetContainerPidNamespace --> GetNodeProbePodContext + GetPidsFromPidNamespace --> GetNodeProbePodContext + GetProcessCPUScheduling --> GetNodeProbePodContext +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetNodeProbePodContext +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + env := provider.GetTestEnvironment() + ctx, err := crclient.GetNodeProbePodContext("worker-node-1", &env) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + fmt.Printf("Namespace: %s, Pod: %s, Container: %s\n", + ctx.Namespace(), ctx.PodName(), ctx.ContainerName()) +} +``` + +--- + +### GetPidFromContainer + +**GetPidFromContainer** - Executes an appropriate command to obtain the PID that a given container runs under, supporting Docker and CRI‑based runtimes. 
+ +#### Signature (Go) + +```go +func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes an appropriate command to obtain the PID that a given container runs under, supporting Docker and CRI‑based runtimes. | +| **Parameters** | `cut *provider.Container` – container metadata; `ctx clientsholder.Context` – execution context for the probe pod. | +| **Return value** | `int` – numeric PID of the container; `error` – any failure during command construction or execution. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` to run commands in the probe pod.
• Runtime‑specific shell commands (`DockerInspectPID`, `crictl inspect`).
• Standard libraries: `fmt`, `strconv`, `strings`. | +| **Side effects** | No mutation of package state; performs remote command execution which may log debug messages. | +| **How it fits the package** | Core helper for container‑level introspection used by higher‑level functions (e.g., namespace queries, process counting). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Determine runtime"] --> B{"Supported?"} + B -- Yes --> C["Build command string"] + B -- No --> D["Log unsupported runtime & return error"] + C --> E["Execute command via clientsholder"] + E --> F{"Command succeeded?"} + F -- Yes --> G["Trim output, convert to int"] + F -- No --> H["Return execution error"] + G --> I["Return PID"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetPidFromContainer --> clientsholder.GetClientsHolder + func_GetPidFromContainer --> fmt.Errorf + func_GetPidFromContainer --> strconv.Atoi + func_GetPidFromContainer --> strings.TrimSuffix +``` + +#### Functions calling `GetPidFromContainer` + +```mermaid +graph TD + func_ExecCommandContainerNSEnter --> func_GetPidFromContainer + func_GetContainerPidNamespace --> func_GetPidFromContainer + func_testOneProcessPerContainer --> func_GetPidFromContainer +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetPidFromContainer +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" +) + +func main() { + // Assume `container` is a *provider.Container obtained elsewhere + // and `probeCtx` is the clientsholder.Context for the probe pod. 
+ pid, err := crclient.GetPidFromContainer(container, probeCtx) + if err != nil { + log.Fatalf("cannot get PID: %v", err) + } + fmt.Printf("Container PID: %d\n", pid) +} +``` + +--- + +### GetPidsFromPidNamespace + +**GetPidsFromPidNamespace** - Executes `ps` inside a probe pod to list all processes whose PID namespace matches `pidNamespace`, then returns them as `*Process` structs. + +#### Signature (Go) + +```go +func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) ([]*Process, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes `ps` inside a probe pod to list all processes whose PID namespace matches `pidNamespace`, then returns them as `*Process` structs. | +| **Parameters** | `pidNamespace string` – target PID namespace.
`container *provider.Container` – container whose node and context are used for command execution. | +| **Return value** | `[]*Process, error` – slice of processes or an error if the probe pod context or command fails. | +| **Key dependencies** | • `provider.GetTestEnvironment()`
• `GetNodeProbePodContext()`
• `clientsholder.GetClientsHolder().ExecCommandContainer()`
• `regexp.MustCompile` & `FindAllStringSubmatch`
• `strconv.Atoi`
• `log.Error` | +| **Side effects** | Executes a shell command on the probe pod; logs conversion errors to the package logger. | +| **How it fits the package** | Utility for the CR client that translates a PID namespace into a list of running processes, used by higher‑level checks such as scheduling policy validation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get test env"} + B --> C["Get probe pod context"] + C --> D{"Exec ps command"} + D -->|"Success"| E["Parse stdout with regex"] + D -->|"Failure"| F["Return error"] + E --> G{"Filter by pidNamespace"} + G --> H["Convert fields to ints"] + H --> I["Create & append *Process"] + I --> J["Return slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetPidsFromPidNamespace --> func_GetTestEnvironment + func_GetPidsFromPidNamespace --> func_GetNodeProbePodContext + func_GetPidsFromPidNamespace --> fmt.Errorf + func_GetPidsFromPidNamespace --> clientsholder.GetClientsHolder + func_GetPidsFromPidNamespace --> ExecCommandContainer + func_GetPidsFromPidNamespace --> regexp.MustCompile + func_GetPidsFromPidNamespace --> FindAllStringSubmatch + func_GetPidsFromPidNamespace --> strconv.Atoi + func_GetPidsFromPidNamespace --> log.Error +``` + +#### Functions calling `GetPidsFromPidNamespace` (Mermaid) + +```mermaid +graph TD + func_GetContainerProcesses --> func_GetPidsFromPidNamespace + testSchedulingPolicyInCPUPool --> func_GetPidsFromPidNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetPidsFromPidNamespace +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/crclient" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env and container are obtained elsewhere + var env provider.TestEnvironment + var cont *provider.Container + + pidNs := "12345" // target PID namespace string + processes, err := crclient.GetPidsFromPidNamespace(pidNs, cont) + if err 
!= nil { + log.Fatalf("failed to get PIDs: %v", err) + } + fmt.Printf("Found %d processes in namespace %s\n", len(processes), pidNs) +} +``` + +--- + +### Process.String + +**String** - Creates a concise human‑readable description containing command arguments, process ID, parent PID, and PID namespace. + +Returns a formatted string representation of the `Process` instance. + +#### Signature (Go) + +```go +func (p *Process) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a concise human‑readable description containing command arguments, process ID, parent PID, and PID namespace. | +| **Parameters** | `p *Process` – the receiver; holds fields `Args`, `Pid`, `PPid`, `PidNs`. | +| **Return value** | A string such as `"cmd: ls -la, pid: 1234, ppid: 5678, pidNs: 0"`. | +| **Key dependencies** | * `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional and read‑only. | +| **How it fits the package** | Provides a readable representation used for logging, debugging, or displaying process information in client utilities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + p --> fmt.Sprintf["fmt.Sprintf"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Process.String --> func_fmt.Sprintf +``` + +#### Functions calling `Process.String` + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking Process.String +p := &crclient.Process{ + Args: "ls -la", + Pid: 1234, + PPid: 5678, + PidNs: 0, +} +fmt.Println(p.String()) +// Output: cmd: ls -la, pid: 1234, ppid: 5678, pidNs: 0 +``` + +--- diff --git a/docs/internal/datautil/datautil.md b/docs/internal/datautil/datautil.md new file mode 100644 index 000000000..64b92ddd1 --- /dev/null +++ b/docs/internal/datautil/datautil.md @@ -0,0 +1,120 @@ +# Package datautil + +**Path**: `internal/datautil` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [IsMapSubset](#ismapsubset) + +## Overview + +The datautil package provides small helper utilities for working with Go maps, currently offering a generic subset check that can be used to verify whether one map is contained within another. + +### Key Features + +- Generic `IsMapSubset` function that works on any comparable key type and any value type +- Simple boolean return indicating subset relationship +- No external dependencies – pure Go implementation + +### Design Notes + +- Relies on Go generics, so both maps must use the same key and value types; keys must be comparable +- The function only checks presence of keys and equality of values; it does not consider map ordering or other semantics +- Best used in configuration validation or test assertions where map containment needs to be verified + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func IsMapSubset[K comparable, V comparable](m, s map[K]V) bool](#ismapsubset) | Determines if every key/value pair of `s` is present in `m`. Returns `true` when `s` is a subset of `m`; otherwise `false`. | + +## Exported Functions + +### IsMapSubset + +**IsMapSubset** - Determines if every key/value pair of `s` is present in `m`. Returns `true` when `s` is a subset of `m`; otherwise `false`. 
+ +Checks whether one map is a subset of another, i.e., all key/value pairs in the second map exist identically in the first map. + +--- + +#### Signature (Go) + +```go +func IsMapSubset[K comparable, V comparable](m, s map[K]V) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if every key/value pair of `s` is present in `m`. Returns `true` when `s` is a subset of `m`; otherwise `false`. | +| **Parameters** | `m map[K]V` – candidate superset;
`s map[K]V` – map to test as a subset. | +| **Return value** | `bool` – `true` if `s` ⊆ `m`, `false` otherwise. | +| **Key dependencies** | • `len()` (built‑in)
• Map indexing and range iteration | +| **Side effects** | None; purely functional, no mutation or I/O. | +| **How it fits the package** | Provides a generic utility for map comparison used by other data‑handling routines within the `datautil` package. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"len(s) > len(m)?"} + B -- Yes --> C["Return false"] + B -- No --> D["Iterate over s"] + D --> E{"Key exists in m? & values equal?"} + E -- No --> C + E -- Yes --> F["Continue loop"] + F --> G["End of loop"] --> H["Return true"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsMapSubset --> len + func_IsMapSubset --> map_range + func_IsMapSubset --> map_indexing +``` + +--- + +#### Functions calling `IsMapSubset` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking IsMapSubset +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/datautil" +) + +func main() { + super := map[string]int{"a": 1, "b": 2, "c": 3} + sub := map[string]int{"a": 1, "c": 3} + + if datautil.IsMapSubset(super, sub) { + fmt.Println("sub is a subset of super") + } else { + fmt.Println("sub is NOT a subset of super") + } +} +``` + +--- diff --git a/docs/internal/log/log.md b/docs/internal/log/log.md new file mode 100644 index 000000000..13a623cc2 --- /dev/null +++ b/docs/internal/log/log.md @@ -0,0 +1,2270 @@ +# Package log + +**Path**: `internal/log` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [CustomHandler](#customhandler) + - [Logger](#logger) + - [MultiHandler](#multihandler) +- [Exported Functions](#exported-functions) + - [CloseGlobalLogFile](#closegloballogfile) + - [CreateGlobalLogFile](#creategloballogfile) + - [CustomHandler.Enabled](#customhandler.enabled) + - [CustomHandler.Handle](#customhandler.handle) + - 
[CustomHandler.WithAttrs](#customhandler.withattrs) + - [CustomHandler.WithGroup](#customhandler.withgroup) + - [Debug](#debug) + - [Error](#error) + - [Fatal](#fatal) + - [GetLogger](#getlogger) + - [GetMultiLogger](#getmultilogger) + - [Info](#info) + - [Logf](#logf) + - [Logger.Debug](#logger.debug) + - [Logger.Error](#logger.error) + - [Logger.Fatal](#logger.fatal) + - [Logger.Info](#logger.info) + - [Logger.Warn](#logger.warn) + - [Logger.With](#logger.with) + - [MultiHandler.Enabled](#multihandler.enabled) + - [MultiHandler.Handle](#multihandler.handle) + - [MultiHandler.WithAttrs](#multihandler.withattrs) + - [MultiHandler.WithGroup](#multihandler.withgroup) + - [NewCustomHandler](#newcustomhandler) + - [NewMultiHandler](#newmultihandler) + - [SetLogger](#setlogger) + - [SetupLogger](#setuplogger) + - [Warn](#warn) +- [Local Functions](#local-functions) + - [CustomHandler.appendAttr](#customhandler.appendattr) + - [parseLevel](#parselevel) + +## Overview + +Provides a lightweight logging layer for the Certsuite project that wraps Go’s structured slog logger and supports custom formatting, multi‑writer output, and global configuration. + +### Key Features + +- CustomHandler formats log lines with level labels, timestamps, source file, and arbitrary attributes; +- MultiHandler aggregates multiple underlying handlers to enable simultaneous console, file, or other outputs; +- Global setup functions (SetupLogger, CreateGlobalLogFile) allow configuring log level and destination for the entire application. + +### Design Notes + +- Uses a global *Logger variable so all helpers can emit logs without passing a logger instance; +- CustomHandler ignores grouping information via WithGroup to keep output flat; +- Level parsing is case‑insensitive and errors are reported during SetupLogger, preventing silent misconfiguration. 
+ +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**CustomHandler**](#customhandler) | A lightweight slog handler that writes formatted log records to an arbitrary writer | +| [**Logger**](#logger) | Wrapper around Go's structured logger | +| [**MultiHandler**](#multihandler) | Aggregates multiple slog handlers | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CloseGlobalLogFile() (error)](#closegloballogfile) | Safely shuts down the global log output by closing the underlying file handle. | +| [func CreateGlobalLogFile(outputDir, logLevel string) error](#creategloballogfile) | Deletes any existing log file in `outputDir`, creates a fresh one with permissions defined by `LogFilePermissions`, and sets up the global logger to write to this file. | +| [func (h *CustomHandler) Enabled(_ context.Context, level slog.Level) bool](#customhandler.enabled) | Checks whether the supplied `level` meets or exceeds the handler’s configured minimum logging level. | +| [func (h *CustomHandler) Handle(_ context.Context, r slog.Record) error](#customhandler.handle) | Formats and writes a single `slog.Record` as a log line: ` [TIME] [SOURCE_FILE] [CUSTOM_ATTRS] MSG\n`. | +| [func (h *CustomHandler) WithAttrs(attrs []slog.Attr) slog.Handler](#customhandler.withattrs) | Produces a new `slog.Handler` that includes the supplied attributes in addition to those already present on the receiver. If no attributes are provided, returns the original handler unchanged. | +| [func (h *CustomHandler) WithGroup(_ string) slog.Handler](#customhandler.withgroup) | Provides a placeholder implementation that returns `nil`, effectively discarding any grouping information. | +| [func(string, ...any)()](#debug) | Emits a log record at the *debug* level using the package’s global logger. | +| [func Error(msg string, args ...any)](#error) | Emits an error‑level log entry using the package’s global logger. 
| +| [func Fatal(msg string, args ...any)()](#fatal) | Logs a fatal message at the *LevelFatal* severity and terminates the program with exit code 1. | +| [func GetLogger() *Logger](#getlogger) | Provides access to the package‑wide `globalLogger`, enabling callers to log messages without exposing the underlying implementation. | +| [func GetMultiLogger(writers ...io.Writer) *Logger](#getmultilogger) | Builds a `*Logger` that forwards all log entries to every writer supplied, plus any global logger already configured. The logger uses custom attribute formatting for log levels. | +| [func Info(msg string, args ...any)](#info) | Emit an informational log entry by delegating to the package’s `Logf` helper. | +| [func Logf(logger *Logger, level, format string, args ...any)](#logf) | Formats and records a log entry using the provided `*Logger` at the given textual level. If the logger is nil it falls back to a default instance. It ensures that the call site information is correctly captured for accurate source references. | +| [func (logger *Logger) Debug(msg string, args ...any)](#logger.debug) | Emits a formatted log entry at the *debug* level. The message is forwarded to the underlying `slog.Logger` only if that level is enabled for the current context. | +| [func (logger *Logger) Error(msg string, args ...any)](#logger.error) | Records an error‑level log entry. It forwards the formatted message to `Logf`, specifying `LevelError`. | +| [func (logger *Logger) Fatal(msg string, args ...any)](#logger.fatal) | Emits a fatal log message at level *Fatal*, writes it to standard‑error, and exits the process with status 1. | +| [func (logger *Logger) Info(msg string, args ...any)](#logger.info) | Formats a message with optional arguments and records it at the *Info* level via `Logf`. | +| [func (logger *Logger) Warn(msg string, args ...any)](#logger.warn) | Records a log entry at the warning level using the configured `*slog.Logger`. 
| +| [func (logger *Logger) With(args ...any) *Logger](#logger.with) | Returns a new `*Logger` that inherits the underlying logger but augments it with extra key/value pairs supplied in `args`. This allows adding contextual information to log messages. | +| [func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool](#multihandler.enabled) | Returns `true` if at least one of the wrapped handlers is enabled for the supplied log level; otherwise returns `false`. | +| [func (h *MultiHandler) Handle(ctx context.Context, r slog.Record)(error)](#multihandler.handle) | Sends a single `slog.Record` to each handler stored in the receiver’s `handlers` slice. If any handler returns an error, the dispatch stops and that error is propagated. | +| [func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler](#multihandler.withattrs) | Produces a new `slog.Handler` that forwards each log record to all underlying handlers with the supplied attributes appended. | +| [func (h *MultiHandler) WithGroup(name string) slog.Handler](#multihandler.withgroup) | Creates a new handler that prefixes every log record with the specified group name for each underlying handler. | +| [func NewCustomHandler(out io.Writer, opts *slog.HandlerOptions) *CustomHandler](#newcustomhandler) | Constructs a `*CustomHandler` that routes log output to the supplied `io.Writer`, applying optional `slog.HandlerOptions`. If no options are provided, defaults are used. | +| [func NewMultiHandler(handlers ...slog.Handler) *MultiHandler](#newmultihandler) | Builds a `*MultiHandler` that forwards logging events to multiple underlying handlers. | +| [func SetLogger(l *Logger)](#setlogger) | Stores the supplied `*Logger` in the package‑wide variable `globalLogger`, making it available to all logging helpers. 
| +| [func SetupLogger(logWriter io.Writer, level string)](#setuplogger) | Parses the supplied textual log level, configures a custom `slog` handler that formats level labels, and assigns the resulting logger to the global variable. | +| [func Warn(msg string, args ...any)](#warn) | Sends a formatted warning message to the global logger. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte](#customhandler.appendattr) | Formats a single `slog.Attr` into the log line and appends it to an existing byte buffer. Handles different attribute kinds (string, time, level, generic). | +| [func parseLevel(level string) (slog.Level, error)](#parselevel) | Translates a textual log level (e.g., `"debug"`, `"info"`) into the corresponding `slog.Level` value. If the input is not recognized, returns an error. | + +## Structs + +### CustomHandler + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `opts` | `slog.HandlerOptions` | Configuration options such as the minimum log level, a custom attribute replacer, and time format. | +| `attrs` | `[]slog.Attr` | Default attributes that are added to every record emitted by this handler. | +| `mu` | `*sync.Mutex` | Mutex protecting concurrent writes to `out`. | +| `out` | `io.Writer` | Destination stream (e.g., a file or stdout) where formatted log entries are written. | + +#### Purpose + +`CustomHandler` implements the `slog.Handler` interface, converting structured slog records into human‑readable lines. It serialises each record’s level, time, source location, message and any attached attributes, then writes the result to the configured writer. The handler supports a minimum log level filter, optional attribute replacement logic, and allows adding static attributes via `WithAttrs`. Thread safety is ensured with an internal mutex. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewCustomHandler` | Constructs a new handler, setting defaults for options and initializing the output writer. | +| `Enabled` | Determines whether a record at a given level should be processed based on the configured minimum level. | +| `Handle` | Formats a slog record into a byte slice and writes it to `out`. | +| `WithAttrs` | Creates a new handler that inherits existing attributes and appends additional ones, enabling context‑specific logging. | +| `WithGroup` | (Stub) returns nil; grouping is not supported by this handler. | +| `appendAttr` | Internal helper that resolves an attribute’s value and appends its formatted representation to the output buffer. | + +--- + +--- + +### Logger + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `l` | `*slog.Logger` | The underlying standard library logger used to emit records. It is the only field; all operations delegate through it. | + +#### Purpose + +The `Logger` type encapsulates a `*slog.Logger` and provides a small, convenient API for emitting log messages at different severity levels (`Debug`, `Info`, `Warn`, `Error`, `Fatal`). +It also supports creating new logger instances that inherit the context of an existing one via `With`. The struct is stored in a package‑level variable (`globalLogger`) and can be retrieved or replaced through `GetLogger`/`SetLogger`. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `Debug(msg string, args ...any)` | Emits a debug‑level message. | +| `Info(msg string, args ...any)` | Emits an informational message. | +| `Warn(msg string, args ...any)` | Emits a warning message. | +| `Error(msg string, args ...any)` | Emits an error message. | +| `Fatal(msg string, args ...any)` | Emits a fatal message, writes to stderr and exits the process. | +| `With(args ...any) *Logger` | Returns a new `Logger` that carries additional attributes in its context. 
| +| `GetLogger() *Logger` | Retrieves the current global logger instance. | +| `SetLogger(l *Logger)` | Replaces the global logger with a supplied one. | +| `GetMultiLogger(writers ...io.Writer) *Logger` | Builds a new logger that writes to multiple writers, optionally including the existing global logger. | +| `Logf(logger *Logger, level, format string, args ...any)` | Low‑level helper used by the convenience methods; formats and records log entries at the specified level. | + +--- + +--- + +### MultiHandler + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `handlers` | `[]slog.Handler` | A slice holding the individual handlers that compose this multi‑handler. | + +#### Purpose + +`MultiHandler` implements the `slog.Handler` interface by delegating logging responsibilities to a collection of underlying handlers. When a log record is emitted, it forwards the record (cloned for safety) to each contained handler until one returns an error. The handler is considered enabled if any delegate reports that it can handle the given log level. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewMultiHandler` | Constructs a new `MultiHandler` from a variadic list of `slog.Handler` instances. | +| `Enabled` | Returns true if at least one underlying handler is enabled for the specified log level. | +| `Handle` | Sends a cloned log record to each underlying handler, stopping on the first error. | +| `WithAttrs` | Creates a new `MultiHandler` whose delegates are wrapped with the supplied attributes. | +| `WithGroup` | Creates a new `MultiHandler` whose delegates are wrapped with the specified group name. | + +--- + +## Exported Functions + +### CloseGlobalLogFile + +**CloseGlobalLogFile** - Safely shuts down the global log output by closing the underlying file handle. + +Closes the globally‑configured log file and returns any error encountered. 
+ +```go +func CloseGlobalLogFile() (error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Safely shuts down the global log output by closing the underlying file handle. | +| **Parameters** | None | +| **Return value** | `error` – non‑nil if the file close operation fails; otherwise nil. | +| **Key dependencies** | Calls `Close()` on the `globalLogFile` variable (type `*os.File`). | +| **Side effects** | Releases the OS resource associated with the log file; may affect subsequent logging attempts. | +| **How it fits the package** | Provides a single point of cleanup for the package’s global logger, used during application shutdown. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Check globalLogFile"} + B -- valid --> C["Call globalLogFile.Close()"] + C --> D["Return error or nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_CloseGlobalLogFile --> func_Close +``` + +#### Functions calling `CloseGlobalLogFile` + +```mermaid +graph TD + func_Shutdown --> func_CloseGlobalLogFile +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CloseGlobalLogFile +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + if err := log.CloseGlobalLogFile(); err != nil { + fmt.Fprintf(os.Stderr, "Could not close the log file: %v\n", err) + } +} +``` + +--- + +### CreateGlobalLogFile + +**CreateGlobalLogFile** - Deletes any existing log file in `outputDir`, creates a fresh one with permissions defined by `LogFilePermissions`, and sets up the global logger to write to this file. + +#### Signature (Go) + +```go +func CreateGlobalLogFile(outputDir, logLevel string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Deletes any existing log file in `outputDir`, creates a fresh one with permissions defined by `LogFilePermissions`, and sets up the global logger to write to this file. 
| +| **Parameters** | `outputDir string` – directory where the log should reside.
`logLevel string` – textual log level (e.g., “debug”, “info”). | +| **Return value** | `error` – non‑nil if removal or creation of the log file fails. | +| **Key dependencies** | • `os.Remove`, `os.IsNotExist`
• `fmt.Errorf`
• `os.OpenFile`
• `SetupLogger` (internal helper that configures the slog logger) | +| **Side effects** | • Deletes existing log file if present.
• Creates a new file and assigns it to `globalLogFile`.
• Calls `SetupLogger`, which mutates the package‑level `globalLogger`. | +| **How it fits the package** | This function is invoked during application startup (e.g., in `certsuite.Startup`) or by the web server to initialise logging before any other component writes logs. It centralises log file handling for the entire suite. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Compute logFilePath"] --> B{"Remove old file"} + B -- success --> C["Open new file"] + B -- error (not exist) --> C + B -- other error --> D["Return error"] + C --> E["SetupLogger(logFile, logLevel)"] + E --> F["Assign to globalLogFile"] + F --> G["Return nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_CreateGlobalLogFile --> os.Remove + func_CreateGlobalLogFile --> os.IsNotExist + func_CreateGlobalLogFile --> fmt.Errorf + func_CreateGlobalLogFile --> os.OpenFile + func_CreateGlobalLogFile --> SetupLogger +``` + +#### Functions calling `CreateGlobalLogFile` + +```mermaid +graph TD + certsuite.Startup --> func_CreateGlobalLogFile + webserver.runHandler --> func_CreateGlobalLogFile +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CreateGlobalLogFile +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Assume output directory and desired log level are known. + err := log.CreateGlobalLogFile("/var/log/certsuite", "debug") + if err != nil { + panic(err) + } +} +``` + +--- + +### CustomHandler.Enabled + +**Enabled** - Checks whether the supplied `level` meets or exceeds the handler’s configured minimum logging level. + +#### 1) Signature (Go) + +```go +func (h *CustomHandler) Enabled(_ context.Context, level slog.Level) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the supplied `level` meets or exceeds the handler’s configured minimum logging level. 
| +| **Parameters** | `_ context.Context` – ignored; ` slog.Level` – log entry’s severity. | +| **Return value** | `bool` – `true` if the message should be logged, otherwise `false`. | +| **Key dependencies** | Calls `h.opts.Level.Level()` to retrieve the threshold level from options. | +| **Side effects** | None; purely a decision function. | +| **How it fits the package** | Acts as part of the custom log handler’s filtering logic within the internal logging subsystem. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + getLevel["h.opts.Level.Level()"] --> compare["level >= threshold"] + compare --> returnBool["return bool"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_CustomHandler.Enabled --> func_Level +``` + +#### 5) Functions calling `CustomHandler.Enabled` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking CustomHandler.Enabled +package main + +import ( + "context" + "log/slog" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + h := &log.CustomHandler{ + opts: log.Options{Level: slog.LevelInfo}, // assumed struct for illustration + } + ctx := context.Background() + if h.Enabled(ctx, slog.LevelWarn) { + // proceed with logging the warning + } +} +``` + +--- + +### CustomHandler.Handle + +**Handle** - Formats and writes a single `slog.Record` as a log line: ` [TIME] [SOURCE_FILE] [CUSTOM_ATTRS] MSG\n`. + +#### Signature (Go) + +```go +func (h *CustomHandler) Handle(_ context.Context, r slog.Record) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats and writes a single `slog.Record` as a log line: ` [TIME] [SOURCE_FILE] [CUSTOM_ATTRS] MSG\n`. | +| **Parameters** | `_ context.Context – discarded;
` `r slog.Record – the record to be logged` | +| **Return value** | `error – any write error, otherwise nil` | +| **Key dependencies** | *`h.opts.ReplaceAttr` (optional)
* `slog.Any`, `slog.Time`, `slog.String`
* `CustomHandler.appendAttr`
* `runtime.CallersFrames`, `CallersFrames.Next()`
* `fmt.Sprintf`, `filepath.Base`
* `append` (built‑in)
* `h.mu.Lock/Unlock`
* `h.out.Write` | +| **Side effects** | *Mutates internal mutex lock.
* Writes formatted bytes to the handler’s output stream. | +| **How it fits the package** | Implements the `slog.Handler` interface for a custom log format used throughout the `certsuite/internal/log` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start Handle"] --> B["Determine level attribute"] + B --> C["Append level attr to buffer"] + C --> D{"Time present?"} + D -- yes --> E["Append time attr"] + D -- no --> F{"Source present?"} + F -- yes --> G["Resolve source via runtime.CallersFrames"] + G --> H["Append source attr"] + F -- no --> I + H --> I + I --> J["Loop over custom attrs"] + J --> K["Append each custom attr"] + K --> L["Append message attr"] + L --> M["Add newline to buffer"] + M --> N["Lock mutex"] + N --> O["Write buffer to output"] + O --> P["Unlock mutex"] + P --> Q["Return error if any, else nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CustomHandler_Handle --> func_ReplaceAttr + func_CustomHandler_Handle --> slog.Any + func_CustomHandler_Handle --> CustomHandler.appendAttr + func_CustomHandler_Handle --> runtime.CallersFrames + func_CustomHandler_Handle --> Next + func_CustomHandler_Handle --> fmt.Sprintf + func_CustomHandler_Handle --> filepath.Base + func_CustomHandler_Handle --> append + func_CustomHandler_Handle --> Lock + func_CustomHandler_Handle --> Unlock + func_CustomHandler_Handle --> Write +``` + +#### Functions calling `CustomHandler.Handle` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking CustomHandler.Handle +package main + +import ( + "context" + "log/slog" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Create a handler that writes to standard output + h := &log.CustomHandler{ + out: os.Stdout, + } + + // Construct a log record + r := slog.Record{ + Level: slog.LevelInfo, + Time: time.Now(), + Message: "Hello, world!", + } + + // Handle the record (normally called by slog) + if err := h.Handle(context.Background(), r); err != nil { + panic(err) + } +} +``` + +--- + +--- + +### CustomHandler.WithAttrs + +**WithAttrs** - Produces a new `slog.Handler` that includes the supplied attributes in addition to those already present on the receiver. If no attributes are provided, returns the original handler unchanged. + +#### Signature (Go) + +```go +func (h *CustomHandler) WithAttrs(attrs []slog.Attr) slog.Handler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a new `slog.Handler` that includes the supplied attributes in addition to those already present on the receiver. If no attributes are provided, returns the original handler unchanged. | +| **Parameters** | `attrs []slog.Attr` – slice of log attributes to append. | +| **Return value** | `slog.Handler` – a handler instance with combined attributes; may be the same instance if `attrs` is empty. | +| **Key dependencies** | • `len`, `make`, `copy`, `append` from the Go runtime
• `slog.Attr` type from the standard library | +| **Side effects** | Creates a shallow copy of the receiver and a new attribute slice; no mutation of the original handler or its fields. No I/O or concurrency concerns. | +| **How it fits the package** | Allows callers to extend a custom logging handler with context‑specific attributes while preserving immutability, aligning with the `slog.Handler` interface contract. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckEmpty["if len(attrs)==0"] + CheckEmpty -- Yes --> ReturnOriginal["return h"] + CheckEmpty -- No --> CopyHandler["h2 := *h"] + CopyHandler --> InitSlice["h2.attrs = make([]slog.Attr, len(h.attrs)+len(attrs))"] + InitSlice --> CopyExisting["copy(h2.attrs, h.attrs)"] + CopyExisting --> AppendNew["h2.attrs = append(h2.attrs, attrs...)"] + AppendNew --> ReturnCopy["return &h2"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CustomHandler.WithAttrs --> len + func_CustomHandler.WithAttrs --> make + func_CustomHandler.WithAttrs --> copy + func_CustomHandler.WithAttrs --> append +``` + +#### Functions calling `CustomHandler.WithAttrs` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking CustomHandler.WithAttrs +import ( + "log/slog" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + base := log.NewCustomHandler(slog.LevelInfo, slog.NewJSONEncoder(nil)) + // Add a user ID attribute to the handler + extended := base.WithAttrs([]slog.Attr{ + slog.String("user_id", "alice"), + }) + logger := slog.New(extended) + logger.Info("User logged in") +} +``` + +--- + +### CustomHandler.WithGroup + +**WithGroup** - Provides a placeholder implementation that returns `nil`, effectively discarding any grouping information. 
+ +#### Signature (Go) + +```go +func (h *CustomHandler) WithGroup(_ string) slog.Handler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides a placeholder implementation that returns `nil`, effectively discarding any grouping information. | +| **Parameters** | `_ string` – the name of the group; unused in this stub. | +| **Return value** | `slog.Handler` – always `nil`. | +| **Key dependencies** | None (no external calls). | +| **Side effects** | None – no state changes or I/O. | +| **How it fits the package** | Implements the `WithGroup` method required by the `slog.Handler` interface, allowing a `CustomHandler` to satisfy the interface contract while not supporting grouping. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + CustomHandler_WithGroup --> ReturnNil +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `CustomHandler.WithGroup` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking CustomHandler.WithGroup +package main + +import ( + "log/slog" +) + +type CustomHandler struct{} + +func (h *CustomHandler) WithGroup(_ string) slog.Handler { + return nil +} + +func main() { + var h CustomHandler + grouped := h.WithGroup("example") + if grouped == nil { + println("grouping not supported, received nil handler") + } +} +``` + +--- + +### Debug + +**Debug** - Emits a log record at the *debug* level using the package’s global logger. + +#### Signature (Go) + +```go +func(string, ...any)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits a log record at the *debug* level using the package’s global logger. | +| **Parameters** | `msg string –` format string for the message.
`args …any` – optional values to interpolate into `msg`. |
• Uses `globalLogger` and `LevelDebug`. | +| **Side effects** | Writes a log entry via the global logger; no state mutation beyond that. | +| **How it fits the package** | Provides a convenient, zero‑configuration wrapper for emitting debug logs from any package without passing a logger explicitly. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Debug --> Logf +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Debug --> func_Logf +``` + +#### Functions calling `Debug` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Debug +package main + +import "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" + +func main() { + log.Debug("Starting up with %d workers", 4) +} +``` + +--- + +### Error + +**Error** - Emits an error‑level log entry using the package’s global logger. + +#### Signature (Go) + +```go +func Error(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits an error‑level log entry using the package’s global logger. | +| **Parameters** | `msg string` – format string; `args ...any` – values for formatting. | +| **Return value** | None. | +| **Key dependencies** | Calls `Logf(globalLogger, LevelError, msg, args...)`. | +| **Side effects** | Writes a log record via the global logger’s handler; may terminate the program if the underlying handler is fatal. | +| **How it fits the package** | Provides a convenience wrapper for error‑level logging that hides the logger and level details from callers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Error --> Logf(globalLogger, LevelError) +``` + +#### Function dependencies + +```mermaid +graph TD + func_Error --> func_Logf +``` + +#### Functions calling `Error` + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking Error +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + log.Error("Failed to connect to %s: %v", "database", err) +} +``` + +--- + +### Fatal + +**Fatal** - Logs a fatal message at the *LevelFatal* severity and terminates the program with exit code 1. + +#### Signature (Go) + +```go +func Fatal(msg string, args ...any)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Logs a fatal message at the *LevelFatal* severity and terminates the program with exit code 1. | +| **Parameters** | `msg` – format string for the log message.
`args …any` – optional arguments to be formatted into `msg`. | +| **Return value** | None (function exits the process). | +| **Key dependencies** | • `Logf(globalLogger, LevelFatal, msg, args…)`
• `fmt.Fprintf(os.Stderr, ...)`
• `os.Exit(1)` | +| **Side effects** | Writes a formatted fatal message to standard error and terminates the running process. | +| **How it fits the package** | Provides a convenience wrapper for emitting critical errors that should stop execution immediately. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Fatal"] -->|"calls Logf(LevelFatal)"| B(Logf) + A -->|"writes to stderr"| C(fmt.Fprintf) + A -->|"exits process"| D(os.Exit) +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Fatal --> func_Logf +``` + +#### Functions calling `Fatal` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Fatal +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + log.Fatal("unexpected error: %v", err) // logs and exits +} +``` + +--- + +### GetLogger + +**GetLogger** - Provides access to the package‑wide `globalLogger`, enabling callers to log messages without exposing the underlying implementation. + +Retrieve the globally initialized logger instance used by the package. + +```go +func GetLogger() *Logger +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides access to the package‑wide `globalLogger`, enabling callers to log messages without exposing the underlying implementation. | +| **Parameters** | None | +| **Return value** | A pointer to the shared `Logger` instance (`*Logger`). | +| **Key dependencies** | *None – the function simply returns a global variable.* | +| **Side effects** | No state changes; purely read‑only access. | +| **How it fits the package** | Serves as the public entry point for obtaining the logger configured elsewhere in the `log` package, promoting encapsulation of logging logic. 
| + +#### Internal workflow + +```mermaid +flowchart TD + GetLogger --> globalLogger +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `GetLogger` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetLogger +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + logger := log.GetLogger() + logger.Info("Application started") +} +``` + +--- + +### GetMultiLogger + +**GetMultiLogger** - Builds a `*Logger` that forwards all log entries to every writer supplied, plus any global logger already configured. The logger uses custom attribute formatting for log levels. + +#### Signature (Go) + +```go +func GetMultiLogger(writers ...io.Writer) *Logger +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `*Logger` that forwards all log entries to every writer supplied, plus any global logger already configured. The logger uses custom attribute formatting for log levels. | +| **Parameters** | ` io.Writer… – One or more writers (e.g., buffers, files) where logs should be emitted. | +| **Return value** | `*Logger` – a wrapper around an `slog.Logger` that dispatches to the created multi‑handler. | +| **Key dependencies** | • `log/slog.HandlerOptions`, `log/slog.NewMultiHandler`,
• `NewCustomHandler` (internal helper)
• `globalLogLevel`, `CustomLevelNames`, `globalLogger` (package globals) | +| **Side effects** | Creates handler objects and stores them in a new logger; does not modify external state beyond logger construction. No I/O occurs here—logging happens when the returned logger is used. | +| **How it fits the package** | Provides a convenient way for other parts of the application (e.g., checks, web handlers) to obtain a unified logging surface that writes simultaneously to console, files, or in‑memory buffers. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive writers"] --> B{"Is global logger set?"} + B -- Yes --> C["Add global handler"] + B -- No --> D["Skip"] + C & D --> E["Loop over each writer"] + E --> F["Create CustomHandler with options"] + F --> G["Collect all handlers"] + G --> H["Instantiate NewMultiHandler"] + H --> I["Wrap in slog.New and return Logger"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetMultiLogger --> func_NewCustomHandler + func_GetMultiLogger --> log/slog/NewMultiHandler + func_GetMultiLogger --> log/slog/ReplaceAttr +``` + +#### Functions calling `GetMultiLogger` + +```mermaid +graph TD + func_NewCheck --> func_GetMultiLogger + runHandler --> func_GetMultiLogger +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetMultiLogger +package main + +import ( + "os" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Log to both standard output and a file. + file, _ := os.Create("app.log") + defer file.Close() + + logger := log.GetMultiLogger(os.Stdout, file) + logger.Info("Application started") +} +``` + +--- + +--- + +### Info + +**Info** - Emit an informational log entry by delegating to the package’s `Logf` helper. + +Logs a message at the *info* level using the global logger. 
+ +```go +func Info(msg string, args ...any) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emit an informational log entry by delegating to the package’s `Logf` helper. | +| **Parameters** | `msg string` – format string; `` – optional arguments for formatting. | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | Calls `Logf(globalLogger, LevelInfo, msg, args...)`. | +| **Side effects** | Writes to the global logger’s handler; may terminate program if log level parsing fails. | +| **How it fits the package** | Provides a convenient shorthand for logging at the *info* level without requiring callers to specify the logger or level explicitly. | + +#### Internal workflow + +```mermaid +flowchart TD + func_Info --> func_Logf +``` + +#### Function dependencies + +```mermaid +graph TD + func_Info --> func_Logf +``` + +#### Functions calling `Info` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example + +```go +// Minimal example invoking Info +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + log.Info("Starting application with args: %v", os.Args) +} +``` + +--- + +### Logf + +**Logf** - Formats and records a log entry using the provided `*Logger` at the given textual level. If the logger is nil it falls back to a default instance. It ensures that the call site information is correctly captured for accurate source references. + +#### Signature (Go) + +```go +func Logf(logger *Logger, level, format string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats and records a log entry using the provided `*Logger` at the given textual level. If the logger is nil it falls back to a default instance. It ensures that the call site information is correctly captured for accurate source references. | +| **Parameters** | `logger *Logger` – target logger (may be nil).
`level string` – human‑readable log level (“debug”, “info”, etc.).
`format string` – printf‑style format string.
`args ...any` – values to interpolate into the format string. | +| **Return value** | None (void). The function may terminate the process if the level string cannot be parsed (a fatal error is reported and the process exits). | +| **Key dependencies** | • `log/slog.Default()` for default logger<br>
• `parseLevel` to map level string → `slog.Level`
• `runtime.Callers` to capture stack frame
• `time.Now`, `fmt.Sprintf`, `slog.NewRecord`, and the logger’s handler via `.Handler().Handle` | +| **Side effects** | • May call `logger.Fatal` which writes to stderr and exits.
• Emits a log record via the handler (potential I/O).
• No global state mutation beyond the logger passed in. | +| **How it fits the package** | Central logging helper used by all wrapper functions (`Debug`, `Info`, etc.) and by higher‑level components (`Check.Log…`). It guarantees that logs include correct source location data and respects the configured log level hierarchy. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"logger nil?"} + B -- yes --> C["Create default Logger"] + B -- no --> E["Parse level string"] + C --> E + E --> F{"parse error?"} + F -- yes --> G["Fatal error → exit"] + F -- no --> H["Check handler enabled?"] + H -- false --> I["Return early"] + H -- true --> J["Capture caller PC"] + J --> K["Build slog.Record"] + K --> L["Invoke handler.Handle"] + L --> M["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Logf --> func_Default + func_Logf --> parseLevel + func_Logf --> runtime.Callers + func_Logf --> slog.NewRecord + func_Logf --> time.Now + func_Logf --> fmt.Sprintf + func_Logf --> Handler.Handle +``` + +#### Functions calling `Logf` (Mermaid) + +```mermaid +graph TD + Debug --> Logf + Error --> Logf + Fatal --> Logf + Info --> Logf + Warn --> Logf + Logger.Debug --> Logf + Logger.Error --> Logf + Logger.Fatal --> Logf + Logger.Info --> Logf + Logger.Warn --> Logf + Check.LogDebug --> Logf + Check.LogError --> Logf + Check.LogFatal --> Logf + Check.LogInfo --> Logf + Check.LogWarn --> Logf +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Logf +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Use the global logger or create a custom one. + log.Logf(log.DefaultLogger(), "info", "Hello %s!", "world") +} +``` + +--- + +### Logger.Debug + +**Debug** - Emits a formatted log entry at the *debug* level. The message is forwarded to the underlying `slog.Logger` only if that level is enabled for the current context. 
+ +#### 1) Signature (Go) + +```go +func (logger *Logger) Debug(msg string, args ...any) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits a formatted log entry at the *debug* level. The message is forwarded to the underlying `slog.Logger` only if that level is enabled for the current context. | +| **Parameters** | `msg string` – format string; `args ...any` – values to substitute in the format string (similar to `fmt.Sprintf`). | +| **Return value** | None – the function writes to the logger and returns immediately. | +| **Key dependencies** | • Calls `Logf(logger, LevelDebug, msg, args...)`.
• Relies on the global logging configuration (`LevelDebug` constant). | +| **Side effects** | Writes a log record via the wrapped `slog.Logger`; may trigger I/O to stdout or a file depending on logger setup. No state mutation beyond that. | +| **How it fits the package** | Provides a convenient, type‑safe shortcut for emitting debug messages; used throughout the codebase wherever diagnostic information is desired without cluttering with level checks. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Logger.Debug(msg,args...)"] --> B["Logf(logger, LevelDebug, msg, args...)"] + B --> C["slog.Logger emits record if enabled"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Logger.Debug --> func_Logf +``` + +#### 5) Functions calling `Logger.Debug` (Mermaid) + +```mermaid +graph TD + func_Clientsholder.ExecCommandContainer --> func_Logger.Debug + func_GetContainerPidNamespace --> func_Logger.Debug + func_GetPidFromContainer --> func_Logger.Debug + func_CompressResultsArtifacts --> func_Logger.Debug + func_GetCertIDFromConnectAPI --> func_Logger.Debug + func_SendResultsToConnectAPI --> func_Logger.Debug + func_sendRequest --> func_Logger.Debug + func_setProxy --> func_Logger.Debug + func_DoAutoDiscover --> func_Logger.Debug +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Logger.Debug +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + logger := log.NewLogger(log.LevelDebug) + logger.Debug("Starting process %d with args: %+v", 42, []string{"foo", "bar"}) +} +``` + +This example creates a logger at the debug level and writes a formatted message using `Logger.Debug`. + +--- + +### Logger.Error + +**Error** - Records an error‑level log entry. It forwards the formatted message to `Logf`, specifying `LevelError`. 
+ +#### Signature (Go) + +```go +func (logger *Logger) Error(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Records an error‑level log entry. It forwards the formatted message to `Logf`, specifying `LevelError`. | +| **Parameters** | `msg string` – format string for the log message.
`args ...any` – optional arguments that are substituted into `msg`. | +| **Return value** | None. The function performs side effects only. | +| **Key dependencies** | - Calls `Logf(logger, LevelError, msg, args...)`.
- Relies on the global log level handling in `Logf`. | +| **Side effects** | Emits a formatted log record to the logger’s underlying `slog.Logger`. No state is returned. | +| **How it fits the package** | Provides a convenient, type‑safe method for emitting error logs within the internal logging package. It abstracts away the level handling and integrates with the rest of the logging infrastructure. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Logger.Error"] --> B{"Format message"} + B --> C["Call Logf(logger, LevelError, msg, args...)"] + C --> D["Logf checks level and handles record"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Logger.Error --> func_Logf +``` + +#### Functions calling `Logger.Error` (Mermaid) + +```mermaid +graph TD + func_main --> func_Logger.Error + func_NewCommand --> func_Logger.Error + func_addPreflightTestsToCatalog --> func_Logger.Error + func_outputJS --> func_Logger.Error + func_outputTestCases --> func_Logger.Error + func_ExecCommandContainer --> func_Logger.Error + func_getMachineConfig --> func_Logger.Error + func_GetPidsFromPidNamespace --> func_Logger.Error + func_setProxy --> func_Logger.Error + func_CreateLabels --> func_Logger.Error + func_DoAutoDiscover --> func_Logger.Error + func_getCatalogSourceBundleCountFromProbeContainer --> func_Logger.Error +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Logger.Error +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + logger := log.NewDefault() + err := someOperation() + if err != nil { + logger.Error("operation failed: %v", err) + } +} + +func someOperation() error { + return fmt.Errorf("example failure") +} +``` + +--- + +--- + +### Logger.Fatal + +**Fatal** - Emits a fatal log message at level *Fatal*, writes it to standard‑error, and exits the process with status 1. 
+ +#### Signature (Go) + +```go +func (logger *Logger) Fatal(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits a fatal log message at level *Fatal*, writes it to standard‑error, and exits the process with status 1. | +| **Parameters** | `msg` – format string; `args...` – optional arguments for formatting. | +| **Return value** | None (terminates execution). | +| **Key dependencies** | - `Logf(logger, LevelFatal, msg, args...)` to record the message via the logger’s handler.
- `fmt.Fprintf(os.Stderr, …)` to output a human‑readable fatal line.
- `os.Exit(1)` to terminate. | +| **Side effects** | - Adds a log record at *Fatal* level.
- Writes to standard‑error.
- Stops the process immediately; any deferred functions are not run. | +| **How it fits the package** | Provides a convenience wrapper for critical failures, ensuring consistent formatting and immediate program termination across the `log` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive msg & args"] --> B{"Format message"} + B --> C["Call Logf with LevelFatal"] + C --> D["Print to stderr via fmt.Fprintf"] + D --> E["Exit(1)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Logger.Fatal --> func_Logf + func_Logger.Fatal --> pkg_fmt.Fprintf + func_Logger.Fatal --> pkg_os.Exit +``` + +#### Functions calling `Logger.Fatal` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Logger.Fatal +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + logger := log.NewLogger() + // Simulate a critical error condition + if err := doSomething(); err != nil { + logger.Fatal("Critical failure: %v", err) + } +} + +func doSomething() error { + return fmt.Errorf("simulated error") +} +``` + +*The program will print the formatted message to standard‑error and terminate with exit code 1.* + +--- + +### Logger.Info + +**Info** - Formats a message with optional arguments and records it at the *Info* level via `Logf`. + +#### Signature (Go) + +```go +func (logger *Logger) Info(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats a message with optional arguments and records it at the *Info* level via `Logf`. | +| **Parameters** | `msg` – format string (similar to `fmt.Sprintf`).
`args…any` – variadic values used in formatting. | +| **Return value** | None | +| **Key dependencies** | Calls: `Logf(logger, LevelInfo, msg, args...)`. Uses the global log level constants (`LevelInfo`). | +| **Side effects** | Writes a formatted record to the logger’s underlying handler if the *Info* level is enabled. No state changes on the caller beyond logging. | +| **How it fits the package** | Provides a convenient wrapper for emitting informational logs, used throughout the codebase (e.g., during startup, configuration loading, and operation progress). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive msg & args"] --> B{"Check logger"} + B -->|"nil"| C["Create default Logger"] + B -->|"valid"| D["Call Logf with LevelInfo"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Logger.Info --> func_Logf +``` + +#### Functions calling `Logger.Info` (Mermaid) + +```mermaid +graph TD + func_runTestSuite --> func_Logger.Info + func_getClusterRestConfig --> func_Logger.Info + func_newClientsHolder --> func_Logger.Info + func_CompressResultsArtifacts --> func_Logger.Info + func_GetCertIDFromConnectAPI --> func_Logger.Info + func_SendResultsToConnectAPI --> func_Logger.Info + func_DoAutoDiscover --> func_Logger.Info +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Logger.Info +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Obtain a logger instance (could be the global one) + logger := log.GetLogger() + + // Emit an informational message with formatted arguments + logger.Info("Processing %d items, current status: %s", 42, "running") +} +``` + +--- + +### Logger.Warn + +**Warn** - Records a log entry at the warning level using the configured `*slog.Logger`. 
+ +#### Signature (Go) + +```go +func (logger *Logger) Warn(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Records a log entry at the warning level using the configured `*slog.Logger`. | +| **Parameters** | `msg` – format string; `args` – optional values to interpolate into `msg`. | +| **Return value** | None (void). | +| **Key dependencies** | • Calls the package‑level helper `Logf` with `LevelWarn`.
• Relies on `slog.Logger` for formatting and handling. | +| **Side effects** | Writes a log record via the underlying logger’s handler; may trigger side‑effects of that handler (e.g., file writes, network output). Does not modify any package‑level state. | +| **How it fits the package** | Provides a convenient method on `Logger` for emitting warning messages, delegating to the generic `Logf` routine so all level‑specific methods share the same formatting and dispatch logic. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Logger.Warn(msg,args) --> Logf(logger,LevelWarn,msg,args) +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Logf --> func_parseLevel + func_Logf --> func_slog.NewRecord + func_Logf --> func_handler.Handle +``` + +#### Functions calling `Logger.Warn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Logger.Warn +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + logger := log.NewLogger() + logger.Warn("Disk space low: %d%% remaining", 5) +} +``` + +--- + +### Logger.With + +**With** - Returns a new `*Logger` that inherits the underlying logger but augments it with extra key/value pairs supplied in `args`. This allows adding contextual information to log messages. + +#### Signature (Go) + +```go +func (logger *Logger) With(args ...any) *Logger +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a new `*Logger` that inherits the underlying logger but augments it with extra key/value pairs supplied in `args`. This allows adding contextual information to log messages. | +| **Parameters** | `args ...any –` variadic arguments typically alternating keys and values (e.g., `"user", userID`). | +| **Return value** | `*Logger –` a new logger instance that wraps the original logger with the added context. 
| +| **Key dependencies** | Calls the underlying logger’s `With(args ...)` method (`logger.l.With(args...)`). | +| **Side effects** | No state is mutated on the original logger; a fresh wrapper is returned. It may allocate memory for the new logger instance but does not perform I/O or alter global state. | +| **How it fits the package** | Provides a convenient, fluent API for enriching log entries while preserving immutability of existing logger instances. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive *Logger & args"] --> B{"Create new Logger"} + B --> C["Set l field to logger.l.With(args...)"] + C --> D["*Logger returned"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Logger.With --> func_underlying_logger.With +``` + +#### Functions calling `Logger.With` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Logger.With +logger := log.New() // assume a constructor that returns *Logger +userID := 42 +ctxLogger := logger.With("user", userID) // create a new logger with added context + +ctxLogger.Info("User logged in") // logs: ... "user"=42 ... +``` + +--- + +--- + +### MultiHandler.Enabled + +**Enabled** - Returns `true` if at least one of the wrapped handlers is enabled for the supplied log level; otherwise returns `false`. + +#### 1) Signature (Go) + +```go +func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if at least one of the wrapped handlers is enabled for the supplied log level; otherwise returns `false`. | +| **Parameters** | `ctx context.Context –` execution context (may carry deadlines or cancellation signals)
`level slog.Level` – severity of the log message to test |
+| **Return value** | `bool` – whether logging should proceed at this level |
+| **Key dependencies** | Calls `Enabled(ctx, level)` on each handler stored in `h.handlers`. |
+| **Side effects** | None; purely a read‑only check. |
+| **How it fits the package** | Implements the `slog.Handler` interface for a composite handler that aggregates multiple concrete handlers, enabling selective logging based on any constituent’s configuration. |
+
+#### 3) Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Iterate over h.handlers"}
+    B -->|"handler.Enabled returns true"| C["Return true"]
+    B -->|"all false"| D["Return false"]
+```
+
+#### 4) Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_MultiHandler.Enabled --> func_Handler.Enabled
+```
+
+#### 5) Functions calling `MultiHandler.Enabled` (Mermaid)
+
+```mermaid
+graph TD
+    func_Logf --> func_MultiHandler.Enabled
+```
+
+#### 6) Usage example (Go)
+
+```go
+// Minimal example invoking MultiHandler.Enabled
+package main
+
+import (
+	"context"
+	"log/slog"
+
+	"github.com/redhat-best-practices-for-k8s/certsuite/internal/log"
+)
+
+func main() {
+	// Assume mh is a *MultiHandler instance that has been configured elsewhere.
+	var mh *log.MultiHandler
+
+	ctx := context.Background()
+	if mh.Enabled(ctx, slog.LevelWarn) {
+		// Proceed with logging at warning level
+	}
+}
+```
+
+---
+
+### MultiHandler.Handle
+
+**Handle** - Sends a single `slog.Record` to each handler stored in the receiver’s `handlers` slice. If any handler returns an error, the dispatch stops and that error is propagated.
+
+#### Signature (Go)
+
+```go
+func (h *MultiHandler) Handle(ctx context.Context, r slog.Record) error
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Sends a single `slog.Record` to each handler stored in the receiver’s `handlers` slice. <br>
If any handler returns an error, the dispatch stops and that error is propagated. | +| **Parameters** | `ctx context.Context –` execution context for the log operation.
`r slog.Record –` the log entry to be processed (cloned per child). | +| **Return value** | `error –` the first non‑nil error returned by a child handler, or `nil` if all succeed. | +| **Key dependencies** | • Calls each child’s `Handle(ctx, r.Clone())`.
• Uses `slog.Record.Clone()` to provide an independent copy per handler. | +| **Side effects** | No state is mutated on the receiver; it merely forwards records. The function may trigger I/O or other side effects performed by child handlers. | +| **How it fits the package** | Implements `slog.Handler` for a composite that aggregates multiple underlying handlers, enabling multi‑destination logging (e.g., console + file). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"for each handler"} + B --> C{"handle record"} + C --> D{"error?"} + D -- yes --> E["Return error"] + D -- no --> F{"next handler"} + F --> B + B --> G["All handled"] + G --> H["Return nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_MultiHandler_Handle --> func_Handler_Handle + func_MultiHandler_Handle --> func_Record_Clone +``` + +#### Functions calling `MultiHandler.Handle` + +```mermaid +graph TD + func_Logf --> func_MultiHandler_Handle +``` + +#### Usage example (Go) + +```go +// Minimal example invoking MultiHandler.Handle +import ( + "context" + "log/slog" +) + +type dummyHandler struct{} + +func (d *dummyHandler) Handle(ctx context.Context, r slog.Record) error { + // Process the record (e.g., print to stdout) + return nil +} + +func main() { + mh := &MultiHandler{handlers: []slog.Handler{ + &dummyHandler{}, + &dummyHandler{}, + }} + + rec := slog.NewRecord(time.Now(), slog.LevelInfo, "hello world", 0) + _ = mh.Handle(context.Background(), rec) // dispatch to all handlers +} +``` + +--- + +--- + +### MultiHandler.WithAttrs + +**WithAttrs** - Produces a new `slog.Handler` that forwards each log record to all underlying handlers with the supplied attributes appended. 
+ +#### Signature (Go) + +```go +func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a new `slog.Handler` that forwards each log record to all underlying handlers with the supplied attributes appended. | +| **Parameters** | `attrs []slog.Attr` – Attributes to add to every handler’s context. | +| **Return value** | A `slog.Handler` (specifically a `*MultiHandler`) that wraps the original handlers after they have been augmented by `WithAttrs`. | +| **Key dependencies** | • `make`, `len` for slice allocation.
• Calls each underlying handler’s `WithAttrs(attrs)` method.
• Wraps results with `NewMultiHandler`. | +| **Side effects** | None; it is purely functional and does not modify the original `MultiHandler`. | +| **How it fits the package** | Provides attribute propagation for composite handlers, enabling consistent metadata across multiple output sinks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Allocate slice of length len(h.handlers)"] --> B["Iterate over h.handlers"] + B --> C{"For each handler"} + C --> D["Call handler.WithAttrs(attrs)"] + D --> E["Store result in handlersWithAttrs{i}"] + E --> F["Return NewMultiHandler(handlersWithAttrs...)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_MultiHandler.WithAttrs --> func_make + func_MultiHandler.WithAttrs --> func_len + func_MultiHandler.WithAttrs --> func_Handler.WithAttrs + func_MultiHandler.WithAttrs --> func_NewMultiHandler +``` + +#### Functions calling `MultiHandler.WithAttrs` (Mermaid) + +```mermaid +graph TD + func_MultiHandler.WithAttrs --> func_MultiHandler.WithAttrs +``` + +(Note: the only caller listed is itself, indicating no external usage within the package.) + +#### Usage example (Go) + +```go +// Minimal example invoking MultiHandler.WithAttrs +import ( + "log/slog" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Assume we have two underlying handlers. + h1 := slog.NewJSONHandler(os.Stdout, nil) + h2 := slog.NewTextHandler(os.Stderr, nil) + + // Create a MultiHandler that writes to both sinks. + mh := log.NewMultiHandler(h1, h2) + + // Add attributes that should appear in all logs emitted through the multi handler. 
+ attrs := []slog.Attr{ + slog.String("app", "certsuite"), + slog.Int("version", 42), + } + enriched := mh.WithAttrs(attrs) + + logger := slog.New(enriched) + logger.Info("Starting up") +} +``` + +--- + +### MultiHandler.WithGroup + +**WithGroup** - Creates a new handler that prefixes every log record with the specified group name for each underlying handler. + +#### Signature (Go) + +```go +func (h *MultiHandler) WithGroup(name string) slog.Handler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a new handler that prefixes every log record with the specified group name for each underlying handler. | +| **Parameters** | `name` *(string)* – The group name to apply. | +| **Return value** | A `slog.Handler` instance (a new `MultiHandler`) that delegates to the original handlers, each wrapped with the group. | +| **Key dependencies** | • `make` – allocates a slice for the wrapped handlers.
• `len` – obtains the number of existing handlers.
• `NewMultiHandler` – constructs the new composite handler.
• Each underlying handler’s `WithGroup` method. | +| **Side effects** | None; pure function that returns a new value without mutating its receiver or global state. | +| **How it fits the package** | Provides group scoping for log messages when using multiple output destinations, maintaining consistency across all handlers in the composite. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive *MultiHandler"] --> B["Allocate slice of same length"] + B --> C["Iterate over h.handlers"] + C --> D["Call handler.WithGroup(name) for each"] + D --> E["Collect wrapped handlers"] + E --> F["Return NewMultiHandler(wrapped...)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_MultiHandler.WithGroup --> make + func_MultiHandler.WithGroup --> len + func_MultiHandler.WithGroup --> slog.Handler + func_MultiHandler.WithGroup --> NewMultiHandler + func_MultiHandler.WithGroup --> handler.WithGroup +``` + +#### Functions calling `MultiHandler.WithGroup` (Mermaid) + +```mermaid +graph TD + func_MultiHandler.WithGroup --> func_MultiHandler.WithGroup +``` + +#### Usage example (Go) + +```go +// Minimal example invoking MultiHandler.WithGroup +import ( + "log/slog" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Assume we have two underlying handlers + h1 := slog.NewTextHandler(os.Stdout, nil) + h2 := slog.NewJSONHandler(os.Stderr, nil) + + // Create a composite handler + mh := log.NewMultiHandler(h1, h2) + + // Add a group to all wrapped handlers + groupedHandler := mh.WithGroup("request") + + // Use the new handler + logger := slog.New(groupedHandler) + logger.Info("Processing request") +} +``` + +--- + +### NewCustomHandler + +**NewCustomHandler** - Constructs a `*CustomHandler` that routes log output to the supplied `io.Writer`, applying optional `slog.HandlerOptions`. If no options are provided, defaults are used. 
+ +#### Signature (Go) + +```go +func NewCustomHandler(out io.Writer, opts *slog.HandlerOptions) *CustomHandler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs a `*CustomHandler` that routes log output to the supplied `io.Writer`, applying optional `slog.HandlerOptions`. If no options are provided, defaults are used. | +| **Parameters** | - `out io.Writer` – destination for formatted log entries.
- `opts *slog.HandlerOptions` – pointer to handler configuration; may be `nil`. | +| **Return value** | A pointer to the newly created `CustomHandler`. | +| **Key dependencies** | - `sync.Mutex` (for concurrent access protection).
- `slog.LevelInfo` (default log level). | +| **Side effects** | No global state changes; only allocates and returns a handler. | +| **How it fits the package** | Used by higher‑level functions (`SetupLogger`, `GetMultiLogger`) to create individual handlers that can be combined into multi‑handler loggers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"opts provided?"} + B -- yes --> C["Copy opts to handler"] + B -- no --> D["Use default options"] + C & D --> E["Ensure Level set, default to Info"] + E --> F["Return handler instance"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_NewCustomHandler +``` + +#### Functions calling `NewCustomHandler` (Mermaid) + +```mermaid +graph TD + func_GetMultiLogger --> func_NewCustomHandler + func_SetupLogger --> func_NewCustomHandler +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewCustomHandler +package main + +import ( + "os" + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + writer := os.Stdout + opts := &log.slog.HandlerOptions{ + Level: slog.LevelDebug, + } + handler := log.NewCustomHandler(writer, opts) + log.Printf("Created handler: %v", handler) +} +``` + +--- + +--- + +### NewMultiHandler + +**NewMultiHandler** - Builds a `*MultiHandler` that forwards logging events to multiple underlying handlers. + +#### Signature (Go) + +```go +func NewMultiHandler(handlers ...slog.Handler) *MultiHandler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `*MultiHandler` that forwards logging events to multiple underlying handlers. | +| **Parameters** | ` …slog.Handler – zero or more handlers that will receive log records. | +| **Return value** | A pointer to the newly created `MultiHandler`. | +| **Key dependencies** | *None – the function only creates a struct instance. 
| +| **Side effects** | No I/O, state mutation, or concurrency; simply allocates memory. | +| **How it fits the package** | Acts as a factory for the composite handler used by higher‑level loggers (`GetMultiLogger`) and by other `MultiHandler` methods (`WithAttrs`, `WithGroup`). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + NewMultiHandler --> CreateStruct["Allocate & initialize MultiHandler"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `NewMultiHandler` (Mermaid) + +```mermaid +graph TD + GetMultiLogger --> NewMultiHandler +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewMultiHandler +package main + +import ( + "log/slog" + "os" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + file, _ := os.Create("app.log") + defer file.Close() + + handler1 := slog.NewJSONHandler(file, &slog.HandlerOptions{Level: slog.LevelInfo}) + handler2 := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}) + + multi := log.NewMultiHandler(handler1, handler2) + logger := slog.New(multi) + + logger.Info("Application started") +} +``` + +--- + +### SetLogger + +**SetLogger** - Stores the supplied `*Logger` in the package‑wide variable `globalLogger`, making it available to all logging helpers. + +#### Signature (Go) + +```go +func SetLogger(l *Logger) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores the supplied `*Logger` in the package‑wide variable `globalLogger`, making it available to all logging helpers. | +| **Parameters** | `l *Logger` – pointer to a Logger instance that will become the global logger. | +| **Return value** | None (void). | +| **Key dependencies** | - Modifies the package variable `globalLogger`. | +| **Side effects** | Sets shared state; subsequent calls to logging functions use this logger for output. 
No I/O or concurrency is performed directly by the function. |
+| **How it fits the package** | Provides a public API to inject a custom logger (e.g., for tests or alternative back‑ends) into the internal log package. |
+
+#### Internal workflow
+
+```mermaid
+flowchart TD
+    A["SetLogger(l)"] --> B["Assigns l to globalLogger"]
+```
+
+#### Function dependencies
+
+None – this function is currently not referenced elsewhere in the package.
+
+#### Functions calling `SetLogger`
+
+```mermaid
+graph TD
+    runHandler --> SetLogger
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking SetLogger
+package main
+
+import (
+	"github.com/redhat-best-practices-for-k8s/certsuite/internal/log"
+)
+
+func main() {
+	logger := log.GetMultiLogger() // create a logger instance
+	log.SetLogger(logger)          // install it as the global logger
+}
+```
+
+---
+
+### SetupLogger
+
+**SetupLogger** - Parses the supplied textual log level, configures a custom `slog` handler that formats level labels, and assigns the resulting logger to the global variable.
+
+Initialises the package‑wide logger with a specified output writer and log level.
+
+```go
+func SetupLogger(logWriter io.Writer, level string)
+```
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Parses the supplied textual log level, configures a custom `slog` handler that formats level labels, and assigns the resulting logger to the global variable. |
+| **Parameters** | `logWriter io.Writer` – destination for log output<br>
`level string` – desired verbosity (e.g., “debug”, “info”) | +| **Return value** | None | +| **Key dependencies** | *parseLevel* – converts level string to `slog.Level`.<br>
*fmt.Fprintf* – writes an error message on failure.
*slog.New* – creates a standard logger instance.
*NewCustomHandler* – builds the handler that applies custom formatting. | +| **Side effects** | Mutates global variables: `globalLogLevel`, `globalLogger`. Writes to `os.Stderr` if level parsing fails. | +| **How it fits the package** | Provides a single, reusable entry point for setting up logging across the application; other components invoke this during initialization (e.g., `CreateGlobalLogFile`). | + +```mermaid +flowchart TD + A["SetupLogger"] --> B["parseLevel(level)"] + A --> C{"parse success?"} + C -- yes --> D["set globalLogLevel"] + C -- no --> E["fmt.Fprintf(os.Stderr, ...)"] + A --> F["slog.New(NewCustomHandler(logWriter, &opts))"] + F --> G["globalLogger = &Logger{l: }"] +``` + +```mermaid +graph TD + func_SetupLogger --> func_parseLevel + func_SetupLogger --> fmt.Fprintf + func_SetupLogger --> slog.New + func_SetupLogger --> func_NewCustomHandler +``` + +```mermaid +graph TD + func_CreateGlobalLogFile --> func_SetupLogger +``` + +#### Usage example (Go) + +```go +// Minimal example invoking SetupLogger +package main + +import ( + "os" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" +) + +func main() { + // Use standard output and set level to "info". + log.SetupLogger(os.Stdout, "info") + + // The global logger can now be used throughout the application. +} +``` + +--- + +### Warn + +**Warn** - Sends a formatted warning message to the global logger. + +#### Signature + +```go +func Warn(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sends a formatted warning message to the global logger. | +| **Parameters** | `msg string` – format string; `args …any` – optional arguments for formatting. | +| **Return value** | None | +| **Key dependencies** | Calls `Logf(globalLogger, LevelWarn, msg, args...)`. | +| **Side effects** | Writes a log record at warning level to the global logger (which may persist to file or stdout). 
| +| **How it fits the package** | Part of the internal logging API; provides a convenient shortcut for emitting warnings without explicitly referencing the logger. | + +#### Internal workflow + +```mermaid +flowchart TD + Warn --> Logf +``` + +#### Function dependencies + +```mermaid +graph TD + func_Warn --> func_Logf +``` + +#### Functions calling `Warn` + +```mermaid +graph TD + func_GetScaleCrUnderTest --> func_Warn + func_findDeploymentsByLabels --> func_Warn + func_findStatefulSetsByLabels --> func_Warn + func_getOpenshiftVersion --> func_Warn + func_isIstioServiceMeshInstalled --> func_Warn +``` + +#### Usage example + +```go +// Minimal example invoking Warn +func main() { + // Assume globalLogger has been initialized elsewhere. + log.Warn("Resource %q is deprecated", "my-resource") +} +``` + +--- + +## Local Functions + +### CustomHandler.appendAttr + +**appendAttr** - Formats a single `slog.Attr` into the log line and appends it to an existing byte buffer. Handles different attribute kinds (string, time, level, generic). + +#### 1) Signature (Go) + +```go +func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats a single `slog.Attr` into the log line and appends it to an existing byte buffer. Handles different attribute kinds (string, time, level, generic). | +| **Parameters** | - `buf []byte` – current output buffer.
- `a slog.Attr` – attribute to format and append. | +| **Return value** | Updated buffer containing the formatted attribute. | +| **Key dependencies** | • `slog.Attr.Resolve()`
• `slog.Attr.Equal(slog.Attr{})`
• `slog.Attr.Value.Kind()`
• `fmt.Appendf`
• `time.StampMilli` (via `a.Value.Time().Format`) |
+| **Side effects** | No external I/O or state mutation; only modifies the returned buffer. |
+| **How it fits the package** | Internal helper used by `CustomHandler.Handle` to build log lines from individual attributes (level, time, source, custom attrs, message). |
+
+#### 3) Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Resolve Attr"}
+    B --> C{"Is Empty?"}
+    C -- Yes --> D["Return buf"]
+    C -- No --> E["Switch on Kind"]
+    E -->|"KindString"| F["Format string or message"]
+    E -->|"KindTime"| G["Format time with StampMilli"]
+    E -->|"Other"| H["Generic formatting (level or key/value)"]
+    F & G & H --> I["Append to buf"]
+    I --> J["Return buf"]
+```
+
+#### 4) Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_CustomHandler.appendAttr --> Resolve
+    func_CustomHandler.appendAttr --> Equal
+    func_CustomHandler.appendAttr --> Kind
+    func_CustomHandler.appendAttr --> fmt.Appendf
+    func_CustomHandler.appendAttr --> String
+    func_CustomHandler.appendAttr --> Format
+    func_CustomHandler.appendAttr --> Time
+```
+
+#### 5) Functions calling `CustomHandler.appendAttr` (Mermaid)
+
+```mermaid
+graph TD
+    CustomHandler.Handle --> func_CustomHandler.appendAttr
+```
+
+#### 6) Usage example (Go)
+
+```go
+// Minimal example invoking CustomHandler.appendAttr
+// NOTE: appendAttr is unexported, so this snippet only compiles inside the log package itself.
+package main
+
+import (
+    "fmt"
+    "log/slog"
+    "os"
+
+    "github.com/redhat-best-practices-for-k8s/certsuite/internal/log"
+)
+
+func main() {
+    h := log.NewCustomHandler(os.Stdout, nil)
+    buf := []byte{}
+    attr := slog.String("user", "alice")
+    buf = h.appendAttr(buf, attr)
+    fmt.Println(string(buf)) // prints: [user: alice]
+}
+```
+
+---
+
+---
+
+### parseLevel
+
+**parseLevel** - Translates a textual log level (e.g., `"debug"`, `"info"`) into the corresponding `slog.Level` value. If the input is not recognized, returns an error. 
+
+#### Signature (Go)
+
+```go
+func parseLevel(level string) (slog.Level, error)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Translates a textual log level (e.g., `"debug"`, `"info"`) into the corresponding `slog.Level` value. If the input is not recognized, returns an error. |
+| **Parameters** | `level string` – the textual representation of the desired log level |
+| **Return value** | `slog.Level` – the matching slog level; `error` – non‑nil if the input cannot be parsed |
+| **Key dependencies** | *`strings.ToLower` (to make comparison case‑insensitive)<br/>
* `fmt.Errorf` (for error construction) | +| **Side effects** | None – purely functional. | +| **How it fits the package** | Used by higher‑level logging helpers (`Logf`, `SetupLogger`) to interpret user‑supplied level strings into concrete slog levels before configuring or emitting log records. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive input string"] --> B{"Lowercase"} + B --> C{"Match value"} + C -->|"debug"| D["slog.LevelDebug"] + C -->|"info"| E["slog.LevelInfo"] + C -->|"warn or warning"| F["slog.LevelWarn"] + C -->|"error"| G["slog.LevelError"] + C -->|"fatal"| H["CustomLevelFatal"] + C -->|"unknown"| I["Return error via fmt.Errorf"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_parseLevel --> func_ToLower + func_parseLevel --> func_Errorf +``` + +#### Functions calling `parseLevel` (Mermaid) + +```mermaid +graph TD + func_Logf --> func_parseLevel + func_SetupLogger --> func_parseLevel +``` + +#### Usage example (Go) + +```go +// Minimal example invoking parseLevel +levelStr := "Warn" +lvl, err := parseLevel(levelStr) +if err != nil { + log.Fatalf("invalid level %q: %v", levelStr, err) +} +fmt.Printf("Parsed slog.Level: %v\n", lvl) // Output: Parsed slog.Level: 4 (slog.LevelWarn) +``` + +--- diff --git a/docs/internal/results/results.md b/docs/internal/results/results.md new file mode 100644 index 000000000..b72eb1a65 --- /dev/null +++ b/docs/internal/results/results.md @@ -0,0 +1,854 @@ +# Package results + +**Path**: `internal/results` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [CertIDResponse](#certidresponse) + - [UploadResult](#uploadresult) +- [Exported Functions](#exported-functions) + - [CompressResultsArtifacts](#compressresultsartifacts) + - [CreateResultsWebFiles](#createresultswebfiles) + - [GetCertIDFromConnectAPI](#getcertidfromconnectapi) + - [SendResultsToConnectAPI](#sendresultstoconnectapi) +- [Local Functions](#local-functions) + - 
[createClaimJSFile](#createclaimjsfile) + - [createFormField](#createformfield) + - [generateZipFileName](#generatezipfilename) + - [getFileTarHeader](#getfiletarheader) + - [sendRequest](#sendrequest) + - [setProxy](#setproxy) + +## Overview + +The `results` package orchestrates the creation, packaging and upload of test result artifacts to Red Hat Connect. It generates web‑viewable files, compresses them into a zip archive, obtains a certification ID via the API, and finally posts the archive as an attachment. + +### Key Features + +- Creates HTML/JS assets from claim data for local viewing +- Compresses result files into a timestamped gzip‑tar ZIP +- Uploads the archive to Red Hat Connect and logs download URLs + +### Design Notes + +- File names embed UTC timestamps to avoid collisions +- HTTP requests are wrapped with detailed logging and proxy support +- Error handling returns descriptive messages but does not retry automatically + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**CertIDResponse**](#certidresponse) | Struct definition | +| [**UploadResult**](#uploadresult) | Struct definition | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error)](#compressresultsartifacts) | Creates a ZIP file in `outputDir` containing each file listed in `filePaths`. The archive is generated as a gzip‑compressed tarball. | +| [func CreateResultsWebFiles(outputDir, claimFileName string) (filePaths []string, err error)](#createresultswebfiles) | Creates all web‑related artifacts required to view and parse a claim file: `claimjson.js`, `results.html`, and the classification script. Returns paths of created files. 
| +| [func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error)](#getcertidfromconnectapi) | Sends a POST request to the Red Hat Connect API to obtain the certification ID for a given project. | +| [func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error](#sendresultstoconnectapi) | Uploads a ZIP file containing test artifacts to the Red Hat Connect API as an attachment and logs the resulting download URL. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func createClaimJSFile(claimFilePath, outputDir string) (filePath string, err error)](#createclaimjsfile) | Reads a `claim.json` file and writes its contents into a JavaScript file (`claimjson.js`) that assigns the JSON to a global variable. | +| [func createFormField(w *multipart.Writer, field, value string) error](#createformfield) | Adds a simple text field to an existing `multipart.Writer`. The field name is given by `field` and the content by `value`. | +| [func generateZipFileName() string](#generatezipfilename) | Produces a unique file name for the results archive, embedding the current UTC time in a specific layout. | +| [func getFileTarHeader(file string) (*tar.Header, error)](#getfiletarheader) | Generates an `*tar.Header` describing the specified file so it can be archived. | +| [func sendRequest(req *http.Request, client *http.Client) (*http.Response, error)](#sendrequest) | Executes an HTTP request using the supplied client, logs debug information, and ensures a successful 200 OK response. | +| [func setProxy(client *http.Client, proxyURL, proxyPort string)](#setproxy) | If both `proxyURL` and `proxyPort` are non‑empty, builds a full proxy address, parses it, logs debug information, and assigns an HTTP transport that routes requests through the specified proxy. 
| + +## Structs + +### CertIDResponse + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Status` | `string` | Field documentation | +| `CertificationLevel` | `string` | Field documentation | +| `RhcertURL` | `string` | Field documentation | +| `HasStartedByPartner` | `bool` | Field documentation | +| `CertificationType` | `struct{ID int; Name string}` | Field documentation | +| `ID` | `int` | Field documentation | +| `CaseNumber` | `string` | Field documentation | + +--- + +### UploadResult + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Size` | `int` | Field documentation | +| `UploadedDate` | `time.Time` | Field documentation | +| `UUID` | `string` | Field documentation | +| `ContentType` | `string` | Field documentation | +| `Desc` | `string` | Field documentation | +| `DownloadURL` | `string` | Field documentation | +| `UploadedBy` | `string` | Field documentation | +| `CertID` | `int` | Field documentation | +| `Type` | `string` | Field documentation | +| `Name` | `string` | Field documentation | + +--- + +## Exported Functions + +### CompressResultsArtifacts + +**CompressResultsArtifacts** - Creates a ZIP file in `outputDir` containing each file listed in `filePaths`. The archive is generated as a gzip‑compressed tarball. + +#### Signature (Go) + +```go +func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a ZIP file in `outputDir` containing each file listed in `filePaths`. The archive is generated as a gzip‑compressed tarball. | +| **Parameters** | *`outputDir string`* – directory where the ZIP will be created.
*`filePaths []string`* – paths of files to include. | +| **Return value** | *`string`* – absolute path to the created archive.
*`error`* – error if any step fails. | +| **Key dependencies** | `generateZipFileName`, `filepath.Join`, `log.Info`, `os.Create`, `gzip.NewWriter`, `tar.NewWriter`, `getFileTarHeader`, `io.Copy`, `filepath.Abs`. | +| **Side effects** | Creates/opens files on disk, writes to a ZIP archive, logs progress. | +| **How it fits the package** | Used by `Run` to bundle claim and artifact files before optional upload to Red Hat Connect or local storage. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Generate file name"] --> B["Create ZIP file"] + B --> C["Initialize gzip writer"] + C --> D["Initialize tar writer"] + subgraph Loop over files + E["Get tar header"] --> F["Write header to tar"] + F --> G["Open source file"] + G --> H["Copy file contents into tar"] + H --> I["Close source file"] + end + D --> Loop + I --> J["Resolve absolute path"] + J --> K["Return archive path"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CompressResultsArtifacts --> func_generateZipFileName + func_CompressResultsArtifacts --> func_Join + func_CompressResultsArtifacts --> func_Info + func_CompressResultsArtifacts --> func_Create + func_CompressResultsArtifacts --> func_Errorf + func_CompressResultsArtifacts --> func_NewWriter + func_CompressResultsArtifacts --> func_Close + func_CompressResultsArtifacts --> func_NewWriter + func_CompressResultsArtifacts --> func_Debug + func_CompressResultsArtifacts --> func_getFileTarHeader + func_CompressResultsArtifacts --> func_WriteHeader + func_CompressResultsArtifacts --> func_Open + func_CompressResultsArtifacts --> func_Copy + func_CompressResultsArtifacts --> func_Abs +``` + +#### Functions calling `CompressResultsArtifacts` (Mermaid) + +```mermaid +graph TD + func_Run --> func_CompressResultsArtifacts +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CompressResultsArtifacts +package main + +import ( + "fmt" + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/results" +) + +func main() { + outputDir := "./output" + files := []string{"claim.json", "report.html"} + + zipPath, err := results.CompressResultsArtifacts(outputDir, files) + if err != nil { + fmt.Printf("Compression failed: %v\n", err) + return + } + fmt.Printf("Artifacts archived at %s\n", zipPath) +} +``` + +--- + +### CreateResultsWebFiles + +**CreateResultsWebFiles** - Creates all web‑related artifacts required to view and parse a claim file: `claimjson.js`, `results.html`, and the classification script. Returns paths of created files. + +#### Signature (Go) + +```go +func CreateResultsWebFiles(outputDir, claimFileName string) (filePaths []string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates all web‑related artifacts required to view and parse a claim file: `claimjson.js`, `results.html`, and the classification script. Returns paths of created files. | +| **Parameters** | `outputDir string` – directory where output will be written.
`claimFileName string` – name of the claim JSON file in that directory. | +| **Return value** | `filePaths []string` – slice containing full paths to each generated file.
`err error` – non‑nil if any step fails (e.g., file write, JS creation). | +| **Key dependencies** | • `path/filepath.Join` – builds output paths.
• `createClaimJSFile` – writes the claim JSON into a JavaScript variable.
• `os.WriteFile` – writes static HTML content.
• `fmt.Errorf` – wraps errors. | +| **Side effects** | Writes three files to disk (`results.html`, `claimjson.js`) and any additional static assets; modifies no in‑memory state beyond the returned slice. | +| **How it fits the package** | Provides the web artifact generation step invoked by `certsuite.Run`. These artifacts enable a browser‑based results viewer that consumes the claim JSON via JavaScript. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Build claim file path"] + B --> C["Create claim JS file with createClaimJSFile"] + C -- error --> D["Return nil, err"] + C --> E["Initialize filePaths slice with claimJSFilePath"] + E --> F{"Iterate staticFiles"} + F --> G["Write each static file to disk"] + G -- error --> H["Return nil, err"] + G --> I["Append file path to filePaths"] + I --> J["End – return filePaths, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CreateResultsWebFiles --> filepath.Join + func_CreateResultsWebFiles --> createClaimJSFile + func_CreateResultsWebFiles --> os.WriteFile + func_CreateResultsWebFiles --> fmt.Errorf +``` + +#### Functions calling `CreateResultsWebFiles` (Mermaid) + +```mermaid +graph TD + certsuite.Run --> func_CreateResultsWebFiles +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CreateResultsWebFiles +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/internal/results" +) + +func main() { + outputDir := "./output" + claimFileName := "claim.json" + + paths, err := results.CreateResultsWebFiles(outputDir, claimFileName) + if err != nil { + log.Fatalf("failed to create web files: %v", err) + } + + log.Printf("Created web artifacts: %v", paths) +} +``` + +--- + +--- + +### GetCertIDFromConnectAPI + +**GetCertIDFromConnectAPI** - Sends a POST request to the Red Hat Connect API to obtain the certification ID for a given project. 
+ +#### 1) Signature (Go) + +```go +func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sends a POST request to the Red Hat Connect API to obtain the certification ID for a given project. | +| **Parameters** | `apiKey` – authentication key
`projectID` – identifier of the project
`connectAPIBaseURL` – base URL of the API (e.g., `https://access.redhat.com/hydra/cwe/rest/v1.0`)
`proxyURL`, `proxyPort` – optional HTTP proxy configuration | +| **Return value** | The certification ID as a string on success; an error otherwise | +| **Key dependencies** | `log.Info/Debug`, `strings.ReplaceAll`, `fmt.Sprintf`, `http.NewRequest`, `bytes.NewBuffer`, `setProxy`, `sendRequest`, `json.NewDecoder` | +| **Side effects** | Logs request details, sets HTTP headers, performs network I/O, may configure an HTTP proxy | +| **How it fits the package** | Part of the `results` sub‑package; used by the top‑level runner to submit artifacts to Red Hat Connect. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Sanitize inputs"] --> B["Build JSON payload"] + B --> C["Create request URL"] + C --> D["Instantiate HTTP client"] + D --> E["Configure proxy if needed"] + E --> F["Send POST request"] + F --> G["Decode JSON response"] + G --> H["Return certification ID or error"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetCertIDFromConnectAPI --> func_setProxy + func_GetCertIDFromConnectAPI --> func_sendRequest +``` + +#### 5) Functions calling `GetCertIDFromConnectAPI` (Mermaid) + +```mermaid +graph TD + func_Run --> func_GetCertIDFromConnectAPI +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetCertIDFromConnectAPI +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/results" +) + +func main() { + apiKey := "YOUR_API_KEY" + projectID := "12345" + baseURL := "https://access.redhat.com/hydra/cwe/rest/v1.0" + proxyURL, proxyPort := "", "" + + certID, err := results.GetCertIDFromConnectAPI(apiKey, projectID, baseURL, proxyURL, proxyPort) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + fmt.Printf("Certification ID: %s\n", certID) +} +``` + +--- + +### SendResultsToConnectAPI + +**SendResultsToConnectAPI** - Uploads a ZIP file containing test artifacts to the Red Hat Connect API as an attachment and logs the resulting download URL. 
+ +#### Signature (Go) + +```go +func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Uploads a ZIP file containing test artifacts to the Red Hat Connect API as an attachment and logs the resulting download URL. | +| **Parameters** | `zipFile` – path to the ZIP archive
`apiKey` – authentication key for Connect
`connectBaseURL` – base URL of the Connect API
`certID` – identifier of the certification being uploaded
`proxyURL`, `proxyPort` – optional HTTP proxy settings | +| **Return value** | `error` – non‑nil if any step (file handling, request creation, network call, or JSON decoding) fails | +| **Key dependencies** | • `strings.ReplaceAll` for sanitising inputs
• `os.Open`, `io.Copy` to read the ZIP file
• `mime/multipart.NewWriter` and helper `createFormField` to build a multipart/form‑data payload
• `net/http` for request creation and sending (via `sendRequest`)
• `setProxy` to apply proxy configuration | +| **Side effects** | • Writes logs via the global logger (`log.Info`, `log.Debug`)
• Reads the ZIP file from disk; closes it after use
• Sends an HTTP POST request and processes the response | +| **How it fits the package** | Part of the `results` sub‑package, this function is invoked during the final stage of a CertSuite run to deliver artifacts to Red Hat Connect for certification validation. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Sanitise inputs"] --> B["Open ZIP file"] + B --> C["Create multipart writer"] + C --> D["Add attachment file"] + D --> E["Add form fields: type, certId, description"] + E --> F["Close writer & build URL"] + F --> G["Prepare HTTP POST request"] + G --> H["Apply proxy if set"] + H --> I["Send request via sendRequest"] + I --> J["Decode JSON response into UploadResult"] + J --> K["Log download and upload date"] + K --> L["Return nil / error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_SendResultsToConnectAPI --> func_log.Info + func_SendResultsToConnectAPI --> strings.ReplaceAll + func_SendResultsToConnectAPI --> os.Open + func_SendResultsToConnectAPI --> io.Copy + func_SendResultsToConnectAPI --> multipart.NewWriter + func_SendResultsToConnectAPI --> createFormField + func_SendResultsToConnectAPI --> http.NewRequest + func_SendResultsToConnectAPI --> setProxy + func_SendResultsToConnectAPI --> sendRequest + func_SendResultsToConnectAPI --> json.NewDecoder +``` + +#### Functions calling `SendResultsToConnectAPI` + +```mermaid +graph TD + func_Run --> func_SendResultsToConnectAPI +``` + +#### Usage example (Go) + +```go +// Minimal example invoking SendResultsToConnectAPI +import "github.com/redhat-best-practices-for-k8s/certsuite/internal/results" + +func main() { + zipPath := "/tmp/results.zip" + apiKey := "my-api-key" + baseURL := "https://access.redhat.com/hydra/cwe/rest/v1.0" + certID := "12345" + proxyURL, proxyPort := "", "" + + if err := results.SendResultsToConnectAPI(zipPath, apiKey, baseURL, certID, proxyURL, proxyPort); err != nil { + log.Fatalf("Upload failed: %v", err) + } +} +``` + +--- + +## Local Functions + 
+### createClaimJSFile + +**createClaimJSFile** - Reads a `claim.json` file and writes its contents into a JavaScript file (`claimjson.js`) that assigns the JSON to a global variable. + +#### Signature (Go) + +```go +func createClaimJSFile(claimFilePath, outputDir string) (filePath string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a `claim.json` file and writes its contents into a JavaScript file (`claimjson.js`) that assigns the JSON to a global variable. | +| **Parameters** | `claimFilePath string – path to the source claim.json file`
`outputDir string – directory where claimjson.js will be written` | +| **Return value** | `filePath string – full path of the created JS file`
`err error – non‑nil if reading or writing fails` | +| **Key dependencies** | • `os.ReadFile` (from *os*)
• `fmt.Errorf` (from *fmt*)
• `string()` conversion
• `filepath.Join` (from *path/filepath*)
• `os.WriteFile` (from *os*) | +| **Side effects** | Writes a new file to disk; may return errors if I/O fails. | +| **How it fits the package** | Used by `CreateResultsWebFiles` to generate the JavaScript representation of claim data for the HTML UI. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Read claim.json"} + B --> C{"Success?"} + C -- No --> D["Return error"] + C -- Yes --> E["Convert to JS string"] + E --> F["Determine output path"] + F --> G["Write claimjson.js"] + G --> H{"Write success?"} + H -- No --> I["Return error"] + H -- Yes --> J["Return filePath"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_createClaimJSFile --> os_ReadFile + func_createClaimJSFile --> fmt_Errorf + func_createClaimJSFile --> string + func_createClaimJSFile --> filepath_Join + func_createClaimJSFile --> os_WriteFile +``` + +#### Functions calling `createClaimJSFile` + +```mermaid +graph TD + func_CreateResultsWebFiles --> func_createClaimJSFile +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createClaimJSFile +outputPath, err := createClaimJSFile("/path/to/claim.json", "/tmp/output") +if err != nil { + log.Fatalf("Error creating JS file: %v", err) +} +fmt.Println("Generated JS file at:", outputPath) +``` + +--- + +### createFormField + +**createFormField** - Adds a simple text field to an existing `multipart.Writer`. The field name is given by `field` and the content by `value`. + +#### Signature (Go) + +```go +func createFormField(w *multipart.Writer, field, value string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Adds a simple text field to an existing `multipart.Writer`. The field name is given by `field` and the content by `value`. | +| **Parameters** | `w *multipart.Writer` – target writer.
`field string` – form field name.
`value string` – field value to write. | +| **Return value** | `error` – non‑nil if the field cannot be created or written. | +| **Key dependencies** | • `w.CreateFormField(field)`
• `fw.Write([]byte(value))`
• `fmt.Errorf` for error formatting |
+| **Side effects** | Writes to the multipart writer; does not close it. No global state is modified. |
+| **How it fits the package** | Used by higher‑level routines (e.g., `SendResultsToConnectAPI`) to add metadata fields to an HTTP request body before uploading test results. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Create field"}
+    B -->|"Success"| C["Write value"]
+    B -->|"Error"| D["Return error"]
+    C --> E["Return nil"]
+```
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_createFormField --> CreateFormField
+    func_createFormField --> fmt_Errorf
+    func_createFormField --> Write
+    func_createFormField --> fmt_Errorf
+```
+
+#### Functions calling `createFormField` (Mermaid)
+
+```mermaid
+graph TD
+    SendResultsToConnectAPI --> createFormField
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking createFormField
+package main
+
+import (
+    "bytes"
+    "mime/multipart"
+)
+
+func main() {
+    var buf bytes.Buffer
+    writer := multipart.NewWriter(&buf)
+
+    if err := createFormField(writer, "type", "RhocpBestPracticeTestResult"); err != nil {
+        panic(err)
+    }
+
+    // Close writer to finalize the body and send it with an HTTP request.
+    writer.Close()
+}
+```
+
+---
+
+### generateZipFileName
+
+**generateZipFileName** - Produces a unique file name for the results archive, embedding the current UTC time in a specific layout.
+
+#### Signature (Go)
+
+```go
+func generateZipFileName() string
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Produces a unique file name for the results archive, embedding the current UTC time in a specific layout. |
+| **Parameters** | None |
+| **Return value** | `string` – The generated ZIP file name (e.g., `"2025-09-03T15_30_00Z-results.zip"`). |
+| **Key dependencies** | • `time.Now()` from the standard library<br/>
• `Format()` method of `time.Time` | +| **Side effects** | None. Pure function; only reads current time. | +| **How it fits the package** | Used by `CompressResultsArtifacts` to name the archive that contains test results and artifacts. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get Current Time"} + B --> C["time.Now()"] + C --> D["Format with tarGzFileNamePrefixLayout"] + D --> E["Concatenate -+tarGzFileNameSuffix"] + E --> F["Return file name"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_generateZipFileName --> func_Format + func_generateZipFileName --> func_time.Now +``` + +#### Functions calling `generateZipFileName` (Mermaid) + +```mermaid +graph TD + func_CompressResultsArtifacts --> func_generateZipFileName +``` + +#### Usage example (Go) + +```go +// Minimal example invoking generateZipFileName +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/results" +) + +func main() { + zipName := results.generateZipFileName() + fmt.Println("Generated zip file name:", zipName) +} +``` + +--- + +--- + +### getFileTarHeader + +**getFileTarHeader** - Generates an `*tar.Header` describing the specified file so it can be archived. + +#### Signature (Go) + +```go +func getFileTarHeader(file string) (*tar.Header, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates an `*tar.Header` describing the specified file so it can be archived. | +| **Parameters** | `file` *string* – path to the target file. | +| **Return value** | `(*tar.Header, error)` – the header on success; a descriptive error otherwise. | +| **Key dependencies** | • `os.Stat` – obtain `FileInfo` for the path.
• `archive/tar.FileInfoHeader` – convert `FileInfo` to a tar header.
• `fmt.Errorf` – wrap errors with context. | +| **Side effects** | None; purely functional, no state mutation or I/O beyond stat calls. | +| **How it fits the package** | Helper used by `CompressResultsArtifacts` to prepare each file’s metadata before writing to a tar archive. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive file path"] --> B["os.Stat(file)"] + B --> C{"Error?"} + C -- Yes --> D["Return error via fmt.Errorf"] + C -- No --> E["tar.FileInfoHeader(info, info.Name())"] + E --> F{"Error?"} + F -- Yes --> G["Return error via fmt.Errorf"] + F -- No --> H["Return header"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getFileTarHeader --> func_Stat + func_getFileTarHeader --> func_FileInfoHeader + func_getFileTarHeader --> func_Errorf +``` + +#### Functions calling `getFileTarHeader` (Mermaid) + +```mermaid +graph TD + func_CompressResultsArtifacts --> func_getFileTarHeader +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getFileTarHeader +header, err := getFileTarHeader("/path/to/file.txt") +if err != nil { + log.Fatalf("cannot create tar header: %v", err) +} +fmt.Printf("Created tar header for file: %+v\n", header) +``` + +--- + +### sendRequest + +**sendRequest** - Executes an HTTP request using the supplied client, logs debug information, and ensures a successful 200 OK response. + +#### Signature (Go) + +```go +func sendRequest(req *http.Request, client *http.Client) (*http.Response, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes an HTTP request using the supplied client, logs debug information, and ensures a successful 200 OK response. | +| **Parameters** | `req *http.Request` – the prepared request;
`client *http.Client` – the HTTP client to use (may contain timeout or proxy settings). | +| **Return value** | `*http.Response` – the raw HTTP response on success;
`error` – wrapped error if the request fails or returns a non‑OK status. | +| **Key dependencies** | • `log.Debug` – logs request URL and responses.
• `client.Do` – performs the actual network call.
• `fmt.Errorf` – constructs descriptive errors. | +| **Side effects** | Performs I/O over the network; writes debug log entries; does not modify global state. | +| **How it fits the package** | Central helper for all API interactions in the `results` package, used by functions that obtain certification IDs and upload results to Red Hat Connect. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Log request URL"} + B --> C["client.Do(req)"] + C --> D{"Error?"} + D -- Yes --> E["Return nil, error"] + D -- No --> F{"Status OK?"} + F -- Yes --> G["Return response"] + F -- No --> H["Log status"] + H --> I["Return nil, formatted error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_sendRequest --> Logger.Debug + func_sendRequest --> client.Do + func_sendRequest --> fmt.Errorf +``` + +#### Functions calling `sendRequest` (Mermaid) + +```mermaid +graph TD + GetCertIDFromConnectAPI --> sendRequest + SendResultsToConnectAPI --> sendRequest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking sendRequest +req, _ := http.NewRequest("GET", "https://example.com/api", nil) +client := &http.Client{Timeout: 30 * time.Second} + +resp, err := sendRequest(req, client) +if err != nil { + log.Fatalf("request failed: %v", err) +} +defer resp.Body.Close() + +// Process response... +``` + +--- + +### setProxy + +**setProxy** - If both `proxyURL` and `proxyPort` are non‑empty, builds a full proxy address, parses it, logs debug information, and assigns an HTTP transport that routes requests through the specified proxy. + +#### Signature (Go) + +```go +func setProxy(client *http.Client, proxyURL, proxyPort string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | If both `proxyURL` and `proxyPort` are non‑empty, builds a full proxy address, parses it, logs debug information, and assigns an HTTP transport that routes requests through the specified proxy. 
| +| **Parameters** | `client *http.Client` – client to configure.
`proxyURL string` – hostname or IP of the proxy.
`proxyPort string` – port number for the proxy. | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logger.Debug`
• `fmt.Sprintf`
• `net/url.Parse`
• `log.Error`
• `net/http.Transport` and `http.ProxyURL` | +| **Side effects** | Mutates the supplied `client.Transport` to a new `*http.Transport` that uses the parsed proxy URL. Logs debug and error messages. | +| **How it fits the package** | Used by higher‑level API calls (`GetCertIDFromConnectAPI`, `SendResultsToConnectAPI`) to optionally route traffic through an HTTP/HTTPS proxy when interacting with Red Hat Connect endpoints. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check if proxyURL & proxyPort not empty"] -->|"Yes"| B["Log debug: “Proxy is set”"] + B --> C["Build string proxyURL:proxyPort"] + C --> D["Parse URL"] + D -->|"Success"| E["Set client.Transport with ProxyURL(parsed)"] + D -->|"Failure"| F["Log error: “Failed to parse proxy URL”"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_setProxy --> func_log.Debug + func_setProxy --> fmt.Sprintf + func_setProxy --> url.Parse + func_setProxy --> func_log.Error + func_setProxy --> http.Transport + func_setProxy --> http.ProxyURL +``` + +#### Functions calling `setProxy` (Mermaid) + +```mermaid +graph TD + func_GetCertIDFromConnectAPI --> func_setProxy + func_SendResultsToConnectAPI --> func_setProxy +``` + +#### Usage example (Go) + +```go +// Minimal example invoking setProxy +client := &http.Client{Timeout: 30 * time.Second} +proxyHost := "proxy.example.com" +proxyPort := "3128" + +setProxy(client, proxyHost, proxyPort) + +// Now `client` will send requests via the specified proxy. 
+``` + +--- diff --git a/docs/pkg/arrayhelper/arrayhelper.md b/docs/pkg/arrayhelper/arrayhelper.md new file mode 100644 index 000000000..60fd91890 --- /dev/null +++ b/docs/pkg/arrayhelper/arrayhelper.md @@ -0,0 +1,189 @@ +# Package arrayhelper + +**Path**: `pkg/arrayhelper` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [ArgListToMap](#arglisttomap) + - [FilterArray](#filterarray) + - [Unique](#unique) + +## Overview + +Provides lightweight utilities for manipulating string slices used throughout the certsuite codebase, such as converting argument lists into maps, filtering elements, and deduplicating entries. + +### Key Features + +- Converts a slice of "key=value" strings into a map for quick lookup. + +### Design Notes + +- Assumes input items are formatted as key=value when converting to a map. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func ArgListToMap([]string)(map[string]string)](#arglisttomap) | ArgListToMap takes a list of strings of the form "key=value" and translates it into a map of the form {key: value} | +| [func FilterArray(vs []string, f func(string) bool) []string](#filterarray) | Returns a new slice containing only the elements of `vs` for which `f` evaluates to true. | +| [func Unique(slice []string) []string](#unique) | Produces a new slice containing each unique element of the input, preserving no particular order. | +
+## Exported Functions + +### ArgListToMap + +**ArgListToMap** - ArgListToMap takes a list of strings of the form "key=value" and translates it into a map +of the form {key: value} + + +**Signature**: `func([]string)(map[string]string)` + +**Purpose**: ArgListToMap takes a list of strings of the form "key=value" and translates it into a map +of the form {key: value} + +--- + +### FilterArray + +**FilterArray** - Returns a new slice containing only the elements of `vs` for which `f` evaluates to true. <br>
+ +#### 1) Signature (Go) + +```go +func FilterArray(vs []string, f func(string) bool) []string +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a new slice containing only the elements of `vs` for which `f` evaluates to true. | +| **Parameters** | `vs []string` – input slice; `f func(string) bool` – predicate function applied to each element. | +| **Return value** | `[]string` – filtered slice preserving order of matching elements. | +| **Key dependencies** | • `make` (slice allocation)
• `append` (adding elements)
• user‑supplied `f` (predicate) | +| **Side effects** | None; operates purely on inputs and returns a new slice. | +| **How it fits the package** | Utility function in *arrayhelper* used by higher‑level logic to extract relevant items from string collections. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over vs"} + B -->|"for each v"| C["Apply f(v)"] + C -->|"true"| D["Append v to result"] + C -->|"false"| E["Skip"] + E --> B + D --> B + B --> F["Return filtered slice"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_FilterArray --> make + func_FilterArray --> f + func_FilterArray --> append +``` + +#### 5) Functions calling `FilterArray` (Mermaid) + +```mermaid +graph TD + func_getGrubKernelArgs --> func_FilterArray +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking FilterArray +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper" +) + +func main() { + items := []string{"apple", "banana", "cherry", "date"} + // Keep only strings starting with 'b' or 'c' + filtered := arrayhelper.FilterArray(items, func(s string) bool { + return len(s) > 0 && (s[0] == 'b' || s[0] == 'c') + }) + fmt.Println(filtered) // Output: [banana cherry] +} +``` + +--- + +### Unique + +**Unique** - Produces a new slice containing each unique element of the input, preserving no particular order. + +Removes duplicate entries from a string slice and returns the distinct values. + +#### Signature (Go) + +```go +func Unique(slice []string) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a new slice containing each unique element of the input, preserving no particular order. | +| **Parameters** | `slice []string` – the source sequence to deduplicate. | +| **Return value** | `[]string` – a slice with all duplicate strings removed. 
| +| **Key dependencies** | • `make` (creates map and result slice)
• `len` (determines capacity for result)
• `append` (builds the final slice) | +| **Side effects** | None; operates solely on its arguments and returns a new value. | +| **How it fits the package** | Utility helper that other components use to collapse duplicate identifiers before further processing. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Create map"} + B --> C["Iterate over input"] + C --> D["Insert into uniqMap"] + D --> E{"Build result slice"} + E --> F["Append keys from uniqMap"] + F --> G["Return uniqSlice"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Unique --> make + func_Unique --> len + func_Unique --> append +``` + +#### Functions calling `Unique` + +```mermaid +graph TD + func_GetSuitesFromIdentifiers --> func_Unique +``` + +> **Note:** `GetSuitesFromIdentifiers` appears in two packages: +> *github.com/redhat-best-practices-for-k8s/certsuite/cmd/certsuite/generate/catalog* and +> *github.com/redhat-best-practices-for-k8s/certsuite/webserver*. + +#### Usage example (Go) + +```go +// Minimal example invoking Unique +import "github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper" + +func main() { + input := []string{"a", "b", "a", "c"} + unique := arrayhelper.Unique(input) + fmt.Println(unique) // Output: [a b c] (order not guaranteed) +} +``` + +--- diff --git a/docs/pkg/autodiscover/autodiscover.md b/docs/pkg/autodiscover/autodiscover.md new file mode 100644 index 000000000..07b3f1fbe --- /dev/null +++ b/docs/pkg/autodiscover/autodiscover.md @@ -0,0 +1,4013 @@ +# Package autodiscover + +**Path**: `pkg/autodiscover` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [DiscoveredTestData](#discoveredtestdata) + - [PodStates](#podstates) + - [ScaleObject](#scaleobject) +- [Exported Functions](#exported-functions) + - [CountPodsByStatus](#countpodsbystatus) + - [CreateLabels](#createlabels) + - [DoAutoDiscover](#doautodiscover) + - [FindCrObjectByNameByNamespace](#findcrobjectbynamebynamespace) + - 
[FindDeploymentByNameByNamespace](#finddeploymentbynamebynamespace) + - [FindPodsByLabels](#findpodsbylabels) + - [FindStatefulsetByNameByNamespace](#findstatefulsetbynamebynamespace) + - [FindTestCrdNames](#findtestcrdnames) + - [GetScaleCrUnderTest](#getscalecrundertest) +- [Local Functions](#local-functions) + - [findAbnormalEvents](#findabnormalevents) + - [findClusterOperators](#findclusteroperators) + - [findDeploymentsByLabels](#finddeploymentsbylabels) + - [findHpaControllers](#findhpacontrollers) + - [findOperatorsByLabels](#findoperatorsbylabels) + - [findOperatorsMatchingAtLeastOneLabel](#findoperatorsmatchingatleastonelabel) + - [findPodsMatchingAtLeastOneLabel](#findpodsmatchingatleastonelabel) + - [findStatefulSetsByLabels](#findstatefulsetsbylabels) + - [findSubscriptions](#findsubscriptions) + - [getAllCatalogSources](#getallcatalogsources) + - [getAllInstallPlans](#getallinstallplans) + - [getAllNamespaces](#getallnamespaces) + - [getAllOperators](#getalloperators) + - [getAllPackageManifests](#getallpackagemanifests) + - [getAllStorageClasses](#getallstorageclasses) + - [getClusterCrdNames](#getclustercrdnames) + - [getClusterRoleBindings](#getclusterrolebindings) + - [getCrScaleObjects](#getcrscaleobjects) + - [getHelmList](#gethelmlist) + - [getNetworkAttachmentDefinitions](#getnetworkattachmentdefinitions) + - [getNetworkPolicies](#getnetworkpolicies) + - [getOpenshiftVersion](#getopenshiftversion) + - [getOperandPodsFromTestCsvs](#getoperandpodsfromtestcsvs) + - [getOperatorCsvPods](#getoperatorcsvpods) + - [getPersistentVolumeClaims](#getpersistentvolumeclaims) + - [getPersistentVolumes](#getpersistentvolumes) + - [getPodDisruptionBudgets](#getpoddisruptionbudgets) + - [getPodsOwnedByCsv](#getpodsownedbycsv) + - [getResourceQuotas](#getresourcequotas) + - [getRoleBindings](#getrolebindings) + - [getRoles](#getroles) + - [getServiceAccounts](#getserviceaccounts) + - [getServices](#getservices) + - 
[getSriovNetworkNodePolicies](#getsriovnetworknodepolicies) + - [getSriovNetworks](#getsriovnetworks) + - [isDeploymentsPodsMatchingAtLeastOneLabel](#isdeploymentspodsmatchingatleastonelabel) + - [isIstioServiceMeshInstalled](#isistioservicemeshinstalled) + - [isStatefulSetsMatchingAtLeastOneLabel](#isstatefulsetsmatchingatleastonelabel) + - [namespacesListToStringList](#namespaceslisttostringlist) + +## Overview + +The autodiscover package queries a running Kubernetes or OpenShift cluster to collect namespaces, pods, operators, CRDs, and other resources needed for test execution. It filters objects by user‑supplied labels or predefined criteria and returns the data in a structured form that the rest of CertSuite consumes. + +### Key Features + +- Collects all relevant cluster objects—including deployments, statefulsets, services, CRDs, and operator lifecycle components—using the Kubernetes API. +- Filters resources by label selectors and custom patterns to isolate test‑specific items such as operand pods or Istio service mesh presence. +- Aggregates metrics on pod readiness and abnormal events, enabling downstream tests to make informed decisions about execution environments. + +### Design Notes + +- Uses a single client holder to avoid creating multiple REST clients; this centralizes configuration and caching. +- Falls back to non‑OpenShift defaults when the ClusterOperator API is unavailable, ensuring compatibility with vanilla Kubernetes clusters. +- Logs errors for invalid label strings but continues processing, prioritizing resilience over strict validation. 
+ +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**DiscoveredTestData**](#discoveredtestdata) | Struct definition | +| [**PodStates**](#podstates) | Tracks pod execution metrics | +| [**ScaleObject**](#scaleobject) | Represents a Kubernetes scale subresource for a custom resource | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CountPodsByStatus(allPods []corev1.Pod) map[string]int](#countpodsbystatus) | Returns a map with the number of *ready* (`PodRunning` phase) and *non‑ready* pods from a list. | +| [func CreateLabels(labelStrings []string) (labelObjects []labelObject)](#createlabels) | Parses each input string using a predefined regular expression (`labelRegex`) and converts matched key‑value pairs into `labelObject` instances. Invalid strings are logged as errors and skipped. | +| [func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData](#doautodiscover) | Queries a running cluster to gather namespaces, pods, operators, CRDs, and other Kubernetes objects that match user‑supplied labels or are otherwise relevant for test execution. The gathered data is returned as a `DiscoveredTestData` struct which the rest of the framework consumes to build the test environment. | +| [func FindCrObjectByNameByNamespace(scale.ScalesGetter, string, string, schema.GroupResource) (*scalingv1.Scale, error)](#findcrobjectbynamebynamespace) | Looks up a Kubernetes `Scale` subresource for the specified CR (Custom Resource) within a given namespace. | +| [func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.Deployment, error)](#finddeploymentbynamebynamespace) | Fetches a Kubernetes Deployment resource identified by its namespace and name using the AppsV1 client. 
| +| [func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod)](#findpodsbylabels) | Enumerates pods in the provided namespaces that match any of the supplied label selectors. It returns two slices: one containing only running (or allowed non‑running) pods and another with all retrieved pods, excluding those marked for deletion. | +| [func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, error)](#findstatefulsetbynamebynamespace) | Fetches a Kubernetes StatefulSet resource identified by its `namespace` and `name`. Returns the object or an error if retrieval fails. | +| [func FindTestCrdNames(clusterCrds []*apiextv1.CustomResourceDefinition, crdFilters []configuration.CrdFilter) (targetCrds []*apiextv1.CustomResourceDefinition)](#findtestcrdnames) | Filters a list of cluster CRDs to only those whose names end with any suffix specified in `crdFilters`. | +| [func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject](#getscalecrundertest) | For each namespace‑scoped CRD that supports the `scale` subresource, gather all custom resources (CRs) in the provided namespaces and return their scale objects. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (abnormalEvents []corev1.Event)](#findabnormalevents) | Gathers all Kubernetes events whose `type` is not `"Normal"` from the supplied namespaces. | +| [func findClusterOperators(client clientconfigv1.ClusterOperatorInterface) ([]configv1.ClusterOperator, error)](#findclusteroperators) | Queries the Kubernetes API for all `ClusterOperator` resources, returning them as a slice of `configv1.ClusterOperator`. If the CR is missing, it logs a debug message and returns `nil, nil`. 
| +| [func findDeploymentsByLabels(appClient appv1client.AppsV1Interface, labels []labelObject, namespaces []string) []appsv1.Deployment](#finddeploymentsbylabels) | Enumerates all deployments in the specified namespaces that contain at least one of the supplied label key/value pairs. If no labels are provided, every deployment in those namespaces is returned. | +| [func(findHpaControllers)(kubernetes.Interface, []string) []*scalingv1.HorizontalPodAutoscaler](#findhpacontrollers) | Collects every `HorizontalPodAutoscaler` object from the supplied namespaces and returns a slice of pointers to them. | +| [func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion)](#findoperatorsbylabels) | Scans each namespace in `namespaces` for ClusterServiceVersions (CSVs). If `labels` are provided, only CSVs that have at least one of those labels are considered; otherwise all CSVs are fetched. It then filters the results to include only those whose controller pods run inside any namespace listed in `namespaces`. | +| [func(v1alpha1.OperatorsV1alpha1Interface, []labelObject, configuration.Namespace)(*olmv1Alpha.ClusterServiceVersionList)](#findoperatorsmatchingatleastonelabel) | Retrieves all ClusterServiceVersions (CSVs) within the given `namespace` that carry at least one of the supplied label key/value pairs. The function aggregates results across labels and returns a combined list. | +| [func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []labelObject, namespace string) *corev1.PodList](#findpodsmatchingatleastonelabel) | Builds a `PodList` containing all pods in the specified `namespace` that match at least one label from the provided slice. 
| +| [func findStatefulSetsByLabels( appClient appv1client.AppsV1Interface, labels []labelObject, namespaces []string, ) []appsv1.StatefulSet](#findstatefulsetsbylabels) | Enumerates all StatefulSets in the supplied namespaces, filtering by label matches when provided. Returns a slice of matching `StatefulSet` objects. | +| [func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription](#findsubscriptions) | Collects all `Subscription` objects from the provided list of Kubernetes namespaces using an OLM client. | +| [func getAllCatalogSources(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.CatalogSource)](#getallcatalogsources) | Collects every `CatalogSource` resource present in the Kubernetes cluster and returns them as a slice of pointers. | +| [func getAllInstallPlans(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.InstallPlan)](#getallinstallplans) | Collects every `InstallPlan` resource present in the cluster and returns them as a slice of pointers. | +| [func getAllNamespaces(oc corev1client.CoreV1Interface) ([]string, error)](#getallnamespaces) | Queries the cluster for every namespace and returns a slice of their names. | +| [func getAllOperators(olmClient v1alpha1.OperatorsV1alpha1Interface) ([]*olmv1Alpha.ClusterServiceVersion, error)](#getalloperators) | Fetches every ClusterServiceVersion (CSV) across all namespaces using the supplied OLM client and returns a slice of pointers to those CSV objects. | +| [func getAllPackageManifests(olmPkgClient olmpkgclient.PackageManifestInterface) (out []*olmpkgv1.PackageManifest)](#getallpackagemanifests) | Gathers every `PackageManifest` resource present in the cluster and returns them as a slice of pointers. 
| +| [func getAllStorageClasses(client storagev1typed.StorageV1Interface) ([]storagev1.StorageClass, error)](#getallstorageclasses) | Fetches every `StorageClass` defined in the cluster via the Kubernetes API and returns them as a slice. | +| [func getClusterCrdNames() ([]*apiextv1.CustomResourceDefinition, error)](#getclustercrdnames) | Queries the Kubernetes API for every `CustomResourceDefinition` (CRD) in the cluster and returns them as a slice of pointers. | +| [func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.ClusterRoleBinding, error)](#getclusterrolebindings) | Fetches every `ClusterRoleBinding` object present in the Kubernetes cluster. These bindings are non‑namespaced and apply cluster‑wide. | +| [func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomResourceDefinition) []ScaleObject](#getcrscaleobjects) | For each custom resource in `crs`, fetch its corresponding Scale subresource via the Kubernetes Scaling API and return a slice of `ScaleObject` structs that bundle the retrieved scale data with its GroupResource schema. | +| [func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*release.Release](#gethelmlist) | Queries the Kubernetes cluster via Helm client to list all deployed releases per namespace and returns a mapping from namespace name to its release objects. | +| [func getNetworkAttachmentDefinitions(client *clientsholder.ClientsHolder, namespaces []string) ([]nadClient.NetworkAttachmentDefinition, error)](#getnetworkattachmentdefinitions) | Enumerates all `NetworkAttachmentDefinition` resources in the specified Kubernetes namespaces and aggregates them into a single slice. | +| [func getNetworkPolicies(oc networkingv1client.NetworkingV1Interface) ([]networkingv1.NetworkPolicy, error)](#getnetworkpolicies) | Queries the Kubernetes API for all `NetworkPolicy` objects across every namespace and returns them as a slice. 
| +| [func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, err error)](#getopenshiftversion) | Fetches the OpenShift API server version by querying the `ClusterOperator` CRD for `openshift-apiserver`. If not found, returns a sentinel value indicating a non‑OpenShift cluster. | +| [func getOperandPodsFromTestCsvs([]*olmv1Alpha.ClusterServiceVersion, []corev1.Pod)([]*corev1.Pod, error)](#getoperandpodsfromtestcsvs) | Filters a pod list to those whose top‑level owner CR is managed by any of the supplied test CSVs. | +| [func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types.NamespacedName][]*corev1.Pod, error)](#getoperatorcsvpods) | For each ClusterServiceVersion (CSV), fetch the namespace where its operator runs and gather all pods owned by that CSV. Returns a map keyed by `types.NamespacedName` of the CSV to the list of managed pods. | +| [func getPersistentVolumeClaims(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolumeClaim, error)](#getpersistentvolumeclaims) | Queries the Kubernetes API for every PersistentVolumeClaim (PVC) in all namespaces and returns them as a slice. | +| [func getPersistentVolumes(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolume, error)](#getpersistentvolumes) | Queries the Kubernetes API for all PersistentVolume resources and returns them as a slice. | +| [func getPodDisruptionBudgets(oc policyv1client.PolicyV1Interface, namespaces []string) ([]policyv1.PodDisruptionBudget, error)](#getpoddisruptionbudgets) | Gathers all `PodDisruptionBudget` resources across the supplied namespaces and returns them as a single slice. | +| [func getPodsOwnedByCsv(csvName, operatorNamespace string, client *clientsholder.ClientsHolder) ([]*corev1.Pod, error)](#getpodsownedbycsv) | Returns all Pods in `operatorNamespace` whose top‑level owner is the CSV named `csvName`. These are typically operator/controller Pods. 
| +| [func getResourceQuotas(oc corev1client.CoreV1Interface) ([]corev1.ResourceQuota, error)](#getresourcequotas) | Queries Kubernetes for every `ResourceQuota` object across all namespaces and returns them as a slice. | +| [func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, error)](#getrolebindings) | Collects every `RoleBinding` resource present in the Kubernetes cluster. | +| [func getRoles(client rbacv1typed.RbacV1Interface) ([]rbacv1.Role, error)](#getroles) | Enumerates every `Role` resource across all namespaces in a Kubernetes cluster. | +| [func getServiceAccounts(oc corev1client.CoreV1Interface, namespaces []string) ([]*corev1.ServiceAccount, error)](#getserviceaccounts) | Retrieves all `ServiceAccount` objects from the provided list of Kubernetes namespaces and returns them as a slice. | +| [func getServices(oc corev1client.CoreV1Interface, namespaces, ignoreList []string) (allServices []*corev1.Service, err error)](#getservices) | Gathers all `Service` resources from the provided `namespaces`, excluding any whose names appear in `ignoreList`. Returns a slice of pointers to the services and an error if any namespace query fails. | +| [func getSriovNetworkNodePolicies(client *clientsholder.ClientsHolder, namespaces []string) ([]unstructured.Unstructured, error)](#getsriovnetworknodepolicies) | Enumerates all `SriovNetworkNodePolicy` resources across the supplied Kubernetes namespaces using a dynamic client. Returns the combined list or an error if any non‑NotFound issue occurs. | +| [func getSriovNetworks(client *clientsholder.ClientsHolder, namespaces []string) ([]unstructured.Unstructured, error)](#getsriovnetworks) | Enumerates all `SriovNetwork` custom resources across the provided list of Kubernetes namespaces. Returns a slice of unstructured objects representing each network or an error if any request fails. 
| +| [func([]labelObject, string, *appsv1.Deployment)(bool)](#isdeploymentspodsmatchingatleastonelabel) | Checks whether the pod template of a given Deployment has a label that matches at least one label supplied in `labels`. If a match is found, the function returns `true`; otherwise it returns `false`. | +| [func(isIstioServiceMeshInstalled)(appv1client.AppsV1Interface, []string) bool](#isistioservicemeshinstalled) | Determines whether the Istio service mesh is installed in a cluster by verifying the presence of the `istio-system` namespace and the `istiod` deployment. | +| [func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace string, statefulSet *appsv1.StatefulSet) bool](#isstatefulsetsmatchingatleastonelabel) | Checks whether the pod template of a given StatefulSet contains at least one label that matches any key/value pair supplied in `labels`. Returns `true` on first match. | +| [func namespacesListToStringList(namespaceList []configuration.Namespace) (stringList []string)](#namespaceslisttostringlist) | Extracts the `Name` field from each `configuration.Namespace` in a slice and returns a new slice of those names. | + +## Structs + +### DiscoveredTestData + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `AbnormalEvents` | `[]corev1.Event` | Events flagged as abnormal during collection. | +| `ValidProtocolNames` | `[]string` | Protocol names considered valid for tests. | +| `ConnectAPIKey` | `string` | API key for Connect integration. | +| `AllCsvs` | `[]*olmv1Alpha.ClusterServiceVersion` | All operator CSVs available. | +| `Deployments` | `[]appsv1.Deployment` | Deployments matching test labels. | +| `AllSubscriptions` | `[]olmv1Alpha.Subscription` | All OLM subscriptions. | +| `Pods` | `[]corev1.Pod` | Pods selected as “under test” based on user labels. | +| `AllInstallPlans` | `[]*olmv1Alpha.InstallPlan` | Install plans for all operators. 
| +| `AllCatalogSources` | `[]*olmv1Alpha.CatalogSource` | Catalog sources in the cluster. | +| `Roles` | `[]rbacv1.Role` | Namespace‑level roles (all namespaces). | +| `PartnerName` | `string` | Name of the partner providing the test suite. | +| `NetworkPolicies` | `[]networkingv1.NetworkPolicy` | Network policy objects across namespaces. | +| `Crds` | `[]*apiextv1.CustomResourceDefinition` | CRDs filtered to those relevant for the test. | +| `PodDisruptionBudgets` | `[]policyv1.PodDisruptionBudget` | PDB objects present in the cluster. | +| `OCPStatus` | `string` | Determined lifecycle status of the OCP version. | +| `ScaleCrUnderTest` | `[]ScaleObject` | Custom resources that should be scaled during tests. | +| `CollectorAppEndpoint` | `string` | Endpoint URL for the collector app. | +| `ConnectAPIProxyPort` | `string` | Proxy port for Connect requests. | +| `AllCrds` | `[]*apiextv1.CustomResourceDefinition` | All CRDs in the cluster. | +| `HelmChartReleases` | `map[string][]*release.Release` | Helm releases per namespace. | +| `K8sVersion` | `string` | Kubernetes server version string. | +| `IstioServiceMeshFound` | `bool` | Flag indicating whether Istio is installed. | +| `SriovNetworkNodePolicies` | `[]unstructured.Unstructured` | SriovNetworkNodePolicy CRs in target namespaces. | +| `PersistentVolumeClaims` | `[]corev1.PersistentVolumeClaim` | PVCs in target namespaces. | +| `AllPods` | `[]corev1.Pod` | All pods in the cluster (or namespaces) at discovery time. | +| `AllServices` | `[]*corev1.Service` | All services cluster‑wide. | +| `ServiceAccounts` | `[]*corev1.ServiceAccount` | Service accounts in target namespaces. | +| `Services` | `[]*corev1.Service` | Services matching test labels. | +| `Nodes` | `*corev1.NodeList` | List of all cluster nodes. | +| `Namespaces` | `[]string` | Target namespaces specified in configuration. | +| `ClusterRoleBindings` | `[]rbacv1.ClusterRoleBinding` | Cluster‑wide RBAC bindings. 
| +| `OpenshiftVersion` | `string` | OpenShift release identifier. | +| `ConnectProjectID` | `string` | Project ID used in Connect. | +| `PodStates` | `PodStates` | Snapshot of pod counts before execution, used for post‑test comparison. | +| `OperandPods` | `[]*corev1.Pod` | Operand (runtime) pods discovered via best‑effort matching. | +| `RoleBindings` | `[]rbacv1.RoleBinding` | Namespace‑level rolebindings (all namespaces). | +| `Subscriptions` | `[]olmv1Alpha.Subscription` | Subscriptions matching test labels. | +| `StorageClasses` | `[]storagev1.StorageClass` | Storage classes in the cluster. | +| `AllNamespaces` | `[]string` | All namespaces available in the cluster. | +| `ClusterOperators` | `[]configv1.ClusterOperator` | Cluster‑level operators (OpenShift). | +| `Hpas` | `[]*scalingv1.HorizontalPodAutoscaler` | HPAs in target namespaces. | +| `ExecutedBy` | `string` | Identifier of the test executor (user or CI). | +| `AllPackageManifests` | `[]*olmPkgv1.PackageManifest` | Package manifests from OLM. | +| `SriovNetworks` | `[]unstructured.Unstructured` | SriovNetwork CRs in target namespaces. | +| `AllSriovNetworkNodePolicies` | `[]unstructured.Unstructured` | SriovNetworkNodePolicy CRs cluster‑wide. | +| `ConnectAPIBaseURL` | `string` | Base URL for Connect API calls. | +| `CollectorAppPassword` | `string` | Password for the collector application. | +| `StatefulSet` | `[]appsv1.StatefulSet` | StatefulSets matching test labels. | +| `CSVToPodListMap` | `map[types.NamespacedName][]*corev1.Pod` | Mapping of operator CSV names to their running pods. | +| `ResourceQuotaItems` | `[]corev1.ResourceQuota` | Resource quotas defined in the target namespaces. | +| `AllSriovNetworks` | `[]unstructured.Unstructured` | SriovNetwork CRs cluster‑wide. | +| `ServicesIgnoreList` | `[]string` | Service names to exclude from discovery. | +| `ConnectAPIProxyURL` | `string` | Proxy host for Connect requests. 
| +| `Env` | `configuration.TestParameters` | Test configuration parameters supplied by the caller. | +| `Csvs` | `[]*olmv1Alpha.ClusterServiceVersion` | Operator CSVs matching user‑supplied labels. | +| `NetworkAttachmentDefinitions` | `[]nadClient.NetworkAttachmentDefinition` | CNI network attachment definitions in target namespaces. | +| `PersistentVolumes` | `[]corev1.PersistentVolume` | All PVs in the cluster. | +| `AllServiceAccounts` | `[]*corev1.ServiceAccount` | All service accounts cluster‑wide. | +| `ProbePods` | `[]corev1.Pod` | Pods belonging to the probe helper DaemonSet. | + +--- + +### PodStates + +#### Fields + +| Field | Type | Description | +|----------------|-----------------|-------------| +| `BeforeExecution` | `map[string]int` | Stores counts of pods before a specific operation; the key is typically an identifier (e.g., pod name or namespace), and the value represents how many times that pod was observed. | +| `AfterExecution` | `map[string]int` | Records counts of pods after the same operation, using the same key/value scheme as `BeforeExecution`. | + +#### Purpose + +`PodStates` is a lightweight container used to capture snapshots of pod counts at two distinct moments—prior to and following an execution step. By comparing the two maps, callers can determine how many pods were added, removed, or remained unchanged during that step. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| *none* | | + +--- + +--- + +### ScaleObject + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Scale` | `*scalingv1.Scale` | Pointer to the Kubernetes *scale* subresource, which holds replica count and other scaling information. | +| `GroupResourceSchema` | `schema.GroupResource` | The group‑resource pair (e.g., `"apps"`, `"deployments"`), identifying the type of resource that owns this scale object. 
| + +#### Purpose + +`ScaleObject` bundles together the *scale* subresource and its owning resource’s group/resource identification. +It is used by autodiscovery logic to gather scaling information for custom resources that expose a `/scale` endpoint, enabling tools to query or manipulate replica counts programmatically. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetScaleCrUnderTest` | Enumerates all scalable custom resources in given namespaces and returns their corresponding `ScaleObject`s. | +| `getCrScaleObjects` | For each custom resource instance, retrieves its scale subresource via the Scaling API client and constructs a `ScaleObject`. | + +--- + +--- + +## Exported Functions + +### CountPodsByStatus + +**CountPodsByStatus** - Returns a map with the number of *ready* (`PodRunning` phase) and *non‑ready* pods from a list. + +#### Signature (Go) + +```go +func CountPodsByStatus(allPods []corev1.Pod) map[string]int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a map with the number of *ready* (`PodRunning` phase) and *non‑ready* pods from a list. | +| **Parameters** | `allPods []corev1.Pod` – slice of Kubernetes pod objects to evaluate. | +| **Return value** | `map[string]int` with keys `"ready"` and `"non-ready"`. | +| **Key dependencies** | • `k8s.io/api/core/v1` for `PodRunning` constant.
• Iterates over the input slice; no external calls. | +| **Side effects** | None – purely functional, no mutation of inputs or I/O. | +| **How it fits the package** | Used during autodiscovery to capture pod health before and after test execution (see `DoAutoDiscover`). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Initialize map with ready=0, non-ready=0"] + B{"For each pod in allPods"} + C{"pod.Status.Phase == PodRunning?"} + D["Increment ready"] + E["Increment non-ready"] + F["Return map"] + A --> B + B --> C + C -- Yes --> D --> B + C -- No --> E --> B + B -- Done --> F +``` + +#### Function dependencies + +None – this function is not called by any other functions within the package. + +```mermaid +graph TD + Note["None – this function is currently not referenced elsewhere in the package."] +``` + +#### Functions calling `CountPodsByStatus` + +- `github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover.DoAutoDiscover` + (uses it to populate `data.PodStates.BeforeExecution` and later compare with `AfterExecution`). + +```mermaid +graph TD + func_DoAutoDiscover --> func_CountPodsByStatus +``` + +#### Usage example + +```go +// Minimal example invoking CountPodsByStatus +import ( + "fmt" + corev1 "k8s.io/api/core/v1" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" +) + +func main() { + pods := []corev1.Pod{ + {Status: corev1.PodStatus{Phase: corev1.PodRunning}}, + {Status: corev1.PodStatus{Phase: corev1.PodPending}}, + } + counts := autodiscover.CountPodsByStatus(pods) + fmt.Printf("Ready: %d, Non-ready: %d\n", counts["ready"], counts["non-ready"]) +} +``` + +--- + +### CreateLabels + +**CreateLabels** - Parses each input string using a predefined regular expression (`labelRegex`) and converts matched key‑value pairs into `labelObject` instances. Invalid strings are logged as errors and skipped. 
+ +#### Signature (Go) + +```go +func CreateLabels(labelStrings []string) (labelObjects []labelObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses each input string using a predefined regular expression (`labelRegex`) and converts matched key‑value pairs into `labelObject` instances. Invalid strings are logged as errors and skipped. | +| **Parameters** | *`labelStrings []string`* – slice of raw label expressions (e.g., `"app=web"`). | +| **Return value** | *`labelObjects []labelObject`* – slice containing successfully parsed labels; order corresponds to the input slice after filtering out invalid entries. | +| **Key dependencies** | • `regexp.MustCompile(labelRegex)`
• `FindStringSubmatch` on compiled regex
• `len` (to validate match count)
• `log.Error` for error reporting
• Built‑in `append` to accumulate results | +| **Side effects** | Emits log messages via the package logger; otherwise pure function with no external I/O or state mutation. | +| **How it fits the package** | Used by autodiscovery routines (`DoAutoDiscover`, `Run`) to translate configuration label strings into objects consumed by pod‑ and operator‑lookup helpers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over input slice"} + B --> C["Compile regex from labelRegex"] + C --> D["Find submatches in current string"] + D --> E{"Match count == labelRegexMatches?"} + E -- No --> F["Log error via log.Error; continue loop"] + E -- Yes --> G["Create labelObject with key=values1, value=values2"] + G --> H["Append to result slice"] + H --> B + B --> I["Return accumulated slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CreateLabels --> func_MustCompile + func_CreateLabels --> func_FindStringSubmatch + func_CreateLabels --> func_len + func_CreateLabels --> func_Error + func_CreateLabels --> func_append +``` + +#### Functions calling `CreateLabels` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_CreateLabels + func_Run --> func_CreateLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CreateLabels +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" +) + +func main() { + raw := []string{"app=web", "tier=frontend", "invalidLabel"} + labels := autodiscover.CreateLabels(raw) + fmt.Printf("Parsed labels: %+v\n", labels) +} +``` + +--- + +--- + +### DoAutoDiscover + +**DoAutoDiscover** - Queries a running cluster to gather namespaces, pods, operators, CRDs, and other Kubernetes objects that match user‑supplied labels or are otherwise relevant for test execution. The gathered data is returned as a `DiscoveredTestData` struct which the rest of the framework consumes to build the test environment. 
+ +Collects Kubernetes and OpenShift objects needed for test execution by performing an auto‑discovery of resources defined in the supplied configuration. + +```go +func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries a running cluster to gather namespaces, pods, operators, CRDs, and other Kubernetes objects that match user‑supplied labels or are otherwise relevant for test execution. The gathered data is returned as a `DiscoveredTestData` struct which the rest of the framework consumes to build the test environment. | +| **Parameters** | `config *configuration.TestConfiguration` – configuration containing target namespaces, label filters, and other discovery options. | +| **Return value** | `DiscoveredTestData` – a populated struct holding lists of objects such as storage classes, pods, operators, CRDs, network policies, etc. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – provides typed clients for the cluster.
• Helper functions: `getAllStorageClasses`, `getAllNamespaces`, `findSubscriptions`, `getAllOperators`, `getAllInstallPlans`, `getAllCatalogSources`, `FindPodsByLabels`, `CountPodsByStatus`, `findAbnormalEvents`, `getResourceQuotas`, `getPodDisruptionBudgets`, `getNetworkPolicies`, `getClusterCrdNames`, `FindTestCrdNames`, `GetScaleCrUnderTest`, `findOperatorsByLabels`, `getHelmList`, `findClusterOperators`, `getOperatorCsvPods`, `getOperandPodsFromTestCsvs`, `getOpenshiftVersion`, `isIstioServiceMeshInstalled`, role/cluster‑role binding helpers, and various resource retrieval functions for PVCs, services, etc. | +| **Side effects** | • Uses the global logger (`log`) to report errors or fatal conditions; a failure in any lookup aborts execution with `log.Fatal`.
• Does not modify cluster state—only reads resources.
• May populate internal global data structures via helper functions but no external mutation is performed. | +| **How it fits the package** | It is the core of the `autodiscover` package, orchestrating all discovery logic and providing a single entry point for building the test environment used by other packages (e.g., provider). | + +```mermaid +flowchart TD + A["GetClientsHolder"] --> B["getAllStorageClasses"] + A --> C["getAllNamespaces"] + A --> D["findSubscriptions"] + A --> E["getAllOperators"] + A --> F["getAllInstallPlans"] + A --> G["getAllCatalogSources"] + H["CreateLabels(config.PodsUnderTestLabels)"] --> I["FindPodsByLabels"] + I --> J["CountPodsByStatus"] + A --> K["findAbnormalEvents"] + A --> L["getResourceQuotas"] + A --> M["getPodDisruptionBudgets"] + A --> N["getNetworkPolicies"] + A --> O["getClusterCrdNames"] + O --> P["FindTestCrdNames"] + P --> Q["GetScaleCrUnderTest"] + A --> R["findOperatorsByLabels"] + A --> S["getHelmList"] + A --> T["findClusterOperators"] + T --> U["getOperatorCsvPods"] + V["FindPodsByLabels"] --> W["getOperandPodsFromTestCsvs"] + A --> X["getOpenshiftVersion"] + A --> Y["isIstioServiceMeshInstalled"] + A --> Z["getClusterRoleBindings"] + A --> AA["getRoleBindings"] + A --> AB["getRoles"] + A --> AC["findHpaControllers"] + A --> AD["getPersistentVolumes"] + A --> AE["getPersistentVolumeClaims"] + A --> AF["getServices"] + A --> AG["getAllServices"] + A --> AH["getServiceAccounts"] + A --> AI["getSriovNetworks"] + A --> AJ["getSriovNetworkNodePolicies"] + A --> AK["getNetworkAttachmentDefinitions"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_DoAutoDiscover --> func_GetClientsHolder + func_DoAutoDiscover --> func_getAllStorageClasses + func_DoAutoDiscover --> func_getAllNamespaces + func_DoAutoDiscover --> func_findSubscriptions + func_DoAutoDiscover --> func_getAllOperators + func_DoAutoDiscover --> func_getAllInstallPlans + func_DoAutoDiscover --> func_getAllCatalogSources + func_DoAutoDiscover --> 
func_FindPodsByLabels + func_DoAutoDiscover --> func_CountPodsByStatus + func_DoAutoDiscover --> func_findAbnormalEvents + func_DoAutoDiscover --> func_getResourceQuotas + func_DoAutoDiscover --> func_getPodDisruptionBudgets + func_DoAutoDiscover --> func_getNetworkPolicies + func_DoAutoDiscover --> func_getClusterCrdNames + func_DoAutoDiscover --> func_FindTestCrdNames + func_DoAutoDiscover --> func_GetScaleCrUnderTest + func_DoAutoDiscover --> func_findOperatorsByLabels + func_DoAutoDiscover --> func_getHelmList + func_DoAutoDiscover --> func_findClusterOperators + func_DoAutoDiscover --> func_getOperatorCsvPods + func_DoAutoDiscover --> func_getOperandPodsFromTestCsvs + func_DoAutoDiscover --> func_getOpenshiftVersion + func_DoAutoDiscover --> func_isIstioServiceMeshInstalled + func_DoAutoDiscover --> func_getClusterRoleBindings + func_DoAutoDiscover --> func_getRoleBindings + func_DoAutoDiscover --> func_getRoles + func_DoAutoDiscover --> func_findHpaControllers + func_DoAutoDiscover --> func_getPersistentVolumes + func_DoAutoDiscover --> func_getPersistentVolumeClaims + func_DoAutoDiscover --> func_getServices + func_DoAutoDiscover --> func_getAllServices + func_DoAutoDiscover --> func_getServiceAccounts + func_DoAutoDiscover --> func_getSriovNetworks + func_DoAutoDiscover --> func_getSriovNetworkNodePolicies + func_DoAutoDiscover --> func_getNetworkAttachmentDefinitions +``` + +#### Functions calling `DoAutoDiscover` + +```mermaid +graph TD + func_buildTestEnvironment --> func_DoAutoDiscover +``` + +#### Usage example (Go) + +```go +// Minimal example invoking DoAutoDiscover +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration" +) + +func main() { + cfg := configuration.TestConfiguration{ + // Populate fields as needed for discovery + } + data := autodiscover.DoAutoDiscover(&cfg) + // `data` now contains discovered resources for use in tests. 
+} +``` + +--- + +### FindCrObjectByNameByNamespace + +**FindCrObjectByNameByNamespace** - Looks up a Kubernetes `Scale` subresource for the specified CR (Custom Resource) within a given namespace. + +#### Signature (Go) + +```go +func FindCrObjectByNameByNamespace(scale.ScalesGetter, string, string, schema.GroupResource) (*scalingv1.Scale, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Looks up a Kubernetes `Scale` subresource for the specified CR (Custom Resource) within a given namespace. | +| **Parameters** | `scale.ScalesGetter` – client capable of accessing scale resources.
`ns string` – target namespace.<br>
`name string` – name of the custom resource.<br>
`groupResourceSchema schema.GroupResource` – Group/Resource pair identifying the CR type. |
• `log.Error` for logging failures | +| **Side effects** | No state mutation; only reads from the Kubernetes API and logs errors. | +| **How it fits the package** | Provides a low‑level helper that other components (e.g., provider logic) use to obtain scaling information about arbitrary CRs during autodiscovery. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive scalesGetter, ns, name, groupResourceSchema"] --> B["Call Scales(ns).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{})"] + B --> C{"err?"} + C -- yes --> D["log.Error(Cannot retrieve deployment…) → Return nil, err"] + C -- no --> E["Return crScale, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_FindCrObjectByNameByNamespace --> func_ScalesGetter.Scales + func_FindCrObjectByNameByNamespace --> func_Get + func_FindCrObjectByNameByNamespace --> func_Context.TODO + func_FindCrObjectByNameByNamespace --> func_Log.Error +``` + +#### Functions calling `FindCrObjectByNameByNamespace` + +```mermaid +graph TD + func_GetUpdatedCrObject --> func_FindCrObjectByNameByNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FindCrObjectByNameByNamespace +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "k8s.io/client-go/kubernetes/scheme" +) + +func main() { + // Assume sg is a scale.ScalesGetter obtained from a Kubernetes client + var sg scale.ScalesGetter + + ns := "example-namespace" + name := "my-custom-resource" + + // The GroupResource for the CRD, e.g., group "apps.example.com", resource "widgets" + gr := schema.GroupResource{Group: "apps.example.com", Resource: "widgets"} + + scaleObj, err := autodiscover.FindCrObjectByNameByNamespace(sg, ns, name, gr) + if err != nil { + // handle error + } + + // use scaleObj as needed +} +``` + +--- + +### FindDeploymentByNameByNamespace + +**FindDeploymentByNameByNamespace** - Fetches a Kubernetes Deployment resource identified by its namespace and name using 
the AppsV1 client. + +#### Signature (Go) + +```go +func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.Deployment, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches a Kubernetes Deployment resource identified by its namespace and name using the AppsV1 client. | +| **Parameters** | `appClient appv1client.AppsV1Interface` – client for interacting with AppsV1 APIs;
`namespace string` – target namespace;
`name string` – deployment name. | +| **Return value** | `(*appsv1.Deployment, error)` – the Deployment object on success, or an error if retrieval fails. | +| **Key dependencies** | • `appClient.Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})`
• `log.Error` from internal logging package
• `context.TODO()` and `metav1.GetOptions{}` | +| **Side effects** | None other than network I/O to the Kubernetes API server; logs an error message if retrieval fails. | +| **How it fits the package** | Provides a low‑level helper used by higher‑level functions (e.g., `GetUpdatedDeployment`) to locate Deployments during provider operations. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Call appClient.Deployments(namespace).Get"} + B -- success --> C["Return Deployment"] + B -- error --> D["Log error via log.Error"] + D --> E["Return nil, err"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_FindDeploymentByNameByNamespace --> func_Get + func_FindDeploymentByNameByNamespace --> func_Deployments + func_FindDeploymentByNameByNamespace --> func_ContextTODO + func_FindDeploymentByNameByNamespace --> func_LogError +``` + +#### Functions calling `FindDeploymentByNameByNamespace` + +```mermaid +graph TD + func_GetUpdatedDeployment --> func_FindDeploymentByNameByNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FindDeploymentByNameByNamespace +import ( + appv1client "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +func main() { + var client appv1client.AppsV1Interface // obtain from Kubernetes config + ns := "default" + name := "my-deployment" + + deployment, err := FindDeploymentByNameByNamespace(client, ns, name) + if err != nil { + fmt.Printf("Error retrieving deployment: %v\n", err) + return + } + fmt.Printf("Found deployment: %s in namespace %s\n", deployment.Name, deployment.Namespace) +} +``` + +--- + +### FindPodsByLabels + +**FindPodsByLabels** - Enumerates pods in the provided namespaces that match any of the supplied label selectors. It returns two slices: one containing only running (or allowed non‑running) pods and another with all retrieved pods, excluding those marked for deletion. 
+ +#### Signature (Go) + +```go +func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates pods in the provided namespaces that match any of the supplied label selectors. It returns two slices: one containing only running (or allowed non‑running) pods and another with all retrieved pods, excluding those marked for deletion. | +| **Parameters** | `oc corev1client.CoreV1Interface` – Kubernetes Core V1 client; ` []labelObject` – label selectors to match; ` []string` – namespaces to search. | +| **Return value** | Two slices of `corev1.Pod`: `runningPods` (filtered by phase and deletion timestamp) and `allPods` (complete list). | +| **Key dependencies** | • `configuration.GetTestParameters()` for `AllowNonRunning` flag.
• `findPodsMatchingAtLeastOneLabel(oc, labels, ns)` to filter pods by label.
• Kubernetes client methods: `oc.Pods(ns).List`.
• Logging via `log.Debug`, `log.Error`. | +| **Side effects** | No global state mutation. Performs network I/O (Kubernetes API calls) and logs diagnostics. | +| **How it fits the package** | Core part of autodiscovery: used by higher‑level functions such as `DoAutoDiscover` to gather pod information for testing and reporting. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate namespaces"} + B -->|"For each ns"| C["Call findPodsMatchingAtLeastOneLabel or List all pods"] + C --> D["Filter out pods with DeletionTimestamp set"] + D --> E["If allowNonRunning OR pod.Status.Phase == Running, add to runningPods"] + E --> F["Add pod to allPods"] + B -->|"Next ns"| B + F --> G["Return runningPods, allPods"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_FindPodsByLabels --> func_configuration.GetTestParameters + func_FindPodsByLabels --> func_findPodsMatchingAtLeastOneLabel + func_FindPodsByLabels --> func_oc.Pods + func_FindPodsByLabels --> func_log.Debug + func_FindPodsByLabels --> func_log.Error +``` + +#### Functions calling `FindPodsByLabels` + +```mermaid +graph TD + func_DoAutoDiscover --> func_FindPodsByLabels + func_Run --> func_FindPodsByLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FindPodsByLabels +import ( + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Assume oc is a configured CoreV1Interface and labelObject type defined elsewhere. +var oc corev1client.CoreV1Interface +labels := []labelObject{ + {LabelKey: "app", LabelValue: "demo"}, +} +namespaces := []string{"default", "kube-system"} + +running, all := FindPodsByLabels(oc, labels, namespaces) + +// running contains only pods that are Running or allowed non‑running. +// all contains every pod found in the listed namespaces (excluding those marked for deletion). 
+``` + +--- + +### FindStatefulsetByNameByNamespace + +**FindStatefulsetByNameByNamespace** - Fetches a Kubernetes StatefulSet resource identified by its `namespace` and `name`. Returns the object or an error if retrieval fails. + +#### Signature (Go) + +```go +func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches a Kubernetes StatefulSet resource identified by its `namespace` and `name`. Returns the object or an error if retrieval fails. | +| **Parameters** | `appClient appv1client.AppsV1Interface –` client for Apps V1 API;
`namespace string` – namespace of the StatefulSet;<br>
`name string` – name of the StatefulSet. | +| **Return value** | `*appsv1.StatefulSet` – pointer to the retrieved StatefulSet (or `nil` on error).<br>
`error` – non‑nil if the API call fails or the resource is not found. | +| **Key dependencies** | • `appClient.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})`<br>
• Logging via `log.Error`. | +| **Side effects** | Makes a read‑only API request; logs an error message on failure. No state mutations in the caller’s context. | +| **How it fits the package** | Utility function used by higher‑level discovery logic to obtain StatefulSet information needed for policy checks and remediation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call appClient.StatefulSets(namespace).Get"} + B -- Success --> C["Return StatefulSet"] + B -- Failure --> D["Log error + return nil, err"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FindStatefulsetByNameByNamespace --> func_Get + func_FindStatefulsetByNameByNamespace --> func_StatefulSets + func_FindStatefulsetByNameByNamespace --> Logger.Error +``` + +#### Functions calling `FindStatefulsetByNameByNamespace` (Mermaid) + +```mermaid +graph TD + func_GetUpdatedStatefulset --> func_FindStatefulsetByNameByNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FindStatefulsetByNameByNamespace +import ( + "k8s.io/client-go/kubernetes" + appv1client "k8s.io/client-go/applyconfigurations/apps/v1" +) + +func main() { + // Assume kubeClient is a configured kubernetes.Clientset + var kubeClient *kubernetes.Clientset + + appClient := appv1client.NewAppsV1(kubeClient.RESTClient()) + ns, name := "default", "my-statefulset" + + ss, err := FindStatefulsetByNameByNamespace(appClient, ns, name) + if err != nil { + // handle error + } + // use ss... +} +``` + +--- + +### FindTestCrdNames + +**FindTestCrdNames** - Filters a list of cluster CRDs to only those whose names end with any suffix specified in `crdFilters`. + +Retrieves the subset of cluster Custom Resource Definitions (CRDs) whose names match any configured suffix filter. 
+ +#### Signature (Go) + +```go +func FindTestCrdNames(clusterCrds []*apiextv1.CustomResourceDefinition, crdFilters []configuration.CrdFilter) (targetCrds []*apiextv1.CustomResourceDefinition) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters a list of cluster CRDs to only those whose names end with any suffix specified in `crdFilters`. | +| **Parameters** | *`clusterCrds`* – slice of pointers to `apiextv1.CustomResourceDefinition`;
*`crdFilters`* – slice of `configuration.CrdFilter`, each containing a `NameSuffix` string. | +| **Return value** | Slice of CRDs that match at least one suffix filter. If no CRDs are present, returns an empty slice and logs an error. | +| **Key dependencies** | • `log.Error` (internal logging)
• `len`
• `strings.HasSuffix`
• built‑in `append` | +| **Side effects** | Logs an error when the input CRD list is empty; otherwise no external state changes. | +| **How it fits the package** | Used by `DoAutoDiscover` to determine which cluster CRDs should be considered “under test” based on configuration filters. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check if clusterCrds is empty"] -->|"Yes"| B["Log error & return empty slice"] + A -->|"No"| C{"Iterate over each CRD"} + C --> D{"For each crdFilter"} + D --> E{"Does crd.Name end with crdFilter.NameSuffix?"} + E -- Yes --> F["Append crd to targetCrds; break inner loop"] + E -- No --> G["Continue checking next filter"] + F --> H["Proceed to next CRD"] + G --> D + H --> C + C --> I["Return targetCrds"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FindTestCrdNames --> log.Error + func_FindTestCrdNames --> strings.HasSuffix + func_FindTestCrdNames --> append + func_FindTestCrdNames --> len +``` + +#### Functions calling `FindTestCrdNames` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_FindTestCrdNames +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FindTestCrdNames +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/configuration" +) + +func main() { + // Assume we already have cluster CRDs and filter config + var crds []*v1.CustomResourceDefinition + filters := []configuration.CrdFilter{ + {NameSuffix: ".operator.openshift.io"}, + {NameSuffix: ".custom.example.com"}, + } + + targetCRDs := autodiscover.FindTestCrdNames(crds, filters) + + // Use targetCRDs as needed... 
+} +``` + +--- + +### GetScaleCrUnderTest + +**GetScaleCrUnderTest** - For each namespace‑scoped CRD that supports the `scale` subresource, gather all custom resources (CRs) in the provided namespaces and return their scale objects. + +#### Signature (Go) + +```go +func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For each namespace‑scoped CRD that supports the `scale` subresource, gather all custom resources (CRs) in the provided namespaces and return their scale objects. | +| **Parameters** | `namespaces []string` – list of target namespace names.
`crds []*apiextv1.CustomResourceDefinition` – CRDs to inspect. | +| **Return value** | `[]ScaleObject` – slice containing a scale object for every discovered scalable CR. | +| **Key dependencies** | *`clientsholder.GetClientsHolder()` – obtains dynamic client.
* `log.Warn`, `log.Info`, `log.Debug`, `log.Fatal` – logging utilities.
* `getCrScaleObjects(crs, crd)` – helper that extracts scale objects from a list of CRs. | +| **Side effects** | Emits log messages (warn/info/debug/fatal). Does not modify external state. | +| **How it fits the package** | Used by `autodiscover.DoAutoDiscover` to populate `data.ScaleCrUnderTest`, enabling further analysis of scalable resources during autodiscovery. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Get dynamic client"] --> B{"Iterate CRDs"} + B -->|"Namespace‑scoped?"| C{"Check scope"} + C -->|"Cluster‑wide"| D["Warn & skip"] + C -->|"Namespace‑scoped"| E["Iterate versions"] + E --> F{"Has scale subresource?"} + F -->|"No"| G["Info & skip"] + F -->|"Yes"| H["Log debug"] + H --> I["For each namespace"] + I --> J["List CRs via dynamic client"] + J --> K{"Error?"} + K -->|"Yes"| L["Fatal log"] + K -->|"No"| M{"CRs exist?"} + M -->|"Yes"| N["Append scale objects"] + M -->|"No"| O["Warn no CRs found"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetScaleCrUnderTest --> clientsholder.GetClientsHolder + func_GetScaleCrUnderTest --> log.Warn + func_GetScaleCrUnderTest --> log.Info + func_GetScaleCrUnderTest --> log.Debug + func_GetScaleCrUnderTest --> log.Fatal + func_GetScaleCrUnderTest --> getCrScaleObjects +``` + +#### Functions calling `GetScaleCrUnderTest` + +```mermaid +graph TD + func_DoAutoDiscover --> func_GetScaleCrUnderTest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetScaleCrUnderTest +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +func main() { + // Example namespaces and CRDs (normally obtained from the cluster) + namespaces := []string{"default", "test-namespace"} + var crds []*apiextv1.CustomResourceDefinition + + // Call the function + scaleObjs := autodiscover.GetScaleCrUnderTest(namespaces, crds) + + // Use the returned scale objects... 
+ _ = scaleObjs +} +``` + +--- + +## Local Functions + +### findAbnormalEvents + +**findAbnormalEvents** - Gathers all Kubernetes events whose `type` is not `"Normal"` from the supplied namespaces. + +#### Signature (Go) + +```go +func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (abnormalEvents []corev1.Event) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers all Kubernetes events whose `type` is not `"Normal"` from the supplied namespaces. | +| **Parameters** | `oc corev1client.CoreV1Interface – client for CoreV1 API`
`namespaces []string` – list of namespace names to query |
• `log.Error` for error logging
• Standard library `context` and `append` | +| **Side effects** | None beyond returning data; logs errors to the package logger. | +| **How it fits the package** | Used by `DoAutoDiscover` to populate the `AbnormalEvents` field of the discovered test data structure. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Namespaces loop"} + B --> C["Call oc.Events(ns).List with FieldSelector"] + C --> D{"Error?"} + D -- Yes --> E["log.Error & continue"] + D -- No --> F["Append Items to abnormalEvents"] + F --> B + B --> G["Return abnormalEvents"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findAbnormalEvents --> corev1client.CoreV1Interface.List + func_findAbnormalEvents --> log.Error + func_findAbnormalEvents --> context.TODO + func_findAbnormalEvents --> append +``` + +#### Functions calling `findAbnormalEvents` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_findAbnormalEvents +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findAbnormalEvents +import ( + "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +func main() { + // Assume kubeClient is a configured kubernetes.Clientset + var kubeClient *kubernetes.Clientset + + coreV1 := kubeClient.CoreV1() + namespaces := []string{"default", "kube-system"} + + events, err := findAbnormalEvents(coreV1, namespaces) + if err != nil { + // handle error (function actually never returns an error directly) + } + + for _, ev := range events { + fmt.Printf("%s: %s\n", ev.Type, ev.Message) + } +} +``` + +--- + +### findClusterOperators + +**findClusterOperators** - Queries the Kubernetes API for all `ClusterOperator` resources, returning them as a slice of `configv1.ClusterOperator`. If the CR is missing, it logs a debug message and returns `nil, nil`. 
+ +#### Signature (Go) + +```go +func findClusterOperators(client clientconfigv1.ClusterOperatorInterface) ([]configv1.ClusterOperator, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the Kubernetes API for all `ClusterOperator` resources, returning them as a slice of `configv1.ClusterOperator`. If the CR is missing, it logs a debug message and returns `nil, nil`. | +| **Parameters** | `client clientconfigv1.ClusterOperatorInterface –` an interface to the OpenShift ClusterOperators API. | +| **Return value** | `([]configv1.ClusterOperator, error) –` slice of found operators or an error if the list operation fails for reasons other than “not found”. | +| **Key dependencies** | • `client.List(context.TODO(), metav1.ListOptions{})`
• `k8serrors.IsNotFound(err)`
• `log.Debug(msg, args…)` | +| **Side effects** | Emits a debug log when the CR is absent. No other state changes or I/O. | +| **How it fits the package** | Used by the autodiscovery routine (`DoAutoDiscover`) to gather cluster‑wide operator status before proceeding with further discovery steps. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call client.List"} + B -->|"success"| H["Return clusterOperators.Items, nil"] + B -->|"error"| C{"IsNotFound(err)?"} + C -- no --> D["Return error"] + C -- yes --> F["log.Debug(ClusterOperator CR not found)"] + F --> G["Return nil, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findClusterOperators --> func_List + func_findClusterOperators --> func_IsNotFound + func_findClusterOperators --> func_Debug +``` + +#### Functions calling `findClusterOperators` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_findClusterOperators +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findClusterOperators +package main + +import ( + "fmt" + + clientconfigv1 "github.com/openshift/client-go/config/v1" + configv1 "github.com/openshift/api/config/v1" +) + +func main() { + // Assume `client` is an initialized ClusterOperatorInterface. + var client clientconfigv1.ClusterOperatorInterface + operators, err := findClusterOperators(client) + if err != nil { + fmt.Printf("Error retrieving operators: %v\n", err) + return + } + fmt.Printf("Found %d cluster operators\n", len(operators)) +} +``` + +--- + +### findDeploymentsByLabels + +**findDeploymentsByLabels** - Enumerates all deployments in the specified namespaces that contain at least one of the supplied label key/value pairs. If no labels are provided, every deployment in those namespaces is returned. 
+ +#### Signature (Go) + +```go +func findDeploymentsByLabels(appClient appv1client.AppsV1Interface, labels []labelObject, namespaces []string) []appsv1.Deployment +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates all deployments in the specified namespaces that contain at least one of the supplied label key/value pairs. If no labels are provided, every deployment in those namespaces is returned. | +| **Parameters** | `appClient appv1client.AppsV1Interface` – Kubernetes Apps V1 client;
`labels []labelObject` – slice of key/value pairs to filter on;
`namespaces []string` – list of namespace names to search. | +| **Return value** | `[]appsv1.Deployment` – collection of deployments that satisfy the label criteria (empty slice if none found). | +| **Key dependencies** | • `appClient.Deployments(ns).List(context.TODO(), metav1.ListOptions{})`
• `isDeploymentsPodsMatchingAtLeastOneLabel(labels, ns, &deployment)`
• Logging functions (`log.Error`, `log.Warn`, `log.Debug`, `log.Info`) | +| **Side effects** | • Emits log messages for errors and informational events.
• No state mutation beyond local variables; no external I/O besides API calls. | +| **How it fits the package** | Supports autodiscovery of testable deployments by filtering Kubernetes objects according to user‑defined labels, enabling the CNF suite to target relevant workloads. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate namespaces"} + B -->|"for each ns"| C["List deployments in ns"] + C --> D{"Check error"} + D -- Error --> E["Log error & continue"] + D -- No error --> F{"No deployments?"} + F -- Yes --> G["Log warning"] + F -- No --> H{"Iterate deployments"} + H --> I{"Labels provided?"} + I -- Yes --> J{"Match at least one label?"} + J -- Yes --> K["Append deployment"] + J -- No --> L["Skip"] + I -- No --> M["Append all deployments"] + M --> N["Log info"] + H --> O["End loop"] + O --> P["Return list"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findDeploymentsByLabels --> appClient_Deployments_ns_List + func_findDeploymentsByLabels --> isDeploymentsPodsMatchingAtLeastOneLabel + func_findDeploymentsByLabels --> log_Error + func_findDeploymentsByLabels --> log_Warn + func_findDeploymentsByLabels --> log_Debug + func_findDeploymentsByLabels --> log_Info +``` + +#### Functions calling `findDeploymentsByLabels` (Mermaid) + +```mermaid +graph TD + DoAutoDiscover --> findDeploymentsByLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findDeploymentsByLabels +import ( + appv1client "k8s.io/client-go/kubernetes/typed/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func example(client appv1client.AppsV1Interface) { + // Define label filters (key/value) + labels := []labelObject{ + {LabelKey: "app", LabelValue: "nginx"}, + {LabelKey: "tier", LabelValue: "frontend"}, + } + // Target namespaces + namespaces := []string{"default", "prod"} + + deployments := findDeploymentsByLabels(client, labels, namespaces) + + for _, d := range deployments { + 
fmt.Printf("Found deployment %s in namespace %s\n", d.Name, d.Namespace) + } +} +``` + +--- + +--- + +### findHpaControllers + +**findHpaControllers** - Collects every `HorizontalPodAutoscaler` object from the supplied namespaces and returns a slice of pointers to them. + +#### Signature (Go) + +```go +func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scalingv1.HorizontalPodAutoscaler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects every `HorizontalPodAutoscaler` object from the supplied namespaces and returns a slice of pointers to them. | +| **Parameters** | `cs kubernetes.Interface` – client for interacting with the Kubernetes API.
`namespaces []string` – list of namespace names to query. | +| **Return value** | `[]*scalingv1.HorizontalPodAutoscaler` – aggregated HPA objects; empty slice if none found or an error occurs. | +| **Key dependencies** | *`cs.AutoscalingV1().HorizontalPodAutoscalers(ns).List(context.TODO(), metav1.ListOptions{})` – API call to list HPAs.
* `log.Error`, `log.Info` – logging utilities for error and info messages. | +| **Side effects** | None beyond reading from the cluster; logs errors or informational messages. | +| **How it fits the package** | Used by `DoAutoDiscover` to populate the `Hpas` field of the discovered data structure, enabling downstream analysis of autoscaling configurations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + start(Start) --> iterate{"For each namespace"} + iterate --> list["List HPAs"] + list -- success --> append["Append items to result"] + append --> nextIter{"Next namespace?"} + nextIter -- yes --> iterate + nextIter -- no --> checkEmpty{"Result empty?"} + checkEmpty -- yes --> info["Log_Cannot_find_any_deployed_HPA"] + checkEmpty -- no --> endNode["Return slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findHpaControllers --> func_List + func_findHpaControllers --> func_HorizontalPodAutoscalers + func_findHpaControllers --> func_AutoscalingV1 + func_findHpaControllers --> func_Context_TODO + func_findHpaControllers --> func_Log_Error + func_findHpaControllers --> func_Log_Info +``` + +#### Functions calling `findHpaControllers` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_findHpaControllers +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findHpaControllers +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "k8s.io/client-go/kubernetes" +) + +func example(client kubernetes.Interface) { + namespaces := []string{"default", "production"} + hpas := autodiscover.findHpaControllers(client, namespaces) + // hpas now contains pointers to all HPAs found in the specified namespaces +} +``` + +--- + +### findOperatorsByLabels + +**findOperatorsByLabels** - Scans each namespace in `namespaces` for ClusterServiceVersions (CSVs). If `labels` are provided, only CSVs that have at least one of those labels are considered; otherwise all CSVs are fetched. 
It then filters the results to include only those whose controller pods run inside any namespace listed in `namespaces`. + +#### Signature (Go) + +```go +func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Scans each namespace in `namespaces` for ClusterServiceVersions (CSVs). If `labels` are provided, only CSVs that have at least one of those labels are considered; otherwise all CSVs are fetched. It then filters the results to include only those whose controller pods run inside any namespace listed in `namespaces`. | +| **Parameters** | *olmClient* – OLM client interface for querying CSV resources.
*labels* – Slice of label objects (`labelObject`) used as selectors when searching.
*namespaces* – List of target namespaces (`configuration.Namespace`). | +| **Return value** | A slice of pointers to `olmv1Alpha.ClusterServiceVersion` that satisfy the label and namespace constraints. | +| **Key dependencies** | *findOperatorsMatchingAtLeastOneLabel* – helper for label‑based lookup.
*olmClient.ClusterServiceVersions* – OLM API call.
*log.Debug/Info/Error* – logging utilities.
*context.TODO*, *metav1.ListOptions* – Kubernetes client helpers. | +| **Side effects** | Emits debug, info, and error logs; no state mutation outside of local variables. | +| **How it fits the package** | Used by `DoAutoDiscover` to gather operator information for test configuration. It bridges OLM resources with namespace‑scoped testing logic. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over namespaces"} + B -->|"labels present"| C["Call findOperatorsMatchingAtLeastOneLabel"] + B -->|"no labels"| D["List all CSVs in namespace"] + C & D --> E["Collect csvList"] + E --> F["For each CSV"] + F --> G{"Has controller ns annotation?"} + G -- No --> H["Log error, skip"] + G -- Yes --> I{"controller ns in target set?"} + I -- Yes --> J["Append to result slice"] + I -- No --> K["Skip"] + J & K --> L["Next CSV"] + L --> M["Next namespace"] + M --> N["Log found CSVs"] + N --> O["Return csvs"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_findOperatorsByLabels --> func_findOperatorsMatchingAtLeastOneLabel + func_findOperatorsByLabels --> log.Debug + func_findOperatorsByLabels --> log.Error + func_findOperatorsByLabels --> log.Info + func_findOperatorsByLabels --> olmClient.ClusterServiceVersions +``` + +#### Functions calling `findOperatorsByLabels` + +```mermaid +graph TD + func_DoAutoDiscover --> func_findOperatorsByLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findOperatorsByLabels +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + v1alpha1 "k8s.io/client-go/kubernetes/typed/operator/v1alpha1" +) + +func main() { + var olmClient v1alpha1.OperatorsV1alpha1Interface // initialized elsewhere + labels := []autodiscover.labelObject{ + {LabelKey: "app", LabelValue: "my-operator"}, + } + namespaces := []autodiscover.configuration.Namespace{ + {Name: "default"}, + {Name: "operators"}, + } + + csvs := 
autodiscover.findOperatorsByLabels(olmClient, labels, namespaces) + for _, csv := range csvs { + println(csv.Name) + } +} +``` + +--- + +### findOperatorsMatchingAtLeastOneLabel + +**findOperatorsMatchingAtLeastOneLabel** - Retrieves all ClusterServiceVersions (CSVs) within the given `namespace` that carry at least one of the supplied label key/value pairs. The function aggregates results across labels and returns a combined list. + +```go +func(v1alpha1.OperatorsV1alpha1Interface, []labelObject, configuration.Namespace)(*olmv1Alpha.ClusterServiceVersionList) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Retrieves all ClusterServiceVersions (CSVs) within the given `namespace` that carry at least one of the supplied label key/value pairs. The function aggregates results across labels and returns a combined list. | +| **Parameters** | `olmClient v1alpha1.OperatorsV1alpha1Interface` – OLM client used to query CSVs.
`labels []labelObject` – Slice of label objects (`LabelKey`, `LabelValue`) to filter by.
`namespace configuration.Namespace` – Target namespace for the search. | +| **Return value** | `*olmv1Alpha.ClusterServiceVersionList` – Aggregated list of CSVs that matched any label; may be empty if no matches are found or an error occurs during queries. | +| **Key dependencies** | • `olmClient.ClusterServiceVersions(namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: …})`
• Logging via `log.Debug` and `log.Error`
• Standard Go `append` for slice concatenation | +| **Side effects** | No mutation of input parameters.
Logs debug information for each label query and errors if list operations fail. | +| **How it fits the package** | Used by `findOperatorsByLabels` to discover operators that satisfy at least one user‑defined label criterion within a set of namespaces, enabling targeted operator selection during auto‑discovery. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Init["Create empty ClusterServiceVersionList"] + Init --> Loop["For each label in labels"] + Loop --> Query{"Call List API with LabelSelector"} + Query -- Success --> Append["Append returned CSV items to list"] + Query -- Failure --> LogError["Log error, continue loop"] + Append --> NextLabel{"More labels?"} + LogError --> NextLabel + NextLabel -- yes --> Loop + NextLabel -- no --> End["Return aggregated list"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findOperatorsMatchingAtLeastOneLabel --> Logger_Debug + func_findOperatorsMatchingAtLeastOneLabel --> OLMClient_List + func_findOperatorsMatchingAtLeastOneLabel --> Logger_Error + func_findOperatorsMatchingAtLeastOneLabel --> append +``` + +#### Functions calling `findOperatorsMatchingAtLeastOneLabel` (Mermaid) + +```mermaid +graph TD + findOperatorsByLabels --> func_findOperatorsMatchingAtLeastOneLabel +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findOperatorsMatchingAtLeastOneLabel + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + configuration "github.com/redhat-best-practices-for-k8s/certsuite/internal/configuration" +) + +func example() { + var olmClient v1alpha1.OperatorsV1alpha1Interface // initialized elsewhere + labels := []labelObject{ + {LabelKey: "app", LabelValue: "example"}, + {LabelKey: "tier", LabelValue: "backend"}, + } + ns := configuration.Namespace{Name: "demo-namespace"} + + csvList := findOperatorsMatchingAtLeastOneLabel(olmClient, labels, ns) + // csvList now contains all CSVs 
in "demo-namespace" that have either + // the label app=example or tier=backend. +} +``` + +--- + +### findPodsMatchingAtLeastOneLabel + +**findPodsMatchingAtLeastOneLabel** - Builds a `PodList` containing all pods in the specified `namespace` that match at least one label from the provided slice. + +#### Signature (Go) + +```go +func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []labelObject, namespace string) *corev1.PodList +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `PodList` containing all pods in the specified `namespace` that match at least one label from the provided slice. | +| **Parameters** | `oc corev1client.CoreV1Interface` – Kubernetes client;
`labels []labelObject` – list of key/value pairs to search for;
`namespace string` – target namespace. | +| **Return value** | `*corev1.PodList` – aggregated pods matching any label. | +| **Key dependencies** | • `log.Debug`, `log.Error` from internal logging package
• `oc.Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: …})` to query the API
• Standard slice append operation | +| **Side effects** | No mutation of global state; only performs API calls and logs. | +| **How it fits the package** | Helper for `FindPodsByLabels`, used during automatic discovery of CNF pods across namespaces. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Initialize empty PodList"] + B --> C["Loop over each label l"] + C --> D["Log debug: searching with l"] + D --> E["List pods via oc.Pods(namespace).List(...)"] + E --> F{"error?"} + F -- yes --> G["Log error, continue loop"] + G --> C + F -- no --> H["Append found pods to allPods.Items"] + H --> C + C -- done --> I["Return allPods"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findPodsMatchingAtLeastOneLabel --> log.Debug + func_findPodsMatchingAtLeastOneLabel --> oc.Pods + func_findPodsMatchingAtLeastOneLabel --> List + func_findPodsMatchingAtLeastOneLabel --> context.TODO + func_findPodsMatchingAtLeastOneLabel --> log.Error + func_findPodsMatchingAtLeastOneLabel --> append +``` + +#### Functions calling `findPodsMatchingAtLeastOneLabel` (Mermaid) + +```mermaid +graph TD + func_FindPodsByLabels --> func_findPodsMatchingAtLeastOneLabel +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findPodsMatchingAtLeastOneLabel +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Assume `oc` is a pre‑configured CoreV1Interface client, +// and `labels` contains desired labelObject entries. +var oc corev1client.CoreV1Interface +var labels []autodiscover.labelObject + +func main() { + ns := "default" + podList := autodiscover.findPodsMatchingAtLeastOneLabel(oc, labels, ns) + // podList now holds all pods in `ns` that match any of the provided labels. 
+} +``` + +--- + +--- + +### findStatefulSetsByLabels + +**findStatefulSetsByLabels** - Enumerates all StatefulSets in the supplied namespaces, filtering by label matches when provided. Returns a slice of matching `StatefulSet` objects. + +#### Signature (Go) + +```go +func findStatefulSetsByLabels( + appClient appv1client.AppsV1Interface, + labels []labelObject, + namespaces []string, +) []appsv1.StatefulSet +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates all StatefulSets in the supplied namespaces, filtering by label matches when provided. Returns a slice of matching `StatefulSet` objects. | +| **Parameters** | `appClient appv1client.AppsV1Interface` – Kubernetes AppsV1 client.
`labels []labelObject` – Optional list of key/value pairs used to filter StatefulSets.
`namespaces []string` – Namespaces to search within. | +| **Return value** | Slice of `appsv1.StatefulSet` that satisfy the label criteria (or all if no labels supplied). | +| **Key dependencies** | *`List` on `StatefulSets(ns)`
* `isStatefulSetsMatchingAtLeastOneLabel` helper
* Logging functions (`log.Error`, `log.Warn`, `log.Debug`, `log.Info`) | +| **Side effects** | No mutation of input arguments.
Logs errors and warnings; does not propagate errors to caller. | +| **How it fits the package** | Used by the autodiscover routine to gather StatefulSet resources that may contain CNF pods for later analysis. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Loop over namespaces"} + B -->|"For each ns"| C["List StatefulSets"] + C --> D{"Check error"} + D -- Yes --> E["Log error & continue"] + D -- No --> F{"Items present?"} + F -- None --> G["Warn no statefulsets"] + F -- Some --> H["Loop over items"] + H --> I{"Labels supplied?"} + I -- Yes --> J["Check label match"] + J -->|"Match"| K["Append to result"] + J -->|"No match"| L["Skip"] + I -- No --> M["Append all items"] + M --> N["Log info"] + B --> O["End loop"] + O --> P["Warn if no results"] --> Q["Return slice"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_findStatefulSetsByLabels --> List + func_findStatefulSetsByLabels --> StatefulSets + func_findStatefulSetsByLabels --> isStatefulSetsMatchingAtLeastOneLabel + func_findStatefulSetsByLabels --> Logger.Error + func_findStatefulSetsByLabels --> Warn + func_findStatefulSetsByLabels --> Debug + func_findStatefulSetsByLabels --> Info +``` + +#### Functions calling `findStatefulSetsByLabels` + +```mermaid +graph TD + DoAutoDiscover --> findStatefulSetsByLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findStatefulSetsByLabels +import ( + appv1client "k8s.io/client-go/kubernetes/typed/apps/v1" + appsv1 "k8s.io/api/apps/v1" +) + +// Assume `client` is an initialized AppsV1Interface. +var client appv1client.AppsV1Interface + +// Labels to filter by +labels := []labelObject{ + {LabelKey: "app", LabelValue: "my-cnf"}, +} + +// Namespaces to search in +namespaces := []string{"cnf-namespace", "default"} + +// Retrieve StatefulSets matching the labels +statefulSets := findStatefulSetsByLabels(client, labels, namespaces) + +// statefulSets now contains all matching StatefulSet objects. 
+``` + +--- + +### findSubscriptions + +**findSubscriptions** - Collects all `Subscription` objects from the provided list of Kubernetes namespaces using an OLM client. + +#### Signature (Go) + +```go +func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects all `Subscription` objects from the provided list of Kubernetes namespaces using an OLM client. | +| **Parameters** | `olmClient v1alpha1.OperatorsV1alpha1Interface –` interface to interact with Operator Lifecycle Manager resources.
`namespaces []string –` slice of namespace names; an empty string represents all namespaces. | +| **Return value** | `[]olmv1Alpha.Subscription` – aggregated list of subscriptions found across the supplied namespaces. | +| **Key dependencies** | • `olmClient.Subscriptions(ns).List(context.TODO(), metav1.ListOptions{})`
• Logging via `log.Debug`, `log.Error`, and `log.Info`. | +| **Side effects** | Performs read‑only API calls; logs progress and errors but does not modify cluster state. | +| **How it fits the package** | Used by autodiscovery routines to discover operator subscriptions, which are later used for pod mapping and test data collection. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over namespaces"} + B -->|"Non‑empty ns"| C["List subscriptions in ns"] + B -->|"Empty ns"| D["List subscriptions in all namespaces"] + C --> E["Handle error & continue"] + D --> E + E --> F["Append items to slice"] + F --> G{"Next namespace?"} + G -- Yes --> B + G -- No --> H["Log each found subscription"] + H --> I["Return aggregated list"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findSubscriptions --> func_List + func_findSubscriptions --> func_Subscriptions + func_findSubscriptions --> func_Debug + func_findSubscriptions --> func_Error + func_findSubscriptions --> func_Info +``` + +#### Functions calling `findSubscriptions` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_findSubscriptions +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findSubscriptions +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + olmv1Alpha "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" +) + +func main() { + var olmClient v1alpha1.OperatorsV1alpha1Interface // obtain from OLM client set + namespaces := []string{"default", "kube-system"} + subs := autodiscover.findSubscriptions(olmClient, namespaces) + for _, sub := range subs { + println(sub.Name, sub.Namespace) + } +} +``` + +--- + +### getAllCatalogSources + +**getAllCatalogSources** - Collects every `CatalogSource` resource present in the Kubernetes cluster and returns them as a 
slice of pointers. + +#### 1) Signature (Go) + +```go +func getAllCatalogSources(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.CatalogSource) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects every `CatalogSource` resource present in the Kubernetes cluster and returns them as a slice of pointers. | +| **Parameters** | `olmClient v1alpha1.OperatorsV1alpha1Interface` – an OLM client capable of listing catalog sources across all namespaces. | +| **Return value** | `out []*olmv1Alpha.CatalogSource` – a slice containing references to each catalog source found; empty if none or on error. | +| **Key dependencies** | • `olmClient.CatalogSources("")`
• `context.TODO()`
• `metav1.ListOptions{}`
• `log.Error` (internal logger)
• `append` | +| **Side effects** | Logs an error message if the list operation fails; otherwise no state mutation or I/O beyond the API call. | +| **How it fits the package** | Used by `DoAutoDiscover` to populate `data.AllCatalogSources`, enabling downstream analysis of operator catalog sources within the cluster. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["List CatalogSources"] + B -->|"Success"| C{"Iterate Items"} + C --> D["Append to slice"] + D --> E["Return slice"] + B -->|"Error"| F["Log error"] + F --> G["Return empty slice"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAllCatalogSources --> func_List + func_getAllCatalogSources --> func_CatalogSources + func_getAllCatalogSources --> func_ContextTODO + func_getAllCatalogSources --> func_LogError + func_getAllCatalogSources --> func_Append +``` + +#### 5) Functions calling `getAllCatalogSources` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getAllCatalogSources +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getAllCatalogSources +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + var olmClient v1alpha1.OperatorsV1alpha1Interface // assume initialized elsewhere + catalogSources := autodiscover.GetAllCatalogSources(olmClient) + for _, cs := range catalogSources { + println(cs.Name) // or any other processing + } +} +``` + +> **Note**: The function is unexported; the example assumes an exported wrapper `GetAllCatalogSources` exists for external use. + +--- + +### getAllInstallPlans + +**getAllInstallPlans** - Collects every `InstallPlan` resource present in the cluster and returns them as a slice of pointers. 
+ +#### Signature (Go) + +```go +func getAllInstallPlans(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.InstallPlan) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects every `InstallPlan` resource present in the cluster and returns them as a slice of pointers. | +| **Parameters** | `olmClient v1alpha1.OperatorsV1alpha1Interface` – OLM client used to query InstallPlans. | +| **Return value** | `[]*olmv1Alpha.InstallPlan` – Slice containing references to all retrieved InstallPlans; empty if an error occurs. | +| **Key dependencies** | *`olmClient.InstallPlans("")` – API call to list InstallPlans.
* `context.TODO()` – context for the request.
*`metav1.ListOptions{}` – default listing options.
* `log.Error` – logs failures. | +| **Side effects** | None that modify external state; only logs errors. | +| **How it fits the package** | Used by `DoAutoDiscover` to gather InstallPlan data for autodiscovery of operator status and relationships. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["List InstallPlans via olmClient"] + B --> C{"Error?"} + C -- Yes --> D["Log error & return empty slice"] + C -- No --> E["Iterate over items"] + E --> F["Append each item to output slice"] + F --> G["Return slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAllInstallPlans --> func_List + func_getAllInstallPlans --> func_InstallPlans + func_getAllInstallPlans --> func_Context_TODO + func_getAllInstallPlans --> func_Log_Error + func_getAllInstallPlans --> func_append +``` + +#### Functions calling `getAllInstallPlans` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getAllInstallPlans +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getAllInstallPlans +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + olm "k8s.io/client-go/kubernetes/typed/operator/v1alpha1" +) + +// Assume `client` is a pre‑configured OperatorsV1alpha1Interface. +installPlans := autodiscover.getAllInstallPlans(client) +// installPlans now contains all InstallPlan objects in the cluster. +``` + +--- + +### getAllNamespaces + +**getAllNamespaces** - Queries the cluster for every namespace and returns a slice of their names. + +#### Signature (Go) + +```go +func getAllNamespaces(oc corev1client.CoreV1Interface) ([]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the cluster for every namespace and returns a slice of their names. | +| **Parameters** | `oc` – a CoreV1 client interface used to list namespaces. 
| +| **Return value** | A slice of strings (`[]string`) containing each namespace name, or an error if the list operation fails. | +| **Key dependencies** | • `oc.Namespaces().List(context.TODO(), metav1.ListOptions{})`
• `context.TODO()`
• `metav1.ListOptions{}`
• `fmt.Errorf` | +| **Side effects** | None; purely functional – no state mutation or external I/O beyond the API call. | +| **How it fits the package** | Provides a foundational list of namespaces for higher‑level autodiscovery functions that need to iterate over all namespaces (e.g., service discovery, operator enumeration). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["List namespaces via client"] + B --> C{"Error?"} + C -- Yes --> D["Return error with message"] + C -- No --> E["Iterate over items"] + E --> F["Append name to slice"] + F --> G["Return allNs, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getAllNamespaces --> func_List + func_getAllNamespaces --> func_Namespaces + func_getAllNamespaces --> func_Context_TODO + func_getAllNamespaces --> func_Fmt_Errorf + func_getAllNamespaces --> func_append +``` + +#### Functions calling `getAllNamespaces` + +```mermaid +graph TD + func_DoAutoDiscover --> func_getAllNamespaces +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getAllNamespaces +import ( + "fmt" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +func example(client corev1client.CoreV1Interface) { + nsList, err := getAllNamespaces(client) + if err != nil { + fmt.Printf("Failed to list namespaces: %v\n", err) + return + } + fmt.Println("Cluster namespaces:", nsList) +} +``` + +--- + +### getAllOperators + +**getAllOperators** - Fetches every ClusterServiceVersion (CSV) across all namespaces using the supplied OLM client and returns a slice of pointers to those CSV objects. + +#### 1) Signature (Go) + +```go +func getAllOperators(olmClient v1alpha1.OperatorsV1alpha1Interface) ([]*olmv1Alpha.ClusterServiceVersion, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches every ClusterServiceVersion (CSV) across all namespaces using the supplied OLM client and returns a slice of pointers to those CSV objects. 
| +| **Parameters** | `olmClient v1alpha1.OperatorsV1alpha1Interface` – an interface to interact with the Operator Lifecycle Manager API. | +| **Return value** | `([]*olmv1Alpha.ClusterServiceVersion, error)` – slice of CSV pointers or an error if listing fails. | +| **Key dependencies** | • Calls `olmClient.ClusterServiceVersions("").List(...)`
• Uses `context.TODO()` for request context
• Relies on `metav1.ListOptions{}`
• Formats errors with `fmt.Errorf`
• Logs each found CSV via `log.Info`. | +| **Side effects** | No state mutation; only reads from the OLM API and logs information. | +| **How it fits the package** | Supports the autodiscovery routine by providing a complete list of installed operators for subsequent filtering, mapping to pods, or status checks. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getAllOperators"] --> B["olmClient.ClusterServiceVersions()"] + B --> C["List(context.TODO(), metav1.ListOptions{})"] + C --> D{"err?"} + D -- yes --> E["Return error via fmt.Errorf"] + D -- no --> F["Iterate csvList.Items"] + F --> G["Append csvList.Items(i) to csvs slice"] + G --> H["Log each CSV with log.Info"] + H --> I["Return csvs, nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAllOperators --> func_List + func_getAllOperators --> func_ClusterServiceVersions + func_getAllOperators --> func_ContextTODO + func_getAllOperators --> func_Metav1ListOptions + func_getAllOperators --> func_FmtErrorf + func_getAllOperators --> func_LogInfo +``` + +#### 5) Functions calling `getAllOperators` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getAllOperators +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getAllOperators +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + olmclientv1alpha1 "k8s.io/client-go/kubernetes/typed/operator/v1alpha1" // placeholder import path +) + +func main() { + var olmClient olmclientv1alpha1.OperatorsV1alpha1Interface + // Assume olmClient is initialized elsewhere + + csvs, err := autodiscover.getAllOperators(olmClient) + if err != nil { + log.Fatalf("Failed to get operators: %v", err) + } + fmt.Printf("Found %d CSVs\n", len(csvs)) +} +``` + +--- + +--- + +### getAllPackageManifests + +**getAllPackageManifests** - Gathers every `PackageManifest` resource present in the cluster and returns them as a slice of pointers. 
+ +#### Signature (Go) + +```go +func getAllPackageManifests(olmPkgClient olmpkgclient.PackageManifestInterface) (out []*olmpkgv1.PackageManifest) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers every `PackageManifest` resource present in the cluster and returns them as a slice of pointers. | +| **Parameters** | `olmPkgClient olmpkgclient.PackageManifestInterface` – client capable of listing PackageManifests. | +| **Return value** | `[]*olmpkgv1.PackageManifest` – slice containing references to each manifest; empty on error. | +| **Key dependencies** | • `olmPkgClient.List(context.TODO(), metav1.ListOptions{})`
• `log.Error(msg string, args ...any)`
• Built‑in `append` | +| **Side effects** | No mutation of input arguments; logs an error message on failure. | +| **How it fits the package** | Used by `DoAutoDiscover` to populate the `AllPackageManifests` field in autodiscovery data. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["List PackageManifests"] + B --> C{"Error?"} + C -- Yes --> D["Log error & return empty slice"] + C -- No --> E["Iterate items"] + E --> F["Append pointer to out"] + F --> G["Return out"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAllPackageManifests --> func_List + func_getAllPackageManifests --> func_Context_TODO + func_getAllPackageManifests --> func_Log_Error + func_getAllPackageManifests --> func_Append +``` + +#### Functions calling `getAllPackageManifests` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getAllPackageManifests +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getAllPackageManifests +package main + +import ( + "fmt" + + olmpkgclient "github.com/operator-framework/olm/pkg/client" + olmpkgv1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + var client olmpkgclient.PackageManifestInterface // obtain via client factory + pkgs := getAllPackageManifests(client) + fmt.Printf("Found %d package manifests\n", len(pkgs)) +} +``` + +--- + +### getAllStorageClasses + +**getAllStorageClasses** - Fetches every `StorageClass` defined in the cluster via the Kubernetes API and returns them as a slice. + +#### Signature (Go) + +```go +func getAllStorageClasses(client storagev1typed.StorageV1Interface) ([]storagev1.StorageClass, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches every `StorageClass` defined in the cluster via the Kubernetes API and returns them as a slice. 
| +| **Parameters** | `client storagev1typed.StorageV1Interface` – A typed client for interacting with Storage v1 resources. | +| **Return value** | `([]storagev1.StorageClass, error)` – Slice of retrieved `StorageClass` objects; an error if the API call fails. | +| **Key dependencies** | • `client.StorageClasses().List(context.TODO(), metav1.ListOptions{})`
• `log.Error` from internal logging package | +| **Side effects** | None beyond the API request; does not modify cluster state or global variables. | +| **How it fits the package** | Supplies autodiscovery with storage class data, used by `DoAutoDiscover` to populate the `StorageClasses` field of discovered test data. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getAllStorageClasses"] --> B["List StorageClasses"] + B --> C["Check error"] + C -- OK --> D["Return Items"] + C -- Error --> E["Log error & return nil, err"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAllStorageClasses --> func_List + func_getAllStorageClasses --> func_Error +``` + +#### Functions calling `getAllStorageClasses` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getAllStorageClasses +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getAllStorageClasses +import ( + "context" + storagev1typed "k8s.io/client-go/kubernetes/typed/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func example(client storagev1typed.StorageV1Interface) { + classes, err := getAllStorageClasses(client) + if err != nil { + // handle error + return + } + for _, sc := range classes { + fmt.Println("StorageClass:", sc.Name) + } +} +``` + +--- + +### getClusterCrdNames + +**getClusterCrdNames** - Queries the Kubernetes API for every `CustomResourceDefinition` (CRD) in the cluster and returns them as a slice of pointers. + +#### Signature (Go) + +```go +func getClusterCrdNames() ([]*apiextv1.CustomResourceDefinition, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the Kubernetes API for every `CustomResourceDefinition` (CRD) in the cluster and returns them as a slice of pointers. 
| +| **Parameters** | None | +| **Return value** | `([]*apiextv1.CustomResourceDefinition, error)` – on success a slice containing one pointer per CRD; on failure an error describing the issue. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` to obtain a shared client holder
• `oc.APIExtClient.ApiextensionsV1().CustomResourceDefinitions().List` for the API call
• `context.TODO()` and `metav1.ListOptions{}` for request context
• `fmt.Errorf` for error wrapping | +| **Side effects** | No mutation of external state; performs read‑only network I/O to the Kubernetes API. | +| **How it fits the package** | Used by `DoAutoDiscover` to gather all cluster‑wide CRDs, which are later filtered and processed for test discovery. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + getClusterCrdNames --> GetClientsHolder + GetClientsHolder --> APIExtClient + APIExtClient --> ListCRDs + ListCRDs --> BuildSlice + BuildSlice --> ReturnResults +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getClusterCrdNames --> clientsholder.GetClientsHolder + func_getClusterCrdNames --> fmt.Errorf + func_getClusterCrdNames --> metav1.ListOptions + func_getClusterCrdNames --> context.TODO +``` + +#### Functions calling `getClusterCrdNames` (Mermaid) + +```mermaid +graph TD + DoAutoDiscover --> getClusterCrdNames +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getClusterCrdNames +crds, err := autodiscover.getClusterCrdNames() +if err != nil { + log.Fatalf("Failed to retrieve CRDs: %v", err) +} +for _, crd := range crds { + fmt.Println(crd.Name) +} +``` + +--- + +### getClusterRoleBindings + +**getClusterRoleBindings** - Fetches every `ClusterRoleBinding` object present in the Kubernetes cluster. These bindings are non‑namespaced and apply cluster‑wide. + +#### 1) Signature (Go) + +```go +func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.ClusterRoleBinding, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches every `ClusterRoleBinding` object present in the Kubernetes cluster. These bindings are non‑namespaced and apply cluster‑wide. | +| **Parameters** | `client rbacv1typed.RbacV1Interface` – typed client used to query RBAC resources. | +| **Return value** | `([]rbacv1.ClusterRoleBinding, error)` – a slice of the retrieved bindings or an error if the API call fails. 
| +| **Key dependencies** | • `client.ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{})`
• `log.Error(msg string, args ...any)` | +| **Side effects** | None beyond network I/O to the Kubernetes API; logs an error on failure. | +| **How it fits the package** | Part of autodiscovery: collects cluster‑wide RBAC data that is later aggregated into `DiscoveredTestData`. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call List on ClusterRoleBindings"} + B -- Success --> C["Return Items"] + B -- Failure --> D["Log Error & Return nil, err"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getClusterRoleBindings --> func_List + func_getClusterRoleBindings --> func_ClusterRoleBindings + func_getClusterRoleBindings --> pkg_context_TODO + func_getClusterRoleBindings --> func_Log.Error +``` + +#### 5) Functions calling `getClusterRoleBindings` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getClusterRoleBindings +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getClusterRoleBindings +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + rbacv1typed "k8s.io/client-go/kubernetes/typed/rbac/v1" +) + +func example(client rbacv1typed.RbacV1Interface) { + crbs, err := autodiscover.getClusterRoleBindings(client) + if err != nil { + // handle error + } + fmt.Printf("Found %d cluster role bindings\n", len(crbs)) +} +``` + +--- + +--- + +### getCrScaleObjects + +**getCrScaleObjects** - For each custom resource in `crs`, fetch its corresponding Scale subresource via the Kubernetes Scaling API and return a slice of `ScaleObject` structs that bundle the retrieved scale data with its GroupResource schema. 
+ +#### 1) Signature (Go) + +```go +func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomResourceDefinition) []ScaleObject +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For each custom resource in `crs`, fetch its corresponding Scale subresource via the Kubernetes Scaling API and return a slice of `ScaleObject` structs that bundle the retrieved scale data with its GroupResource schema. | +| **Parameters** | *`crs []unstructured.Unstructured` – raw CR objects to process.
*`crd *apiextv1.CustomResourceDefinition` – definition of the CRD owning those resources. | +| **Return value** | `[]ScaleObject` – slice containing a `ScaleObject` for every successfully fetched scale. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – provides access to Kubernetes clients.
• `clients.ScalingClient.Scales(namespace).Get(...)` – retrieves the Scale subresource.
• `cr.GetName()`, `cr.GetNamespace()` – extract metadata from each CR. | +| **Side effects** | On failure to retrieve a scale, logs a fatal error and terminates the process via `log.Fatal`. No other external state is mutated. | +| **How it fits the package** | This helper underpins higher‑level discovery logic in `autodiscover`, enabling the system to understand scaling characteristics of custom resources across namespaces. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over crs"} + B --> C["Create GroupResource schema"] + C --> D["Get CR name & namespace"] + D --> E["Call ScalingClient.Scales(namespace).Get(...)"] + E -- success --> F["Append ScaleObject to slice"] + E -- failure --> G["log.Fatal (terminate)"] + F --> B + G --> H["End"] +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_getCrScaleObjects --> func_GetClientsHolder + func_getCrScaleObjects --> func_Scales + func_getCrScaleObjects --> func_Get + func_getCrScaleObjects --> func_Logger_Fatal +``` + +#### 5) Functions calling `getCrScaleObjects` + +```mermaid +graph TD + func_GetScaleCrUnderTest --> func_getCrScaleObjects +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getCrScaleObjects +crs := []unstructured.Unstructured{ /* populated CRs */ } +crd := &apiextv1.CustomResourceDefinition{ /* populated CRD */ } + +scaleObjs := getCrScaleObjects(crs, crd) +fmt.Printf("Fetched %d scale objects\n", len(scaleObjs)) +``` + +--- + +--- + +### getHelmList + +**getHelmList** - Queries the Kubernetes cluster via Helm client to list all deployed releases per namespace and returns a mapping from namespace name to its release objects. 
+ +#### Signature (Go) + +```go +func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*release.Release +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the Kubernetes cluster via Helm client to list all deployed releases per namespace and returns a mapping from namespace name to its release objects. | +| **Parameters** | `restConfig *rest.Config` – REST configuration for accessing the cluster.
`namespaces []string` – Slice of namespace names to query. | +| **Return value** | `map[string][]*release.Release` – Map where each key is a namespace and the value is a slice of Helm releases installed in that namespace. | +| **Key dependencies** | • `github.com/mittwald/go-helm-client.NewClientFromRestConf`
• `panic` (used on client creation error)
• `helmclient.Client.ListDeployedReleases()` | +| **Side effects** | Creates a Helm client for each namespace; may panic if the client cannot be instantiated. No external I/O beyond cluster API calls. | +| **How it fits the package** | Used by `DoAutoDiscover` to populate the `HelmChartReleases` field of the discovery data, enabling analysis of operator deployments managed through Helm. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> IterateNamespaces + IterateNamespaces --> CreateOptions + CreateOptions --> NewClientFromRestConf + NewClientFromRestConf -->|"Success"| ListDeployedReleases + NewClientFromRestConf -->|"Error"| Panic + ListDeployedReleases --> StoreInMap + StoreInMap --> NextNamespace + NextNamespace --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getHelmList --> func_NewClientFromRestConf + func_getHelmList --> func_ListDeployedReleases +``` + +#### Functions calling `getHelmList` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getHelmList +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getHelmList +import ( + "k8s.io/client-go/rest" + "github.com/mittwald/go-helm-client/release" +) + +func main() { + // Assume restConfig is already configured to point at the target cluster + var restConfig *rest.Config + + namespaces := []string{"default", "openshift-operators"} + + helmReleases := getHelmList(restConfig, namespaces) + + for ns, releases := range helmReleases { + fmt.Printf("Namespace %s has %d Helm releases\n", ns, len(releases)) + for _, r := range releases { + fmt.Println("-", r.Name) + } + } +} +``` + +--- + +--- + +### getNetworkAttachmentDefinitions + +**getNetworkAttachmentDefinitions** - Enumerates all `NetworkAttachmentDefinition` resources in the specified Kubernetes namespaces and aggregates them into a single slice. 
+ +#### Signature (Go) + +```go +func getNetworkAttachmentDefinitions(client *clientsholder.ClientsHolder, namespaces []string) ([]nadClient.NetworkAttachmentDefinition, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates all `NetworkAttachmentDefinition` resources in the specified Kubernetes namespaces and aggregates them into a single slice. | +| **Parameters** | `client *clientsholder.ClientsHolder` – holds various Kubernetes clients.
`namespaces []string` – list of namespace names to query. | +| **Return value** | `([]nadClient.NetworkAttachmentDefinition, error)` – the collected definitions or an error if any request fails (excluding “not found” errors). | +| **Key dependencies** | • `client.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(ns).List`
• `context.TODO()`
• `metav1.ListOptions{}`
• `kerrors.IsNotFound(err)`
• Go slice `append` | +| **Side effects** | No state mutations or I/O beyond the Kubernetes API calls; returns data only. | +| **How it fits the package** | Provides network‑attachment data for autodiscovery, used by `DoAutoDiscover` to populate the `NetworkAttachmentDefinitions` field of `DiscoveredTestData`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over namespaces"} + B -->|"For each ns"| C["List NADs in ns"] + C --> D{"Check error"} + D -- "Error && !IsNotFound" --> E["Return nil, err"] + D -- "Else" --> F["Append items to nadList"] + F --> G["Next namespace or end loop"] + G --> H["Return nadList, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getNetworkAttachmentDefinitions --> func_List + func_getNetworkAttachmentDefinitions --> func_K8sCniCncfIoV1 + func_getNetworkAttachmentDefinitions --> func_Context_TODO + func_getNetworkAttachmentDefinitions --> func_Metav1_ListOptions + func_getNetworkAttachmentDefinitions --> func_IsNotFound + func_getNetworkAttachmentDefinitions --> func_append +``` + +#### Functions calling `getNetworkAttachmentDefinitions` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getNetworkAttachmentDefinitions +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getNetworkAttachmentDefinitions +client := clientsholder.GetClientsHolder() +namespaces := []string{"default", "kube-system"} + +nads, err := getNetworkAttachmentDefinitions(client, namespaces) +if err != nil { + log.Fatalf("Failed to retrieve NADs: %v", err) +} +fmt.Printf("Retrieved %d NetworkAttachmentDefinitions\n", len(nads)) +``` + +--- + +### getNetworkPolicies + +**getNetworkPolicies** - Queries the Kubernetes API for all `NetworkPolicy` objects across every namespace and returns them as a slice. 
+ +#### Signature (Go) + +```go +func getNetworkPolicies(oc networkingv1client.NetworkingV1Interface) ([]networkingv1.NetworkPolicy, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the Kubernetes API for all `NetworkPolicy` objects across every namespace and returns them as a slice. | +| **Parameters** | `oc networkingv1client.NetworkingV1Interface – A client interface to the Networking v1 API group, typically obtained from a kube‑config or in‑cluster configuration. | +| **Return value** | `([]networkingv1.NetworkPolicy, error)` – On success, a slice of all network policies; on failure, an empty slice and the encountered error. | +| **Key dependencies** | • `oc.NetworkPolicies("").List(context.TODO(), metav1.ListOptions{})`
• `context.TODO()`
• `metav1.ListOptions{}` | +| **Side effects** | None. The function only performs read‑only API calls; it does not modify cluster state or local variables beyond its return values. | +| **How it fits the package** | Part of the autodiscover module, it supplies network policy data used by higher‑level discovery logic (e.g., `DoAutoDiscover`) to assess networking constraints affecting test workloads. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getNetworkPolicies"] --> B["Call oc.NetworkPolicies()"] + B --> C["List with context.TODO() & metav1.ListOptions{}"] + C --> D["Return nps.Items or error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getNetworkPolicies --> func_List + func_getNetworkPolicies --> func_NetworkPolicies + func_getNetworkPolicies --> func_TODO +``` + +#### Functions calling `getNetworkPolicies` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getNetworkPolicies +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getNetworkPolicies +package main + +import ( + "fmt" + + networkingv1client "k8s.io/client-go/kubernetes/typed/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + // Assume clientset is already created elsewhere: + // clientset, _ := kubernetes.NewForConfig(config) +) + +func main() { + var oc networkingv1client.NetworkingV1Interface = clientset.NetworkingV1() + policies, err := getNetworkPolicies(oc) + if err != nil { + fmt.Printf("Error retrieving network policies: %v\n", err) + return + } + fmt.Printf("Found %d network policies\n", len(policies)) +} +``` + +--- + +### getOpenshiftVersion + +**getOpenshiftVersion** - Fetches the OpenShift API server version by querying the `ClusterOperator` CRD for `openshift-apiserver`. If not found, returns a sentinel value indicating a non‑OpenShift cluster. 
+ +#### Signature (Go) + +```go +func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches the OpenShift API server version by querying the `ClusterOperator` CRD for `openshift-apiserver`. If not found, returns a sentinel value indicating a non‑OpenShift cluster. | +| **Parameters** | `oClient clientconfigv1.ConfigV1Interface` – Kubernetes client capable of accessing OpenShift configuration APIs. | +| **Return value** | `ver string` – the detected OpenShift version or an empty string if not found.
`err error` – non‑nil on unexpected errors (e.g., network issues). | +| **Key dependencies** | • `oClient.ClusterOperators().Get(context.TODO(), "openshift-apiserver", metav1.GetOptions{})`
• `kerrors.IsNotFound(err)` from `k8s.io/apimachinery/pkg/api/errors`
• Logging via `log.Warn` and `log.Info`
• `errors.New` for error construction | +| **Side effects** | None beyond logging. No state mutation or I/O besides the API call. | +| **How it fits the package** | Used during auto‑discovery to populate the detected OpenShift version in the collected test data. It is invoked by `DoAutoDiscover`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get ClusterOperator openshift-apiserver"] --> B{"Error?"} + B -- Yes --> C{"IsNotFound?"} + C -- Yes --> D["Return NonOpenshiftClusterVersion"] + C -- No --> E["Return err"] + B -- No --> F["Iterate clusterOperator.Status.Versions"] + F --> G{"ver.Name == tnfCsvTargetLabelName"} + G -- Yes --> H["Log version, return ver.Version"] + G -- No --> I["Continue loop"] + I --> F + F --> J{"Found?"} + J -- No --> K["Return error: could not get openshift version"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getOpenshiftVersion --> func_Get + func_getOpenshiftVersion --> func_ClusterOperators + func_getOpenshiftVersion --> func_IsNotFound + func_getOpenshiftVersion --> func_Warn + func_getOpenshiftVersion --> func_Info + func_getOpenshiftVersion --> func_New +``` + +#### Functions calling `getOpenshiftVersion` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getOpenshiftVersion +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getOpenshiftVersion +package main + +import ( + "fmt" + configv1 "k8s.io/client-go/kubernetes/typed/config/v1" +) + +func main() { + // Assume oClient is a configured ConfigV1Interface + var oClient configv1.ConfigV1Interface + + version, err := getOpenshiftVersion(oClient) + if err != nil { + fmt.Printf("Error retrieving OpenShift version: %v\n", err) + return + } + fmt.Printf("OpenShift version: %s\n", version) +} +``` + +--- + +### getOperandPodsFromTestCsvs + +**getOperandPodsFromTestCsvs** - Filters a pod list to those whose top‑level owner CR is managed by any of the supplied test CSVs. 
+ +#### 1) Signature (Go) + +```go +func getOperandPodsFromTestCsvs([]*olmv1Alpha.ClusterServiceVersion, []corev1.Pod)([]*corev1.Pod, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters a pod list to those whose top‑level owner CR is managed by any of the supplied test CSVs. | +| **Parameters** | `testCsvs []*olmv1Alpha.ClusterServiceVersion` – operator definitions;
`pods []corev1.Pod` – all candidate pods | +| **Return value** | `[]*corev1.Pod, error` – selected operand pods or an error if owner resolution fails | +| **Key dependencies** | *strings.Cut*, *path.Join*, *log.Info*, *podhelper.GetPodTopOwner*, *fmt.Errorf* | +| **Side effects** | Emits informational logs; no state mutation outside the function. | +| **How it fits the package** | Used by `DoAutoDiscover` to identify pods that belong to operators under test, enabling further analysis of operator‑managed workloads. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Build CRD map"} + B --> C{"For each CSV"} + C --> D["Extract OwnedCRDs"] + D --> E["Parse name → group, version"] + E --> F["Store in crds map"] + F --> G{"Iterate pods"} + G --> H["Get top owners via podhelper"] + H --> I{"For each owner"} + I --> J["Lookup CSV by CRD path"] + J -- Not found --> K["Continue to next owner"] + J -- Found --> L["Log match, add pod"] + L --> M["Break owner loop"] + M --> N["Next pod"] + N --> O["Return operandPods"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getOperandPodsFromTestCsvs --> strings.Cut + func_getOperandPodsFromTestCsvs --> fmt.Errorf + func_getOperandPodsFromTestCsvs --> path.Join + func_getOperandPodsFromTestCsvs --> log.Info + func_getOperandPodsFromTestCsvs --> podhelper.GetPodTopOwner +``` + +#### 5) Functions calling `getOperandPodsFromTestCsvs` (Mermaid) + +```mermaid +graph TD + DoAutoDiscover --> getOperandPodsFromTestCsvs +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getOperandPodsFromTestCsvs +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +func example() { + // Assume csvs and pods are populated elsewhere + var csvs []*olmv1Alpha.ClusterServiceVersion + var allPods []corev1.Pod + + operandPods, err := 
autodiscover.getOperandPodsFromTestCsvs(csvs, allPods) + if err != nil { + log.Fatalf("Failed to get operand pods: %v", err) + } + fmt.Printf("Found %d operand pods\n", len(operandPods)) +} +``` + +--- + +### getOperatorCsvPods + +**getOperatorCsvPods** - For each ClusterServiceVersion (CSV), fetch the namespace where its operator runs and gather all pods owned by that CSV. Returns a map keyed by `types.NamespacedName` of the CSV to the list of managed pods. + +#### Signature (Go) + +```go +func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types.NamespacedName][]*corev1.Pod, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For each ClusterServiceVersion (CSV), fetch the namespace where its operator runs and gather all pods owned by that CSV. Returns a map keyed by `types.NamespacedName` of the CSV to the list of managed pods. | +| **Parameters** | `csvList []*olmv1Alpha.ClusterServiceVersion` – slice of CSV objects to process. | +| **Return value** | `map[types.NamespacedName][]*corev1.Pod, error` – mapping from CSV identifiers to their operator pods; non‑nil error if any lookup fails. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtain Kubernetes client set.
• `strings.TrimSpace` – sanitize namespace annotation.
• `getPodsOwnedByCsv(csvName, ns, client)` – list pods owned by the CSV.
• `fmt.Errorf` – error formatting. | +| **Side effects** | No global state mutation; only network calls to the Kubernetes API via the clients holder. | +| **How it fits the package** | Used during auto‑discovery (`DoAutoDiscover`) to associate each operator’s CSV with its controller pods, enabling later analysis of operator behavior and health. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getOperatorCsvPods"] --> B["GetClientsHolder"] + A --> C{"Iterate csvList"} + C --> D["Read nsAnnotation from csv"] + D --> E["TrimSpace(ns)"] + E --> F["getPodsOwnedByCsv(csv.Name, trimmedNS, client)"] + F --> G["Add to map with key csvNamespacedName"] + G --> H["Return map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getOperatorCsvPods --> func_GetClientsHolder + func_getOperatorCsvPods --> func_getPodsOwnedByCsv + func_getOperatorCsvPods --> func_strings.TrimSpace + func_getOperatorCsvPods --> func_fmt.Errorf +``` + +#### Functions calling `getOperatorCsvPods` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getOperatorCsvPods +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getOperatorCsvPods +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func example() error { + // Assume csvList is obtained elsewhere + var csvList []*olmv1Alpha.ClusterServiceVersion + + podMap, err := autodiscover.GetOperatorCsvPods(csvList) + if err != nil { + return err + } + + for csvKey, pods := range podMap { + fmt.Printf("CSV %s/%s has %d operator pods\n", + csvKey.Namespace, csvKey.Name, len(pods)) + } + return nil +} +``` + +--- + +### getPersistentVolumeClaims + +**getPersistentVolumeClaims** - Queries the Kubernetes API for every PersistentVolumeClaim (PVC) in all namespaces and returns them as a slice. 
+ +```go +func getPersistentVolumeClaims(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolumeClaim, error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the Kubernetes API for every PersistentVolumeClaim (PVC) in all namespaces and returns them as a slice. | +| **Parameters** | `oc corev1client.CoreV1Interface` – A typed client that can access Core V1 resources. | +| **Return value** | `([]corev1.PersistentVolumeClaim, error)` – The list of PVCs or an error if the API call fails. | +| **Key dependencies** | • `oc.PersistentVolumeClaims("").List(context.TODO(), metav1.ListOptions{})`
• `context.TODO()`
• `metav1.ListOptions{}` | +| **Side effects** | None. The function only performs a read‑only API call and returns data. | +| **How it fits the package** | It is used by `DoAutoDiscover` to populate the `PersistentVolumeClaims` field of the discovered test data structure, enabling downstream analysis of storage usage. | + +#### Internal workflow + +```mermaid +flowchart TD + A["getPersistentVolumeClaims"] --> B["Call oc.PersistentVolumeClaims().List"] + B --> C["Return pvcs.Items or error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getPersistentVolumeClaims --> func_List + func_getPersistentVolumeClaims --> func_PersistentVolumeClaims + func_getPersistentVolumeClaims --> func_TODO +``` + +#### Functions calling `getPersistentVolumeClaims` + +```mermaid +graph TD + func_DoAutoDiscover --> func_getPersistentVolumeClaims +``` + +#### Usage example + +```go +// Minimal example invoking getPersistentVolumeClaims +package main + +import ( + "fmt" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" +) + +func main() { + var client corev1client.CoreV1Interface // Assume this is initialized elsewhere + + pvcs, err := autodiscover.getPersistentVolumeClaims(client) + if err != nil { + fmt.Printf("Error retrieving PVCs: %v\n", err) + return + } + + fmt.Printf("Found %d PersistentVolumeClaims\n", len(pvcs)) +} +``` + +--- + +--- + +### getPersistentVolumes + +**getPersistentVolumes** - Queries the Kubernetes API for all PersistentVolume resources and returns them as a slice. + +#### Signature (Go) + +```go +func getPersistentVolumes(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolume, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the Kubernetes API for all PersistentVolume resources and returns them as a slice. | +| **Parameters** | `oc` – A CoreV1 client interface used to access core Kubernetes resources. 
| **Return value** | `[]corev1.PersistentVolume` – List of PV objects; `error` if the API call fails. | +| **Key dependencies** | - `oc.PersistentVolumes().List(context.TODO(), metav1.ListOptions{})`<br>
- `context` package
- `metav1` package | +| **Side effects** | No state mutation; performs a read‑only API request. | +| **How it fits the package** | Provides volume data for autodiscovery, enabling the suite to analyze storage resources in the cluster. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getPersistentVolumes"] --> B["Create context.TODO()"] + B --> C["List PersistentVolumes via CoreV1 client"] + C --> D{"Error?"} + D -- Yes --> E["Return nil, err"] + D -- No --> F["Return pvs.Items, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + getPersVol --> CoreV1Interface + getPersVol --> ContextTODO + getPersVol --> ListOptions +``` + +#### Functions calling `getPersistentVolumes` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getPersistentVolumes +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getPersistentVolumes +import ( + "fmt" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +func example(oc corev1client.CoreV1Interface) { + pvs, err := getPersistentVolumes(oc) + if err != nil { + fmt.Printf("Failed to list PVs: %v\n", err) + return + } + fmt.Printf("Found %d PersistentVolumes\n", len(pvs)) +} +``` + +--- + +### getPodDisruptionBudgets + +**getPodDisruptionBudgets** - Gathers all `PodDisruptionBudget` resources across the supplied namespaces and returns them as a single slice. + +#### Signature (Go) + +```go +func getPodDisruptionBudgets(oc policyv1client.PolicyV1Interface, namespaces []string) ([]policyv1.PodDisruptionBudget, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers all `PodDisruptionBudget` resources across the supplied namespaces and returns them as a single slice. | +| **Parameters** | `oc policyv1client.PolicyV1Interface` – Kubernetes Policy V1 client used to query PDBs.
`namespaces []string` – List of namespace names to search for PDBs. | +| **Return value** | `([]policyv1.PodDisruptionBudget, error)` – A slice containing all found PDB objects; an error if any list operation fails. | +| **Key dependencies** | • `oc.PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{})`
• `append` to accumulate results | +| **Side effects** | No state is modified outside the function. It performs read‑only API calls and returns data. | +| **How it fits the package** | Supports autodiscovery by collecting cluster resources needed for test analysis; called from `DoAutoDiscover`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getPodDisruptionBudgets"] --> B["Initialize empty slice"] + B --> C{"For each namespace"} + C -->|"Yes"| D["List PDBs via client"] + D --> E{"Error?"} + E -- Yes --> F["Return nil, error"] + E -- No --> G["Append retrieved items"] + G --> H["Continue loop"] + H --> I{"End loop?"} + I -- Yes --> J["Return accumulated slice, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getPodDisruptionBudgets --> func_List + func_getPodDisruptionBudgets --> func_PodDisruptionBudgets + func_getPodDisruptionBudgets --> func_append + func_getPodDisruptionBudgets --> context_TODO +``` + +#### Functions calling `getPodDisruptionBudgets` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getPodDisruptionBudgets +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getPodDisruptionBudgets +package main + +import ( + "context" + policyv1client "k8s.io/client-go/kubernetes/typed/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func main() { + var client policyv1client.PolicyV1Interface // initialized elsewhere + namespaces := []string{"default", "kube-system"} + + pdbs, err := getPodDisruptionBudgets(client, namespaces) + if err != nil { + panic(err) + } + + for _, pdb := range pdbs { + println("PDB:", pdb.Name, "in namespace", pdb.Namespace) + } +} +``` + +--- + +### getPodsOwnedByCsv + +**getPodsOwnedByCsv** - Returns all Pods in `operatorNamespace` whose top‑level owner is the CSV named `csvName`. These are typically operator/controller Pods. 
+ +#### Signature (Go) + +```go +func getPodsOwnedByCsv(csvName, operatorNamespace string, client *clientsholder.ClientsHolder) ([]*corev1.Pod, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns all Pods in `operatorNamespace` whose top‑level owner is the CSV named `csvName`. These are typically operator/controller Pods. | +| **Parameters** | *`csvName string` – name of the target ClusterServiceVersion.
*`operatorNamespace string` – namespace where the operator’s Pods run.
*`client *clientsholder.ClientsHolder` – client holder providing access to Kubernetes APIs. | +| **Return value** | `([]*corev1.Pod, error)` – slice of matching Pod pointers or an error if listing or owner resolution fails. | +| **Key dependencies** | • `client.K8sClient.CoreV1().Pods(...).List`
• `podhelper.GetPodTopOwner`
• `fmt.Errorf`
• `append` | +| **Side effects** | None; only reads from the cluster and allocates local data structures. | +| **How it fits the package** | Used by `getOperatorCsvPods` to build a mapping of CSVs to their operator Pods during autodiscovery. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["List all Pods in operatorNamespace"] + B --> C{"For each Pod"} + C --> D["Get top owners via podhelper.GetPodTopOwner"] + D --> E{"Does owner match csvName & kind CSV?"} + E -- Yes --> F["Add Pod to result slice"] + E -- No --> G["Continue loop"] + G --> C + F --> H["Return result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getPodsOwnedByCsv --> func_List + func_getPodsOwnedByCsv --> func_GetPodTopOwner + func_getPodsOwnedByCsv --> func_Errorf + func_getPodsOwnedByCsv --> func_append +``` + +#### Functions calling `getPodsOwnedByCsv` (Mermaid) + +```mermaid +graph TD + func_getOperatorCsvPods --> func_getPodsOwnedByCsv +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getPodsOwnedByCsv +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/clientsholder" +) + +func demo() error { + client := clientsholder.GetClientsHolder() + pods, err := autodiscover.getPodsOwnedByCsv("my-operator.v1", "operators-ns", client) + if err != nil { + return err + } + // use pods slice... + _ = pods + return nil +} +``` + +--- + +### getResourceQuotas + +**getResourceQuotas** - Queries Kubernetes for every `ResourceQuota` object across all namespaces and returns them as a slice. + +#### Signature + +```go +func getResourceQuotas(oc corev1client.CoreV1Interface) ([]corev1.ResourceQuota, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries Kubernetes for every `ResourceQuota` object across all namespaces and returns them as a slice. 
| +| **Parameters** | `oc corev1client.CoreV1Interface` – client capable of accessing Core V1 resources. | +| **Return value** | `([]corev1.ResourceQuota, error)` – the list of quotas or an error if the API call fails. | +| **Key dependencies** | • `oc.ResourceQuotas("").List(context.TODO(), metav1.ListOptions{})`
• `context.TODO()`
• `metav1.ListOptions{}` | +| **Side effects** | No state mutations; performs a read‑only API request. | +| **How it fits the package** | Provides the data needed by `DoAutoDiscover` to populate the `ResourceQuotaItems` field of the autodiscovery result. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Build ListOptions"} + B --> C["oc.ResourceQuotas().List(context.TODO(), metav1.ListOptions{})"] + C --> D{"Check error"} + D -- Yes --> E["Return nil, err"] + D -- No --> F["rql.Items"] + F --> G["Return items, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getResourceQuotas --> func_List + func_getResourceQuotas --> func_ResourceQuotas + func_getResourceQuotas --> func_TODO +``` + +#### Functions calling `getResourceQuotas` + +```mermaid +graph TD + func_DoAutoDiscover --> func_getResourceQuotas +``` + +#### Usage example + +```go +// Minimal example invoking getResourceQuotas +package main + +import ( + "fmt" + + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +func main() { + var client corev1client.CoreV1Interface // obtain from a real Kubernetes config + quotas, err := autodiscover.getResourceQuotas(client) + if err != nil { + fmt.Println("Error retrieving resource quotas:", err) + return + } + fmt.Printf("Found %d resource quotas\n", len(quotas)) +} +``` + +--- + +### getRoleBindings + +**getRoleBindings** - Collects every `RoleBinding` resource present in the Kubernetes cluster. + +#### Signature (Go) + +```go +func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects every `RoleBinding` resource present in the Kubernetes cluster. | +| **Parameters** | `client` (rbacv1typed.RbacV1Interface) – typed client for the RBAC API group. | +| **Return value** | Slice of `rbacv1.RoleBinding` objects and an error if the list operation fails. 
| +| **Key dependencies** | - `client.RoleBindings("")` to target all namespaces.
- `List(context.TODO(), metav1.ListOptions{})` to fetch resources.
- `log.Error` for failure reporting. | +| **Side effects** | None (pure read‑only). Logs an error on failure but does not modify cluster state. | +| **How it fits the package** | Used by `DoAutoDiscover` to populate the discovered rolebinding data used in further analysis. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Call List on client.RoleBindings()"} + B --> C["Check for error"] + C -- Error --> D["log.Error & return nil, err"] + C -- Success --> E["Return roleList.Items"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getRoleBindings --> func_List + func_getRoleBindings --> func_RoleBindings + func_getRoleBindings --> func_Context_TODO + func_getRoleBindings --> func_Log_Error +``` + +#### Functions calling `getRoleBindings` + +```mermaid +graph TD + func_DoAutoDiscover --> func_getRoleBindings +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getRoleBindings +package main + +import ( + "fmt" + rbacv1typed "k8s.io/client-go/kubernetes/typed/rbac/v1" + + // Assume kubeClient is a configured *kubernetes.Clientset +) + +func main() { + var client rbacv1typed.RbacV1Interface = kubeClient.RbacV1() + roleBindings, err := getRoleBindings(client) + if err != nil { + fmt.Printf("Failed to retrieve rolebindings: %v\n", err) + return + } + fmt.Printf("Found %d rolebindings\n", len(roleBindings)) +} +``` + +--- + +--- + +### getRoles + +**getRoles** - Enumerates every `Role` resource across all namespaces in a Kubernetes cluster. + +#### Signature (Go) + +```go +func getRoles(client rbacv1typed.RbacV1Interface) ([]rbacv1.Role, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates every `Role` resource across all namespaces in a Kubernetes cluster. | +| **Parameters** | `client` – an RbacV1 client interface used to query the API server. | +| **Return value** | A slice of `rbacv1.Role` objects and an error if the list operation fails. 
| +| **Key dependencies** | • `client.Roles("")` – request all namespaces
• `context.TODO()` – context for the API call
• `metav1.ListOptions{}` – default listing options
• `log.Error` – error logging | +| **Side effects** | None beyond API interaction and optional logging. No state mutation occurs within the function itself. | +| **How it fits the package** | Part of the autodiscover module’s data‑collection phase, gathering RBAC information to populate the discovered test data structure. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD +    A["Start"] --> B{"Call client.Roles()"} +    B -->|"Success"| C["Return roleList.Items"] +    B -->|"Error"| D["log.Error & return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD +    func_getRoles --> func_Roles +    func_getRoles --> func_List +    func_getRoles --> func_Error +``` + +- `func_Roles` – method on the RbacV1 client that scopes the request to a namespace; empty string selects all namespaces. +- `func_List` – performs the actual API call. +- `func_Error` – logs failures via `log.Error`. + +#### Functions calling `getRoles` (Mermaid) + +```mermaid +graph TD +    func_DoAutoDiscover --> func_getRoles +``` + +The function is invoked during the autodiscovery process to populate the `Roles` field of the returned data structure. + +#### Usage example (Go) + +```go +// Minimal example invoking getRoles +import ( +    "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" +    rbacv1typed "k8s.io/client-go/kubernetes/typed/rbac/v1" +) + +func main() { +    // Assume `client` is an initialized RbacV1Interface. +    var client rbacv1typed.RbacV1Interface +    roles, err := autodiscover.getRoles(client) +    if err != nil { +        panic(err) +    } +    fmt.Printf("Found %d Roles\n", len(roles)) +} +``` + +--- + +--- + +### getServiceAccounts + +**getServiceAccounts** - Retrieves all `ServiceAccount` objects from the provided list of Kubernetes namespaces and returns them as a slice. 
+ +#### Signature (Go) + +```go +func getServiceAccounts(oc corev1client.CoreV1Interface, namespaces []string) ([]*corev1.ServiceAccount, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Retrieves all `ServiceAccount` objects from the provided list of Kubernetes namespaces and returns them as a slice. | +| **Parameters** | `oc corev1client.CoreV1Interface – client for Core V1 API`
`namespaces []string – names of namespaces to query` | +| **Return value** | `[]*corev1.ServiceAccount – collected service accounts`
`error – nil if all queries succeed, otherwise the first encountered error` | +| **Key dependencies** | • `oc.ServiceAccounts(ns).List(context.TODO(), metav1.ListOptions{})`
• `context.TODO()`
• `metav1.ListOptions{}`
• `append()` (slice manipulation) | +| **Side effects** | No state mutation; only network calls to the Kubernetes API. | +| **How it fits the package** | Used by `DoAutoDiscover` to gather service accounts for both target namespaces and all namespaces, enabling later analysis of roles and permissions. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over namespaces"} + B -->|"For each ns"| C["List ServiceAccounts in ns"] + C --> D{"Check error"} + D -->|"Err"| E["Return collected accounts, err"] + D -->|"OK"| F["Append items to result slice"] + F --> G["Continue loop"] + G -->|"All processed"| H["Return all accounts, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getServiceAccounts --> func_List + func_getServiceAccounts --> func_ServiceAccounts + func_getServiceAccounts --> context_TODO + func_getServiceAccounts --> append +``` + +#### Functions calling `getServiceAccounts` + +```mermaid +graph TD + func_DoAutoDiscover --> func_getServiceAccounts +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getServiceAccounts +import ( + "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +func main() { + // Assume kubeClient is a configured *kubernetes.Clientset + var kubeClient *kubernetes.Clientset + + oc := kubeClient.CoreV1() + namespaces := []string{"default", "openshift"} + + serviceAccounts, err := getServiceAccounts(oc, namespaces) + if err != nil { + log.Fatalf("Failed to fetch service accounts: %v", err) + } + + for _, sa := range serviceAccounts { + fmt.Printf("Found ServiceAccount: %s/%s\n", sa.Namespace, sa.Name) + } +} +``` + +--- + +### getServices + +**getServices** - Gathers all `Service` resources from the provided `namespaces`, excluding any whose names appear in `ignoreList`. Returns a slice of pointers to the services and an error if any namespace query fails. 
+ +#### Signature (Go) + +```go +func getServices(oc corev1client.CoreV1Interface, namespaces, ignoreList []string) (allServices []*corev1.Service, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers all `Service` resources from the provided `namespaces`, excluding any whose names appear in `ignoreList`. Returns a slice of pointers to the services and an error if any namespace query fails. | +| **Parameters** | `oc corev1client.CoreV1Interface` – client for Core V1 API;
`namespaces []string` – list of namespaces to scan;
`ignoreList []string` – names of services to skip | +| **Return value** | `[]*corev1.Service` – pointers to the collected Service objects;
`error` – nil on success or error from any namespace query | +| **Key dependencies** | • `oc.Services(ns).List(context.TODO(), metav1.ListOptions{})` – fetches services per namespace
• `stringhelper.StringInSlice(ignoreList, name, false)` – checks ignore list
• `append` – builds result slice | +| **Side effects** | None. The function only reads from the cluster and constructs an in‑memory slice. | +| **How it fits the package** | Used by `DoAutoDiscover` to populate the discovered services for the test suite, ensuring that user‑specified ignore patterns are respected. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"For each namespace"} + B --> C["Call oc.Services(ns).List(...)"] + C --> D{"Check error"} + D -- Yes --> E["Return allServices, err"] + D -- No --> F["Iterate over s.Items"] + F --> G{"If name in ignoreList?"} + G -- Yes --> H["Skip service"] + G -- No --> I["Append & address of item to allServices"] + I --> F + E --> Z["End with error"] + H --> F + Z --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getServices --> func_List + func_getServices --> func_Services + func_getServices --> pkg_context_TODO + func_getServices --> pkg_metav1_ListOptions + func_getServices --> func_StringInSlice + func_getServices --> builtin_append +``` + +#### Functions calling `getServices` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getServices +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getServices +package main + +import ( + "fmt" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + autodiscover "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" +) + +func main() { + // Assume `oc` is a pre‑configured CoreV1Interface client + var oc corev1client.CoreV1Interface + + namespaces := []string{"default", "kube-system"} + ignoreList := []string{"kubernetes"} + + services, err := autodiscover.GetServices(oc, namespaces, ignoreList) + if err != nil { + fmt.Printf("Error retrieving services: %v\n", err) + return + } + + fmt.Printf("Found %d services:\n", len(services)) + for _, svc := range services { + fmt.Println(svc.Name) + } +} +``` + +> **Note**: In the actual package, `getServices` is 
unexported; callers import it via the package's public API (`DoAutoDiscover`). The example assumes an exported wrapper or uses reflection for demonstration. + +--- + +### getSriovNetworkNodePolicies + +**getSriovNetworkNodePolicies** - Enumerates all `SriovNetworkNodePolicy` resources across the supplied Kubernetes namespaces using a dynamic client. Returns the combined list or an error if any non‑NotFound issue occurs. + +#### Signature (Go) + +```go +func getSriovNetworkNodePolicies(client *clientsholder.ClientsHolder, namespaces []string) ([]unstructured.Unstructured, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates all `SriovNetworkNodePolicy` resources across the supplied Kubernetes namespaces using a dynamic client. Returns the combined list or an error if any non‑NotFound issue occurs. | +| **Parameters** | `client *clientsholder.ClientsHolder` – holds the dynamic client used for discovery.
`namespaces []string` – slice of namespace names to query. | +| **Return value** | `[]unstructured.Unstructured` – flattened list of all policies found.
`error` – non‑nil if a listing operation fails with an error other than “not found”. | +| **Key dependencies** | • `client.DynamicClient.Resource(SriovNetworkNodePolicyGVR).Namespace(ns).List(...)`
• `context.TODO()`
• `metav1.ListOptions{}`
• `kerrors.IsNotFound(err)` | +| **Side effects** | None. The function only performs read‑only API calls and returns data. | +| **How it fits the package** | Part of the autodiscover module’s collection routines, used by `DoAutoDiscover` to gather network‑policy information for further analysis. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Validate client"} + B -- valid --> C["Initialize empty list"] + B -- nil --> D["Return empty slice, no error"] + C --> E["Iterate over namespaces"] + E --> F["List policies via dynamic client"] + F --> G{"Error?"} + G -- IsNotFound --> H["Continue next namespace"] + G -- other --> I["Return nil, error"] + H --> J{"End of list?"} + J -- no --> E + J -- yes --> K["Return aggregated list"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getSriovNetworkNodePolicies --> func_List + func_getSriovNetworkNodePolicies --> func_Namespace + func_getSriovNetworkNodePolicies --> func_Resource + func_getSriovNetworkNodePolicies --> func_TODO + func_getSriovNetworkNodePolicies --> func_IsNotFound + func_getSriovNetworkNodePolicies --> func_append +``` + +#### Functions calling `getSriovNetworkNodePolicies` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getSriovNetworkNodePolicies +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getSriovNetworkNodePolicies +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/clientsholder" +) + +func main() { + holder := clientsholder.GetClientsHolder() + namespaces := []string{"default", "kube-system"} + + policies, err := autodiscover.getSriovNetworkNodePolicies(holder, namespaces) + if err != nil { + log.Fatalf("Failed to get Sriov Network Node Policies: %v", err) + } + + fmt.Printf("Found %d policies\n", len(policies)) +} +``` + +--- + +### getSriovNetworks + +**getSriovNetworks** - Enumerates all `SriovNetwork` 
custom resources across the provided list of Kubernetes namespaces. Returns a slice of unstructured objects representing each network or an error if any request fails. + +#### Signature (Go) + +```go +func getSriovNetworks(client *clientsholder.ClientsHolder, namespaces []string) ([]unstructured.Unstructured, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates all `SriovNetwork` custom resources across the provided list of Kubernetes namespaces. Returns a slice of unstructured objects representing each network or an error if any request fails. | +| **Parameters** | `client *clientsholder.ClientsHolder` – holds a dynamic client for API calls.
`namespaces []string` – target namespace names to search. | +| **Return value** | `[]unstructured.Unstructured` – collected SriovNetwork objects.
`error` – non‑nil if any list operation fails (excluding “not found” errors). | +| **Key dependencies** | • `client.DynamicClient.Resource(SriovNetworkGVR)`
• `Namespace(ns).List(context.TODO(), metav1.ListOptions{})`
• `kerrors.IsNotFound(err)`
• Standard packages: `context`, `metav1`, `unstructured`. | +| **Side effects** | None – purely read‑only interactions with the Kubernetes API. | +| **How it fits the package** | Provides network discovery for SriovNetworks, used by higher‑level autodiscover logic to populate `Data.SriovNetworks` and `Data.AllSriovNetworks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"client or DynamicClient nil?"} + B -- Yes --> C["Return empty slice, nil"] + B -- No --> D["Initialize sriovNetworkList"] + D --> E["Loop over namespaces"] + E --> F["List SriovNetworks in ns"] + F --> G{"Error && not IsNotFound?"} + G -- Yes --> H["Return nil, err"] + G -- No --> I{"snl != nil?"} + I -- Yes --> J["Append snl.Items to list"] + I -- No --> K["Continue loop"] + J --> K + K --> L["End loop"] + L --> M["Return sriovNetworkList, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getSriovNetworks --> func_List + func_getSriovNetworks --> func_Namespace + func_getSriovNetworks --> func_Resource + func_getSriovNetworks --> func_IsNotFound + func_getSriovNetworks --> func_append +``` + +#### Functions calling `getSriovNetworks` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_getSriovNetworks +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getSriovNetworks +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "k8s.io/client-go/dynamic" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func main() { + // Assume dynamicClient is already created and configured + var dynamicClient dynamic.Interface + + clientHolder := &autodiscover.ClientsHolder{ + DynamicClient: dynamicClient, + } + + namespaces := []string{"default", "openshift-sriov-network"} + sriovs, err := getSriovNetworks(clientHolder, namespaces) + if err != nil { + fmt.Printf("Error retrieving SriovNetworks: %v\n", err) + return + } + fmt.Printf("Found %d 
SriovNetwork(s)\n", len(sriovs)) +} +``` + +--- + +### isDeploymentsPodsMatchingAtLeastOneLabel + +**isDeploymentsPodsMatchingAtLeastOneLabel** - Checks whether the pod template of a given Deployment has a label that matches at least one label supplied in `labels`. If a match is found, the function returns `true`; otherwise it returns `false`. + +#### Signature (Go) + +```go +func isDeploymentsPodsMatchingAtLeastOneLabel(labels []labelObject, namespace string, deployment *appsv1.Deployment) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the pod template of a given Deployment has a label that matches at least one label supplied in `labels`. If a match is found, the function returns `true`; otherwise it returns `false`. | +| **Parameters** | `labels []labelObject` – list of key/value pairs to test against.<br>
`namespace string` – namespace where the Deployment resides (used only for logging).
`deployment *appsv1.Deployment` – the Deployment whose pod template labels are examined. | +| **Return value** | `bool` – `true` if any label matches, otherwise `false`. | +| **Key dependencies** | • `log.Debug` – logs each comparison attempt.
• `log.Info` – records when a match is found. | +| **Side effects** | None beyond logging; does not modify the Deployment or labels. | +| **How it fits the package** | Used by `findDeploymentsByLabels` to filter deployments in specified namespaces so that only those whose pods satisfy at least one label are returned for further processing. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph "Check each supplied label" + A["Start"] --> B{"For each labelObject"} + B --> C["label matches?"] + C -- Yes --> D["Log match, return true"] + C -- No --> E["Continue loop"] + E --> B + B --> F["No labels matched, return false"] + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isDeploymentsPodsMatchingAtLeastOneLabel --> func_Log.Debug + func_isDeploymentsPodsMatchingAtLeastOneLabel --> func_Log.Info +``` + +#### Functions calling `isDeploymentsPodsMatchingAtLeastOneLabel` (Mermaid) + +```mermaid +graph TD + func_findDeploymentsByLabels --> func_isDeploymentsPodsMatchingAtLeastOneLabel +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isDeploymentsPodsMatchingAtLeastOneLabel +labels := []labelObject{ + {LabelKey: "app", LabelValue: "web"}, +} +deployment := &appsv1.Deployment{ /* populated elsewhere */ } +namespace := "default" + +matched := isDeploymentsPodsMatchingAtLeastOneLabel(labels, namespace, deployment) +fmt.Printf("Deployment matches at least one label: %v\n", matched) +``` + +--- + +--- + +### isIstioServiceMeshInstalled + +**isIstioServiceMeshInstalled** - Determines whether the Istio service mesh is installed in a cluster by verifying the presence of the `istio-system` namespace and the `istiod` deployment. 
+ +#### Signature (Go) + +```go +func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs []string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the Istio service mesh is installed in a cluster by verifying the presence of the `istio-system` namespace and the `istiod` deployment. | +| **Parameters** | `appClient appv1client.AppsV1Interface` – client for accessing Apps‑V1 resources.<br>
`allNs []string` – list of all namespace names in the cluster. | +| **Return value** | `bool` – `true` if both the namespace and deployment exist; otherwise `false`. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper.StringInSlice`
• `github.com/redhat-best-practices-for-k8s/certsuite/internal/log` (`Info`, `Warn`, `Error`)
• `k8s.io/apimachinery/pkg/api/errors.IsNotFound` | +| **Side effects** | Emits informational, warning, or error logs; no state mutations. | +| **How it fits the package** | Used by `autodiscover.DoAutoDiscover` to flag whether Istio is present when collecting cluster metadata. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check if istio-system namespace exists"] --> B{"Namespace found?"} + B -- No --> C["Log info, return false"] + B -- Yes --> D["Get Deployment istiod"] + D --> E{"Error?"} + E -- IsNotFound --> F["Log warning, return false"] + E -- Other error --> G["Log error, return false"] + E -- None --> H["Log info, return true"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isIstioServiceMeshInstalled --> stringhelper_StringInSlice + func_isIstioServiceMeshInstalled --> log_Info + func_isIstioServiceMeshInstalled --> appClient_Deployments_Get + func_isIstioServiceMeshInstalled --> errors_IsNotFound + func_isIstioServiceMeshInstalled --> log_Warn + func_isIstioServiceMeshInstalled --> log_Error +``` + +#### Functions calling `isIstioServiceMeshInstalled` (Mermaid) + +```mermaid +graph TD + autodiscover_DoAutoDiscover --> func_isIstioServiceMeshInstalled +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isIstioServiceMeshInstalled +import ( + appv1 "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +func checkIstio(client appv1.AppsV1Interface, namespaces []string) bool { + return isIstioServiceMeshInstalled(client, namespaces) +} +``` + +--- + +### isStatefulSetsMatchingAtLeastOneLabel + +**isStatefulSetsMatchingAtLeastOneLabel** - Checks whether the pod template of a given StatefulSet contains at least one label that matches any key/value pair supplied in `labels`. Returns `true` on first match. 
+ +#### Signature (Go) + +```go +func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace string, statefulSet *appsv1.StatefulSet) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the pod template of a given StatefulSet contains at least one label that matches any key/value pair supplied in `labels`. Returns `true` on first match. | +| **Parameters** | • `labels []labelObject` – slice of label objects containing `LabelKey` and `LabelValue`.
• `namespace string` – namespace where the StatefulSet resides (used only for logging).
• `statefulSet *appsv1.StatefulSet` – pointer to the StatefulSet to inspect. | +| **Return value** | `bool` – `true` if any label matches; otherwise `false`. | +| **Key dependencies** | • `log.Debug` – logs search attempts.
• `log.Info` – records a successful match. | +| **Side effects** | No state mutation; only emits log messages. | +| **How it fits the package** | Used by `findStatefulSetsByLabels` to filter StatefulSets based on label criteria during auto‑discovery. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"labels empty?"} + B -- No --> C["Iterate over labels"] + C --> D{"Match found?"} + D -- Yes --> E["Log match & return true"] + D -- No --> F["Continue loop"] + F --> C + C --> G["End: return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isStatefulSetsMatchingAtLeastOneLabel --> log.Debug + func_isStatefulSetsMatchingAtLeastOneLabel --> log.Info +``` + +#### Functions calling `isStatefulSetsMatchingAtLeastOneLabel` (Mermaid) + +```mermaid +graph TD + func_findStatefulSetsByLabels --> func_isStatefulSetsMatchingAtLeastOneLabel +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isStatefulSetsMatchingAtLeastOneLabel + +import ( + appsv1 "k8s.io/api/apps/v1" +) + +// Assume labels, namespace and statefulSet are already defined. +labels := []labelObject{ + {LabelKey: "app", LabelValue: "my-app"}, +} +namespace := "default" + +var ss appsv1.StatefulSet +// ... populate ss ... + +match := isStatefulSetsMatchingAtLeastOneLabel(labels, namespace, &ss) +if match { + fmt.Println("StatefulSet matches at least one label.") +} else { + fmt.Println("No matching labels found.") +} +``` + +--- + +### namespacesListToStringList + +**namespacesListToStringList** - Extracts the `Name` field from each `configuration.Namespace` in a slice and returns a new slice of those names. 
+ +#### Signature (Go) + +```go +func namespacesListToStringList(namespaceList []configuration.Namespace) (stringList []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Extracts the `Name` field from each `configuration.Namespace` in a slice and returns a new slice of those names. | +| **Parameters** | `namespaceList []configuration.Namespace` – input list of Namespace objects to be processed. | +| **Return value** | `stringList []string` – slice containing the name of every namespace from the input. | +| **Key dependencies** | • Calls the built‑in `append` function.
• Relies on the exported field `Name` of `configuration.Namespace`. | +| **Side effects** | No mutation of the input; only constructs and returns a new slice. | +| **How it fits the package** | Used by `DoAutoDiscover` to transform user‑supplied namespace specifications into plain string lists for downstream filtering logic. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over namespaceList"} + B -->|"for each ns"| C["Append ns.Name to stringList"] + C --> B + B --> D["Return stringList"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_namespacesListToStringList --> append +``` + +#### Functions calling `namespacesListToStringList` (Mermaid) + +```mermaid +graph TD + func_DoAutoDiscover --> func_namespacesListToStringList +``` + +#### Usage example (Go) + +```go +// Minimal example invoking namespacesListToStringList +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/autodiscover" + "github.com/redhat-best-practices-for-k8s/certsuite/configuration" +) + +func main() { + // Example namespace objects + nsList := []configuration.Namespace{ + {Name: "dev"}, + {Name: "prod"}, + } + + names := autodiscover.NamespacesListToStringList(nsList) + fmt.Println(names) // Output: [dev prod] +} +``` + +--- diff --git a/docs/pkg/certsuite/certsuite.md b/docs/pkg/certsuite/certsuite.md new file mode 100644 index 000000000..42642adb0 --- /dev/null +++ b/docs/pkg/certsuite/certsuite.md @@ -0,0 +1,537 @@ +# Package certsuite + +**Path**: `pkg/certsuite` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecksDB](#loadchecksdb) + - [LoadInternalChecksDB](#loadinternalchecksdb) + - [Run](#run) + - [Shutdown](#shutdown) + - [Startup](#startup) +- [Local Functions](#local-functions) + - [getK8sClientsConfigFileNames](#getk8sclientsconfigfilenames) + +## Overview + +Orchestrates the complete CNF Certification Suite 
run—discovering target resources, executing checks, building claim artifacts, and optionally sending data to collectors or Red Hat Connect. + +### Key Features + +- Loads all internal test suites into a global database +- Discovers Kubernetes objects via label selectors and runs relevant checks +- Generates claim files, uploads results, and cleans temporary artefacts + +### Design Notes + +- Initialises global state (log file, client holder, label evaluator) before any run; errors terminate the process immediately +- Label filtering uses an expression evaluator to include/exclude checks dynamically +- Run returns a single error that aggregates all failures for downstream handling + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecksDB(labelsExpr string)](#loadchecksdb) | Loads internal check definitions and, if the supplied label filter allows it, triggers the Pre‑Flight checks. | +| [func LoadInternalChecksDB()](#loadinternalchecksdb) | Calls `LoadChecks()` from every internal test‑suite package to register all checks in the global check database. | +| [func Run(labelsFilter, outputFolder string) error](#run) | Orchestrates a full run of the CNF Certification Suite: discovers target resources, executes checks, builds claim artifacts, optionally sends data to collectors or Red Hat Connect, and cleans up temporary files. | +| [func Shutdown()](#shutdown) | Closes the global log file and terminates the process with an error exit if closing fails. | +| [func Startup()](#startup) | Sets up global state for a stand‑alone run of Certsuite: creates log file, label evaluator, client holder, and loads checks. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getK8sClientsConfigFileNames() []string](#getk8sclientsconfigfilenames) | Builds a slice of file paths that point to Kubernetes configuration files, prioritising the user‑supplied `kubeconfig` flag and falling back to the default location (`$HOME/.kube/config`) if it exists. | + +## Exported Functions + +### LoadChecksDB + +**LoadChecksDB** - Loads internal check definitions and, if the supplied label filter allows it, triggers the Pre‑Flight checks. + +Initialises the checks database and conditionally loads the Pre‑Flight test suite based on a label filter expression. + +```go +func LoadChecksDB(labelsExpr string) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Loads internal check definitions and, if the supplied label filter allows it, triggers the Pre‑Flight checks. | +| **Parameters** | `labelsExpr` (string) – A logical expression used to decide whether Pre‑Flight tests should run. | +| **Return value** | None. The function performs side effects only. | +| **Key dependencies** | • `LoadInternalChecksDB()`
• `preflight.ShouldRun(labelsExpr)`
• `preflight.LoadChecks()` | +| **Side effects** | • Modifies the global checks database by calling `LoadInternalChecksDB`.<br/>
• Conditionally invokes Pre‑Flight test loading, which registers checks in the same database.<br/>
• No I/O or concurrency is performed directly; logging and check registration happen inside called functions. | +| **How it fits the package** | It is a central bootstrap routine used by both the CLI startup flow (`Startup`) and the web server handler to prepare the test environment before executing any tests. | + +#### Internal workflow + +```mermaid +flowchart TD + A["LoadInternalChecksDB()"] --> B{"preflight.ShouldRun(labelsExpr)"} + B -- true --> C["preflight.LoadChecks()"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_LoadChecksDB --> func_LoadInternalChecksDB + func_LoadChecksDB --> preflight_ShouldRun + func_LoadChecksDB --> preflight_LoadChecks +``` + +#### Functions calling `LoadChecksDB` + +```mermaid +graph TD + certsuite_Startup --> func_LoadChecksDB + webserver_runHandler --> func_LoadChecksDB +``` + +#### Usage example + +```go +// Minimal example invoking LoadChecksDB +package main + +import "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite" + +func main() { + // The label filter expression can be empty to run all checks. + certsuite.LoadChecksDB("") +} +``` + +--- + +### LoadInternalChecksDB + +**LoadInternalChecksDB** - Calls `LoadChecks()` from every internal test‑suite package to register all checks in the global check database. + +Initialises all internal test check groups by invoking the load functions of each test suite package. + +--- + +#### Signature (Go) + +```go +func LoadInternalChecksDB() +``` + +--- + +#### Summary Table + +| Aspect | Details | +|-----------------|---------| +| **Purpose** | Calls `LoadChecks()` from every internal test‑suite package to register all checks in the global check database. | +| **Parameters** | None | +| **Return value**| None | +| **Key dependencies** |<br/>
  • accesscontrol.LoadChecks()
  • certification.LoadChecks()
  • lifecycle.LoadChecks()
  • manageability.LoadChecks()
  • networking.LoadChecks()
  • observability.LoadChecks()
  • performance.LoadChecks()
  • platform.LoadChecks()
  • operator.LoadChecks()
| +| **Side effects** | Registers checks globally; no I/O, concurrency or state mutation beyond the check database. | +| **How it fits the package** | Part of the `certsuite` package’s public API that prepares all tests before execution (called by `LoadChecksDB` and indirectly by command‑line utilities). | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["accesscontrol.LoadChecks()"] + B --> C["certification.LoadChecks()"] + C --> D["lifecycle.LoadChecks()"] + D --> E["manageability.LoadChecks()"] + E --> F["networking.LoadChecks()"] + F --> G["observability.LoadChecks()"] + G --> H["performance.LoadChecks()"] + H --> I["platform.LoadChecks()"] + I --> J["operator.LoadChecks()"] + J --> Z["End"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadInternalChecksDB --> func_accesscontrol_LoadChecks + func_LoadInternalChecksDB --> func_certification_LoadChecks + func_LoadInternalChecksDB --> func_lifecycle_LoadChecks + func_LoadInternalChecksDB --> func_manageability_LoadChecks + func_LoadInternalChecksDB --> func_networking_LoadChecks + func_LoadInternalChecksDB --> func_observability_LoadChecks + func_LoadInternalChecksDB --> func_performance_LoadChecks + func_LoadInternalChecksDB --> func_platform_LoadChecks + func_LoadInternalChecksDB --> func_operator_LoadChecks +``` + +--- + +#### Functions calling `LoadInternalChecksDB` (Mermaid) + +```mermaid +graph TD + func_getMatchingTestIDs --> func_LoadInternalChecksDB + func_LoadChecksDB --> func_LoadInternalChecksDB +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking LoadInternalChecksDB +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite" +) + +func main() { + certsuite.LoadInternalChecksDB() + // At this point all checks are registered and can be queried or executed. 
+} +``` + +--- + +### Run + +**Run** - Orchestrates a full run of the CNF Certification Suite: discovers target resources, executes checks, builds claim artifacts, optionally sends data to collectors or Red Hat Connect, and cleans up temporary files. + +#### Signature (Go) + +```go +func Run(labelsFilter, outputFolder string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Orchestrates a full run of the CNF Certification Suite: discovers target resources, executes checks, builds claim artifacts, optionally sends data to collectors or Red Hat Connect, and cleans up temporary files. | +| **Parameters** | `labelsFilter string` – expression used to filter test cases by labels.
`outputFolder string` – directory where results (claim file, logs, artifacts) are written. | +| **Return value** | `error` – non‑nil if any step fails that prevents normal completion (e.g., failure to compress artifacts or send data). | +| **Key dependencies** | • `configuration.GetTestParameters()`
• `provider.GetTestEnvironment()`
• `checksdb.RunChecks()`
• `autodiscover.FindPodsByLabels()`, `autodiscover.CountPodsByStatus()`
• `claimhelper.NewClaimBuilder()`
• `results.CreateResultsWebFiles()`, `results.CompressResultsArtifacts()`
• `collector.SendClaimFileToCollector()`
• `results.GetCertIDFromConnectAPI()`, `results.SendResultsToConnectAPI()`
• Logging via `log.Logger` | +| **Side effects** | • Prints status to stdout.
• Writes log file, claim JSON/JUnit XML, HTML artifacts, and optional ZIP archive.
• May delete temporary ZIP or web files based on configuration.
• Sends HTTP requests to external services (collector, Red Hat Connect). | +| **How it fits the package** | Top‑level entry point for both CLI (`runTestSuite`) and web server (`runHandler`). Coordinates all major subsystems of the certsuite package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Get test parameters"] --> B["Print discovery message"] + B --> C["Initialize environment"] + C --> D["Run checks (timeout)"] + D --> E["Record execution time"] + E --> F["Find pods & count states"] + F --> G["Build claim (JSON/JUnit)"] + G --> H{"Sanitize claim?"} + H -- Yes --> I["Sanitize file"] + H -- No --> J + I --> J + J --> K{"Send to collector?"} + K -- Yes --> L["Send claim via HTTP"] + K -- No --> M + L --> M + M --> N{"Create ZIP?"} + N -- Yes --> O["Compress artifacts"] + O --> P{"Send to Connect?"} + P -- Yes --> Q["Get CertID & upload ZIP"] + P -- No --> R + Q --> R + R --> S{"Delete ZIP if omitted?"} + S -- Yes --> T["Remove ZIP file"] + S -- No --> U + T --> V{"Delete web files?"} + V -- Yes --> W["Remove each web file"] + V -- No --> X + U --> X + X --> Y["Return nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Run --> func_GetTestParameters + func_Run --> func_Println + func_Run --> func_Print + func_Run --> func_GetTestEnvironment + func_Run --> func_Info + func_Run --> func_Now + func_Run --> func_RunChecks + func_Run --> func_FindPodsByLabels + func_Run --> func_CreateLabels + func_Run --> func_CountPodsByStatus + func_Run --> func_NewClaimBuilder + func_Run --> func_Build + func_Run --> func_ToJUnitXML + func_Run --> func_SanitizeClaimFile + func_Run --> func_SendClaimFileToCollector + func_Run --> func_CreateResultsWebFiles + func_Run --> func_CompressResultsArtifacts + func_Run --> func_GetCertIDFromConnectAPI + func_Run --> func_SendResultsToConnectAPI + func_Run --> func_Remove +``` + +#### Functions calling `Run` + +```mermaid +graph TD + func_runTestSuite --> func_Run +``` + +#### Usage example (Go) + +```go 
+package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite" +) + +func main() { + labels := "app=web,component=backend" + outputDir := "./results" + + if err := certsuite.Run(labels, outputDir); err != nil { + log.Fatalf("Test suite failed: %v", err) + } +} +``` + +This example initiates a certification run that filters tests by the specified labels and writes all artifacts to `./results`. + +--- + +### Shutdown + +**Shutdown** - Closes the global log file and terminates the process with an error exit if closing fails. + +#### Signature (Go) + +```go +func Shutdown() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Closes the global log file and terminates the process with an error exit if closing fails. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | `log.CloseGlobalLogFile()` – closes the shared log file.
`fmt.Fprintf` – writes an error message to standard error.
`os.Exit(1)` – exits the process with status code 1. | +| **Side effects** | *Closes a global resource* (the log file).
*Prints an error and aborts the program* if the close operation fails. | +| **How it fits the package** | Acts as a cleanup routine called after running tests in stand‑alone mode, ensuring that logging resources are released before program termination. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start Shutdown"] --> B{"Close log file"} + B -- success --> C["End"] + B -- failure --> D["Print error to stderr"] + D --> E["Exit with status 1"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Shutdown --> func_CloseGlobalLogFile + func_Shutdown --> fmt.Fprintf + func_Shutdown --> os.Exit +``` + +#### Functions calling `Shutdown` + +```mermaid +graph TD + func_runTestSuite --> func_Shutdown +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Shutdown +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite" +) + +func main() { + // ... perform operations … + certsuite.Shutdown() +} +``` + +--- + +### Startup + +**Startup** - Sets up global state for a stand‑alone run of Certsuite: creates log file, label evaluator, client holder, and loads checks. + +#### Signature (Go) + +```go +func Startup() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sets up global state for a stand‑alone run of Certsuite: creates log file, label evaluator, client holder, and loads checks. | +| **Parameters** | None | +| **Return value** | None – the function exits on fatal errors via `os.Exit`. | +| **Key dependencies** | - `configuration.GetTestParameters`
- `checksdb.InitLabelsExprEvaluator`
- `log.CreateGlobalLogFile`
- `clientsholder.GetClientsHolder`
- `LoadChecksDB`
- `versions.GitVersion`, `versions.ClaimFormatVersion`
- `cli.PrintBanner` | +| **Side effects** | • Writes to `os.Stderr` and may terminate the process with `os.Exit(1)`.
• Creates/overwrites a log file in the output directory.
• Populates global logger, client holder, and label evaluator.
• Prints diagnostic information to stdout. | +| **How it fits the package** | Called by the command‑line entry point when running Certsuite in stand‑alone mode; prepares all necessary resources before test execution begins. | + +#### Internal workflow + +```mermaid +flowchart TD + A["GetTestParameters"] --> B["InitLabelsExprEvaluator"] + B --> C{"Error?"} + C -->|"Yes"| D["Print error to stderr & Exit"] + C -->|"No"| E["CreateGlobalLogFile"] + E --> F{"Error?"} + F -->|"Yes"| G["Print error to stderr & Exit"] + F -->|"No"| H["Warn if no labels"] + H --> I["GetClientsHolder(getK8sClientsConfigFileNames())"] + I --> J["LoadChecksDB"] + J --> K["Log suite info"] + K --> L["Print banner and summary"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Startup --> func_GetTestParameters + func_Startup --> func_InitLabelsExprEvaluator + func_Startup --> func_CreateGlobalLogFile + func_Startup --> func_Warn + func_Startup --> func_GetClientsHolder + func_Startup --> func_LoadChecksDB + func_Startup --> func_GitVersion + func_Startup --> func_PrintBanner +``` + +#### Functions calling `Startup` + +```mermaid +graph TD + func_runTestSuite --> func_Startup +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Startup +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite" +) + +func main() { + certsuite.Startup() + // ... subsequent test run logic ... +} +``` + +--- + +## Local Functions + +### getK8sClientsConfigFileNames + +**getK8sClientsConfigFileNames** - Builds a slice of file paths that point to Kubernetes configuration files, prioritising the user‑supplied `kubeconfig` flag and falling back to the default location (`$HOME/.kube/config`) if it exists. 
+ +#### 1) Signature (Go) + +```go +func getK8sClientsConfigFileNames() []string +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a slice of file paths that point to Kubernetes configuration files, prioritising the user‑supplied `kubeconfig` flag and falling back to the default location (`$HOME/.kube/config`) if it exists. | +| **Parameters** | None | +| **Return value** | A slice of strings containing zero or more absolute paths to kubeconfig files. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration` – reads global test parameters.
• `os.Getenv`, `os.Stat` – access environment and file system.
• `path/filepath.Join` – construct default config path.
• `github.com/redhat-best-practices-for-k8s/certsuite/internal/log` – logs discovery steps. | +| **Side effects** | None beyond logging; does not modify global state or write files. | +| **How it fits the package** | Used during startup to populate the client holder with configuration file names, enabling subsequent Kubernetes interactions without hard‑coding paths. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get global test params"} + B --> C["Initialize empty slice"] + C --> D{"Is Kubeconfig param set?"} + D -- Yes --> E["Append user‑supplied kubeconfig"] + D -- No --> F["Skip"] + E & F --> G["Read HOME env var"] + G --> H{"HOME defined?"} + H -- Yes --> I["Build default path `$HOME/.kube/config`"] + I --> J{"Does file exist?"} + J -- Exists --> K["Log existence; append to slice"] + J -- Not exists --> L["Log non‑existence"] + H -- No --> M["Skip default check"] + K & L & M --> N["Return slice"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getK8sClientsConfigFileNames --> configuration.GetTestParameters + func_getK8sClientsConfigFileNames --> os.Getenv + func_getK8sClientsConfigFileNames --> filepath.Join + func_getK8sClientsConfigFileNames --> os.Stat + func_getK8sClientsConfigFileNames --> log.Info +``` + +#### 5) Functions calling `getK8sClientsConfigFileNames` (Mermaid) + +```mermaid +graph TD + func_Startup --> func_getK8sClientsConfigFileNames +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getK8sClientsConfigFileNames +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/certsuite" +) + +func main() { + configFiles := certsuite.GetKubeconfigPaths() + fmt.Println("Detected kubeconfig files:", configFiles) +} +``` + +*(Note: `GetKubeconfigPaths` is a public wrapper that internally calls `getK8sClientsConfigFileNames`.)* + +--- diff --git a/docs/pkg/checksdb/checksdb.md b/docs/pkg/checksdb/checksdb.md new file mode 
100644 index 000000000..b6ae1113e --- /dev/null +++ b/docs/pkg/checksdb/checksdb.md @@ -0,0 +1,3764 @@ +# Package checksdb + +**Path**: `pkg/checksdb` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [Check](#check) + - [ChecksGroup](#checksgroup) +- [Exported Functions](#exported-functions) + - [Check.Abort](#check.abort) + - [Check.GetLogger](#check.getlogger) + - [Check.GetLogs](#check.getlogs) + - [Check.LogDebug](#check.logdebug) + - [Check.LogError](#check.logerror) + - [Check.LogFatal](#check.logfatal) + - [Check.LogInfo](#check.loginfo) + - [Check.LogWarn](#check.logwarn) + - [Check.Run](#check.run) + - [Check.SetAbortChan](#check.setabortchan) + - [Check.SetResult](#check.setresult) + - [Check.SetResultAborted](#check.setresultaborted) + - [Check.SetResultError](#check.setresulterror) + - [Check.SetResultSkipped](#check.setresultskipped) + - [Check.WithAfterCheckFn](#check.withaftercheckfn) + - [Check.WithBeforeCheckFn](#check.withbeforecheckfn) + - [Check.WithCheckFn](#check.withcheckfn) + - [Check.WithSkipCheckFn](#check.withskipcheckfn) + - [Check.WithSkipModeAll](#check.withskipmodeall) + - [Check.WithSkipModeAny](#check.withskipmodeany) + - [Check.WithTimeout](#check.withtimeout) + - [CheckResult.String](#checkresult.string) + - [ChecksGroup.Add](#checksgroup.add) + - [ChecksGroup.OnAbort](#checksgroup.onabort) + - [ChecksGroup.RecordChecksResults](#checksgroup.recordchecksresults) + - [ChecksGroup.RunChecks](#checksgroup.runchecks) + - [ChecksGroup.WithAfterAllFn](#checksgroup.withafterallfn) + - [ChecksGroup.WithAfterEachFn](#checksgroup.withaftereachfn) + - [ChecksGroup.WithBeforeAllFn](#checksgroup.withbeforeallfn) + - [ChecksGroup.WithBeforeEachFn](#checksgroup.withbeforeeachfn) + - [FilterCheckIDs](#filtercheckids) + - [GetReconciledResults](#getreconciledresults) + - [GetResults](#getresults) + - [GetTestSuites](#gettestsuites) + - [GetTestsCountByState](#gettestscountbystate) + - [GetTotalTests](#gettotaltests) + - 
[InitLabelsExprEvaluator](#initlabelsexprevaluator) + - [NewCheck](#newcheck) + - [NewChecksGroup](#newchecksgroup) + - [RunChecks](#runchecks) +- [Local Functions](#local-functions) + - [getResultsSummary](#getresultssummary) + - [onFailure](#onfailure) + - [printCheckResult](#printcheckresult) + - [printFailedChecksLog](#printfailedcheckslog) + - [recordCheckResult](#recordcheckresult) + - [runAfterAllFn](#runafterallfn) + - [runAfterEachFn](#runaftereachfn) + - [runBeforeAllFn](#runbeforeallfn) + - [runBeforeEachFn](#runbeforeeachfn) + - [runCheck](#runcheck) + - [shouldSkipCheck](#shouldskipcheck) + - [skipAll](#skipall) + - [skipCheck](#skipcheck) + +## Overview + +The checksdb package provides infrastructure for registering, configuring, executing, and recording the results of compliance tests (“checks”). It manages groups of checks, supports lifecycle hooks, skip logic, timeouts, logging, and a global results store used by the certsuite tool. + +### Key Features + +- Runtime registration and execution of check functions with support for before/after hooks and per‑check skip conditions +- Thread‑safe collection of check outcomes in an in‑memory database that can be queried or exported as claim data +- Integrated logger per check, abort handling via channels, and graceful handling of panics and errors during test runs + +### Design Notes + +- Check objects use a mutex to protect concurrent state changes; the package serialises group operations with a global lock +- Skip logic evaluates all registered skip functions and respects a SkipMode (All or Any) to decide whether to run a check +- Abort signals propagate through an abort channel, causing remaining checks in a group to be marked skipped + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**Check**](#check) | One‑line purpose | +| [**ChecksGroup**](#checksgroup) | Container for a set of checks and their execution lifecycle | + +### Exported Functions Summary + +| Name | Purpose | 
+|------|----------| +| [func (check *Check) Abort(reason string)](#check.abort) | Stops the current check immediately, emits an abort message on `abortChan`, and panics with a custom error. | +| [func (check *Check) GetLogger() *log.Logger](#check.getlogger) | Provides access to the logger associated with a specific `Check`. | +| [func (check *Check) GetLogs() string](#check.getlogs) | Returns the complete log message that has been collected during the execution of a check. | +| [func (check *Check) LogDebug(msg string, args ...any)](#check.logdebug) | Emits a formatted debug log entry tied to the specific `Check`. | +| [func (check *Check) LogError(msg string, args ...any)](#check.logerror) | Emits a formatted error‑level log entry associated with the `Check` instance. | +| [func (check *Check) LogFatal(msg string, args ...any)](#check.logfatal) | Emits a fatal log entry using the check’s logger, prints the message to standard error with a “FATAL:” prefix, and exits the program with status 1. | +| [func (check *Check) LogInfo(msg string, args ...any)](#check.loginfo) | Emits an informational log entry associated with the current `Check` object. The log is formatted using the supplied message and optional arguments. | +| [func (check *Check) LogWarn(msg string, args ...any)](#check.logwarn) | Emits a log entry at the *warn* level associated with a specific `Check` instance. | +| [func (check *Check) Run() error](#check.run) | Runs a registered check, handling setup, execution, and cleanup while recording timing and logging. | +| [func (check *Check) SetAbortChan(abortChan chan string)](#check.setabortchan) | Stores the provided `abortChan` in the `Check` instance so that the check can later signal an abort. 
| +| [func (check *Check) SetResult( compliantObjects []*testhelper.ReportObject, nonCompliantObjects []*testhelper.ReportObject, )](#check.setresult) | Persists the lists of compliant and non‑compliant objects for a check, updates the check’s result status accordingly, and records diagnostic details. | +| [func (check *Check) SetResultAborted(reason string)](#check.setresultaborted) | Records that the check has been aborted, storing the supplied reason and setting its result state accordingly. | +| [func (check *Check) SetResultError(reason string)](#check.setresulterror) | Sets the check’s result to `CheckResultError`, records a skip reason, and logs a warning if the result was already an error. It aborts if the check has been aborted. | +| [func (check *Check) SetResultSkipped(reason string)](#check.setresultskipped) | Flags the check’s result as *skipped*, recording why it was not executed. Skipping is idempotent and ignored if the check had already been aborted. | +| [func (check *Check) WithAfterCheckFn(afterCheckFn func(check *Check) error) *Check](#check.withaftercheckfn) | Assigns an optional function that will be executed after the check’s main logic finishes. The callback receives the same `*Check` instance and may return an error to signal post‑check failure. | +| [func (check *Check) WithBeforeCheckFn(beforeCheckFn func(check *Check) error) *Check](#check.withbeforecheckfn) | Registers a function that runs before the main check logic. If the check already contains an error, the hook is skipped. | +| [func (check *Check) WithCheckFn(checkFn func(check *Check) error) *Check](#check.withcheckfn) | Sets the check’s execution function (`CheckFn`) if no prior error exists, enabling custom validation logic. | +| [func (check *Check) WithSkipCheckFn(skipCheckFn ...func() (skip bool, reason string)) *Check](#check.withskipcheckfn) | Registers one or more functions that determine whether the check should be skipped and why. 
These callbacks are stored in `check.SkipCheckFns`. | +| [func (check *Check) WithSkipModeAll() *Check](#check.withskipmodeall) | Configures the `Check` instance so that all sub‑checks are skipped during execution. If the check is already in an error state, it returns unchanged. | +| [func (check *Check) WithSkipModeAny() *Check](#check.withskipmodeany) | Configures a `Check` instance so that it will skip only when *any* of its conditions are met. This is the default behaviour and therefore this modifier exists mainly for API completeness. | +| [func (check *Check) WithTimeout(duration time.Duration) *Check](#check.withtimeout) | Assigns a duration to the `Timeout` field of a `Check`, unless an error has already been set. | +| [func (cr CheckResult) String() string { return string(cr) }](#checkresult.string) | Converts the `CheckResult` value into its underlying string form. | +| [func (group *ChecksGroup) Add(check *Check)](#checksgroup.add) | Safely appends a new `*Check` to the group's internal slice, ensuring concurrent access is protected by a global lock. | +| [func (group *ChecksGroup) OnAbort(abortReason string) error](#checksgroup.onabort) | Marks the current and remaining checks in a group as aborted or skipped, depending on their state, when an abort event occurs. | +| [func (group *ChecksGroup) RecordChecksResults()](#checksgroup.recordchecksresults) | Iterates over every `Check` in the receiver’s check list and persists each outcome to the global results store, emitting an informational log. | +| [func (group *ChecksGroup) RunChecks(stopChan <-chan bool, abortChan chan string) ([]error, int)](#checksgroup.runchecks) | Runs all enabled checks in a `ChecksGroup`, respecting label filtering and lifecycle hooks (`BeforeAll`, `BeforeEach`, `AfterEach`, `AfterAll`). It returns any errors encountered and the count of failed checks. 
| +| [func (group *ChecksGroup) WithAfterAllFn(afterAllFn func(checks []*Check) error) *ChecksGroup](#checksgroup.withafterallfn) | Stores a user‑supplied function to be executed after all checks in the group have run. The callback receives the slice of `*Check` objects and may perform validation, cleanup, or aggregation. | +| [func (group *ChecksGroup) WithAfterEachFn(afterEachFn func(check *Check) error) *ChecksGroup](#checksgroup.withaftereachfn) | Sets the callback that will be invoked after each `Check` is executed, allowing custom post‑processing or cleanup. | +| [func (group *ChecksGroup) WithBeforeAllFn(beforeAllFn func(checks []*Check) error) *ChecksGroup](#checksgroup.withbeforeallfn) | Stores a user‑supplied function that will run once before all checks in the group are executed, allowing preparatory work or validation. | +| [func (group *ChecksGroup) WithBeforeEachFn(beforeEachFn func(check *Check) error) *ChecksGroup](#checksgroup.withbeforeeachfn) | Stores the supplied function to be run before each `Check` in the group, enabling pre‑processing or validation logic. | +| [func FilterCheckIDs() ([]string, error)](#filtercheckids) | Iterates over the internal check database (`dbByGroup`), evaluates each check’s labels against a global expression evaluator (`labelsExprEvaluator.Eval`). If the expression matches, the check’s ID is collected. Returns the list of matched IDs. | +| [func GetReconciledResults() map[string]claim.Result](#getreconciledresults) | Consolidates the global `resultsDB` store into a plain Go map suitable for inclusion in a Claim output. | +| [func GetResults() map[string]claim.Result](#getresults) | Exposes the in‑memory database of check results to callers. | +| [func GetTestSuites() []string](#gettestsuites) | Enumerates the keys in the package‑level `resultsDB` map and returns a slice containing each distinct test suite name once. 
| +| [func GetTestsCountByState(state string) int](#gettestscountbystate) | Returns how many test results currently hold the specified `state`. | +| [func GetTotalTests() int](#gettotaltests) | Provides a quick count of all test entries currently held in the internal `resultsDB`. | +| [func InitLabelsExprEvaluator(labelsFilter string) error](#initlabelsexprevaluator) | Builds and stores a `labels.LabelsExprEvaluator` that can evaluate label expressions supplied to the checks database. | +| [func NewCheck(id string, labels []string) *Check](#newcheck) | Instantiates a `Check` object pre‑configured for use in the checks database. It sets an initial result status and prepares a logger tied to the check’s ID. | +| [func NewChecksGroup(groupName string) *ChecksGroup](#newchecksgroup) | Returns a singleton `ChecksGroup` for the given name, creating it if absent. It ensures thread‑safe access to the global registry of groups. | +| [func RunChecks(timeout time.Duration) (failedCtr int, err error)](#runchecks) | Orchestrates execution of all `ChecksGroup`s, handles global aborts via timeout or OS signals, aggregates failures and errors. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func() map[string][]int](#getresultssummary) | Builds a summary map where each key is a group name and the value is a slice `[passed, failed, skipped]` reflecting the outcome of all checks in that group. | +| [func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error](#onfailure) | Marks the current check as failed, skips all subsequent checks in the group, and returns a generic error describing the failure. | +| [func printCheckResult(check *Check)](#printcheckresult) | Outputs the result of a single check to the console, formatting the message according to whether the check passed, failed, was skipped, aborted, or errored. 
| +| [func printFailedChecksLog()](#printfailedcheckslog) | Iterates over every check in the database, and for those that failed, outputs a formatted log header followed by the check’s archived logs. | +| [func recordCheckResult(check *Check)](#recordcheckresult) | Persists the outcome of a test check into the global `resultsDB`, enriching it with metadata such as timestamps, duration, and catalog classifications. | +| [func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error)](#runafterallfn) | Runs the `afterAll` function registered on a `ChecksGroup`. If the hook panics or returns an error, it marks the last check as failed and skips any remaining checks. | +| [func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) error](#runaftereachfn) | Runs a user‑defined `afterEach` callback after a check completes, handling panics and errors. | +| [func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error)](#runbeforeallfn) | Runs the `beforeAllFn` callback defined on a `ChecksGroup`. If the callback is absent, it does nothing. It also safeguards against panics and unexpected errors, converting them into a standardized failure state for the first check and skipping the rest. | +| [func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error)](#runbeforeeachfn) | Runs the optional `beforeEach` function defined on a `ChecksGroup`. Handles panics and errors by logging and marking the current check as failed while skipping subsequent checks. | +| [func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error)](#runcheck) | Safely runs an individual `Check`, handling panics and unexpected errors by logging, aborting the current check, and marking subsequent checks as skipped. | +| [func shouldSkipCheck(check *Check) (skip bool, reasons []string)](#shouldskipcheck) | Evaluates all skip functions attached to a `Check` instance. 
If any of them indicate that the check must be skipped (based on the configured `SkipMode`), it returns `true` and collects the corresponding reasons. It also safely recovers from panics in individual skip functions, logging details. | +| [func([]*Check, string)()](#skipall) | Iterates over a list of checks and marks each one as skipped, providing a uniform reason. | +| [func skipCheck(check *Check, reason string)](#skipcheck) | Marks a check as skipped with a given reason, logs the action, and prints the result. | + +## Structs + +### Check + +The `Check` type represents a single validation unit in the test framework. +It holds configuration, state, and helper methods for executing a check, +recording its result, and reporting logs. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `mutex` | `sync.Mutex` | Synchronises concurrent access to mutable fields. | +| `ID` | `string` | Unique identifier of the check. | +| `Labels` | `[]string` | Labels used for selecting or filtering checks. | +| `BeforeCheckFn` | `func(check *Check) error` | Optional hook executed before the main `CheckFn`. | +| `AfterCheckFn` | `func(check *Check) error` | Optional hook executed after the main `CheckFn`. | +| `CheckFn` | `func(check *Check) error` | The core function that performs the check logic. | +| `SkipCheckFns` | `[]func() (skip bool, reason string)` | Functions determining whether to skip this check. | +| `SkipMode` | `skipMode` | Strategy for combining results of multiple skip functions (`Any`, `All`). | +| `Result` | `CheckResult` | Final status after execution: passed, failed, skipped, aborted, or error. | +| `CapturedOutput` | `string` | Stores any captured output (currently unused). | +| `details` | `string` | Human‑readable details about compliant/non‑compliant objects. | +| `skipReason` | `string` | Reason why the check was skipped. | +| `logger` | `*log.Logger` | Logger instance scoped to this check. 
| +| `logArchive` | `*strings.Builder` | Buffer that captures all log output for later retrieval. | +| `StartTime`, `EndTime` | `time.Time` | Timestamps marking the duration of the check run. | +| `Timeout` | `time.Duration` | Maximum allowed time for the check to complete. | +| `Error` | `error` | Any error that prevented execution (e.g., mis‑configured). | +| `abortChan` | `chan string` | Channel used to signal an external abort request. | + +#### Purpose + +A `Check` encapsulates everything needed to run a single validation: configuration (labels, timeouts), hooks for setup and teardown (`BeforeCheckFn`, `AfterCheckFn`), the core logic (`CheckFn`), and mechanisms for skipping or aborting execution. It also records logs and results, which are later persisted by the surrounding test suite. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewCheck(id string, labels []string) *Check` | Constructor that initializes a check with default state and logger. | +| `Abort(reason string)` | Signals a non‑graceful abort via the abort channel and panics to unwind execution. | +| `Run() error` | Executes the check lifecycle: before hook, main function, after hook; records timing and logs. | +| `SetResult(compliantObjects, nonCompliantObjects []*testhelper.ReportObject)` | Finalises the result based on analysis of report objects; updates details string. | +| `SetResultAborted(reason string)`, `SetResultError(reason string)`, `SetResultSkipped(reason string)` | Mark the check with a specific terminal state and reason. | +| `WithBeforeCheckFn(func(check *Check) error) *Check` | Fluent setter for the before hook. | +| `WithAfterCheckFn(func(check *Check) error) *Check` | Fluent setter for the after hook. | +| `WithCheckFn(func(check *Check) error) *Check` | Fluent setter for the core check function. | +| `WithSkipCheckFn(...func() (skip bool, reason string)) *Check` | Adds one or more skip‑determining functions. 
| +| `WithSkipModeAll() *Check`, `WithSkipModeAny() *Check` | Configures how multiple skip functions are aggregated. | +| `WithTimeout(duration time.Duration) *Check` | Sets a maximum duration for the check run. | +| `GetLogger() *log.Logger` | Retrieves the logger instance. | +| `GetLogs() string` | Returns all captured log output. | +| `LogDebug(msg string, args ...any)` / `LogError`, `LogFatal`, `LogInfo`, `LogWarn` | Convenience wrappers that forward to the underlying logger with appropriate severity levels. | + +These methods collectively provide a flexible, thread‑safe API for defining, executing, and reporting on individual checks within the test framework. + +--- + +### ChecksGroup + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `name` | `string` | Identifier for the group; used in logs and to retrieve the group from the global registry. | +| `checks` | `[]*Check` | Ordered slice of checks belonging to this group. Checks are appended via `Add`. | +| `beforeAllFn`, `afterAllFn` | `func(checks []*Check) error` | Optional callbacks executed once before any check runs (`beforeAll`) and once after all checks finish (`afterAll`). They receive the entire list of checks. | +| `beforeEachFn`, `afterEachFn` | `func(check *Check) error` | Optional callbacks run immediately before or after each individual check, respectively. They operate on a single `*Check`. | +| `currentRunningCheckIdx` | `int` | Index of the currently executing check in the group's slice; set to `checkIdxNone` when no check is running. Used for abort handling and tracking progress. | + +#### Purpose + +`ChecksGroup` aggregates checks that belong together, typically representing a test suite or a logical grouping of validations. It manages execution order, optional lifecycle hooks (`beforeAll`, `afterEach`, etc.), and state needed to handle aborts or failures gracefully. The group is registered in a global map upon creation via `NewChecksGroup`. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| `Add` | Appends a new check to the group's slice, guarded by a mutex. | +| `OnAbort` | Handles an abort event: marks running and remaining checks appropriately based on the abort reason and current execution index. | +| `RecordChecksResults` | Persists results of all checks in the group via logging/recording mechanisms. | +| `RunChecks` | Executes the group's checks sequentially, applying label filtering, lifecycle hooks, and error handling; returns collected errors and a count of failed checks. | +| `WithAfterAllFn`, `WithAfterEachFn`, `WithBeforeAllFn`, `WithBeforeEachFn` | Set the respective optional callback functions on the group and return the modified group for chaining. | +| `NewChecksGroup` | Creates or retrieves a singleton instance for a given name, initializing internal fields and registering it globally. | + +--- + +--- + +## Exported Functions + +### Check.Abort + +**Abort** - Stops the current check immediately, emits an abort message on `abortChan`, and panics with a custom error. + +#### Signature (Go) + +```go +func (check *Check) Abort(reason string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stops the current check immediately, emits an abort message on `abortChan`, and panics with a custom error. | +| **Parameters** | `reason` (string) – Text explaining why the abort is issued. | +| **Return value** | None (panics). | +| **Key dependencies** | • `check.mutex.Lock()` / `Unlock()` for thread safety
• `AbortPanicMsg(reason string)` to format panic message
• `panic` built‑in | +| **Side effects** | • Locks and unlocks the check’s mutex.
• Sends a formatted abort message to `abortChan`.
• Panics, terminating the current goroutine. | +| **How it fits the package** | Used internally by checks when an unrecoverable condition occurs; signals listeners via `abortChan` before halting execution. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Acquire mutex"] --> B["Format abortMsg"] + B --> C["Send to abortChan"] + C --> D["Trigger panic with AbortPanicMsg"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Check.Abort --> func_Lock + func_Check.Abort --> func_Unlock + func_Check.Abort --> func_panic + func_Check.Abort --> func_AbortPanicMsg +``` + +#### Functions calling `Check.Abort` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Check.Abort +check := &Check{ID: "test-check", abortChan: make(chan string)} +defer close(check.abortChan) +go func() { + msg := <-check.abortChan + fmt.Println("Abort received:", msg) +}() +check.Abort("unexpected error") +``` + +--- + +### Check.GetLogger + +**GetLogger** - Provides access to the logger associated with a specific `Check`. + +#### Signature (Go) + +```go +func (check *Check) GetLogger() *log.Logger +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides access to the logger associated with a specific `Check`. | +| **Parameters** | None – the method operates on the receiver `check`. | +| **Return value** | `*log.Logger` – the logger instance stored in the `Check` struct. | +| **Key dependencies** | • `log` package (standard library). | +| **Side effects** | None; purely accessor. | +| **How it fits the package** | Allows other components to log messages contextualized to a particular check without exposing internal fields. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph CheckObject["Check"] + A["check.logger"] --> B{"Return"} + end +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Check.GetLogger` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Check.GetLogger + +import ( + "log" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Assume a Check instance has been created elsewhere + var myCheck *checksdb.Check + + // Retrieve the logger for this check + logger := myCheck.GetLogger() + + // Use the logger as needed + logger.Println("This is a log message from the check.") +} +``` + +--- + +### Check.GetLogs + +**GetLogs** - Returns the complete log message that has been collected during the execution of a check. + +#### Signature (Go) + +```go +func (check *Check) GetLogs() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the complete log message that has been collected during the execution of a check. | +| **Parameters** | None | +| **Return value** | `string` – the concatenated log content stored in the check’s internal buffer. | +| **Key dependencies** | *Field access*: `check.logArchive.String()` (likely a `strings.Builder`). | +| **Side effects** | None; purely read‑only. | +| **How it fits the package** | Provides a public accessor for other parts of the checks database to obtain diagnostic output when recording results or printing failures. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve log buffer"} + B --> C["Return check.logArchive.String()"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Functions calling `Check.GetLogs` (Mermaid) + +```mermaid +graph TD + func_printFailedChecksLog --> func_Check.GetLogs + func_recordCheckResult --> func_Check.GetLogs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.GetLogs +check := &Check{ + // ... initialize check, including logArchive ... +} +logOutput := check.GetLogs() +fmt.Println("Collected logs:", logOutput) +``` + +--- + +### Check.LogDebug + +**LogDebug** - Emits a formatted debug log entry tied to the specific `Check`. + +Logs a debug‑level message for the current check instance. + +#### Signature (Go) + +```go +func (check *Check) LogDebug(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits a formatted debug log entry tied to the specific `Check`. | +| **Parameters** | `msg` – format string
`args` – variadic arguments for formatting | +| **Return value** | None (side‑effect only) | +| **Key dependencies** | • Calls `log.Logf` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`
• Uses the check’s embedded logger (`check.logger`) and the debug log level constant | +| **Side effects** | Writes a log record to the configured logger; no state mutation in the `Check`. | +| **How it fits the package** | Provides a convenient wrapper for emitting debug logs within check logic, ensuring consistent logger usage across the checks database. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph Check_LogDebug + A["Call Check.LogDebug(msg, args...)"] --> B["Invoke log.Logf"] + B --> C["log.Logf(logger, LevelDebug, msg, args...)"] + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Check.LogDebug --> func_log.Logf +``` + +#### Functions calling `Check.LogDebug` (Mermaid) + +```mermaid +graph TD + func_recordCheckResult --> func_Check.LogDebug +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.LogDebug +check := &checksdb.Check{logger: myLogger} +check.LogDebug("Initializing check %s with value %d", "MyCheck", 42) +``` + +--- + +### Check.LogError + +**LogError** - Emits a formatted error‑level log entry associated with the `Check` instance. + +#### Signature (Go) + +```go +func (check *Check) LogError(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits a formatted error‑level log entry associated with the `Check` instance. | +| **Parameters** | `msg string –` format string; `args …any –` values to interpolate into `msg`. | +| **Return value** | None (void). | +| **Key dependencies** | • Calls `log.Logf` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`.
• Uses the check’s embedded logger (`check.logger`). | **Side effects** | Writes a log record to the configured logger; may trigger fatal exit if the underlying logger is misconfigured. | **How it fits the package** | Provides a convenient, consistent way for `Check` methods (e.g., `SetResult`, `runCheck`) to report errors without duplicating formatting logic. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Format msg & args"] --> B["Call log.Logf with LevelError"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Check_LogError --> func_log_Logf +``` + +#### Functions calling `Check.LogError` + +```mermaid +graph TD + func_Check_SetResult --> func_Check_LogError + func_runCheck --> func_Check_LogError + func_shouldSkipCheck --> func_Check_LogError +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.LogError +check := &checksdb.Check{logger: log.NewLogger()} +err := errors.New("item not found") +check.LogError("Failed to process item %d: %v", 42, err) +``` + +--- + +### Check.LogFatal + +**LogFatal** - Emits a fatal log entry using the check’s logger, prints the message to standard error with a “FATAL:” prefix, and exits the program with status 1. + +Logs a fatal message and terminates the process. + +#### Signature (Go) + +```go +func (check *Check) LogFatal(msg string, args ...any) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits a fatal log entry using the check’s logger, prints the message to standard error with a “FATAL:” prefix, and exits the program with status 1. | +| **Parameters** | `msg string` – message format; `args ...any` – optional formatting arguments. | +| **Return value** | None (the function never returns normally). | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logf`<br>
• `fmt.Fprintf`
• `os.Exit` | +| **Side effects** | Writes to the logger and standard error, then terminates the process. | +| **How it fits the package** | Provides a convenient fatal‑exit helper for checks that must abort execution upon encountering unrecoverable errors. | + +#### Internal workflow + +```mermaid +flowchart TD + subgraph "LogFatal" + A["Check.LogFatal"] --> B["log.Logf(check.logger, log.LevelFatal, msg, args...)"] + A --> C["fmt.Fprintf(os.Stderr, \nFATAL: +msg+\n, args...)"] + A --> D["os.Exit(1)"] + end +``` + +#### Function dependencies + +```mermaid +graph TD + func_Check.LogFatal --> func_log.Logf + func_Check.LogFatal --> func_fmt.Fprintf + func_Check.LogFatal --> func_os.Exit +``` + +#### Functions calling `Check.LogFatal` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Check.LogFatal +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + check := &checksdb.Check{Logger: nil} // assume logger is set appropriately + check.LogFatal("unable to load configuration: %v", err) // program exits after printing +} +``` + +--- + +### Check.LogInfo + +**LogInfo** - Emits an informational log entry associated with the current `Check` object. The log is formatted using the supplied message and optional arguments. + +Logs a message at the *info* level for a specific check instance. + +```go +func (check *Check) LogInfo(msg string, args ...any) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits an informational log entry associated with the current `Check` object. The log is formatted using the supplied message and optional arguments. | +| **Parameters** | `msg string –` format string for the log.
`args ...any –` variadic arguments to substitute into the format string. | +| **Return value** | None. The function logs synchronously and returns immediately. | +| **Key dependencies** | • Calls `log.Logf` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`.
• Relies on the `Check.logger` field to provide a logger instance. | +| **Side effects** | • Writes a log record via the underlying logger (which may output to stdout, files, or other sinks).
• No mutation of package‑level state; only uses the receiver’s `logger`. | +| **How it fits the package** | Serves as a convenience wrapper around the generic logging helper, ensuring that all check‑specific logs are emitted at the *info* level and carry the appropriate logger context. | + +#### Internal workflow + +```mermaid +flowchart TD + Check_LogInfo --> log_Logf +``` + +#### Function dependencies + +```mermaid +graph TD + func_Check.LogInfo --> func_log.Logf +``` + +#### Functions calling `Check.LogInfo` + +```mermaid +graph TD + func_Check.Run --> func_Check.LogInfo + func_recordCheckResult --> func_Check.LogInfo + func_skipCheck --> func_Check.LogInfo +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.LogInfo +check := &checksdb.Check{logger: log.NewLogger()} +check.LogInfo("Starting check for %s", "my-test-id") +``` + +--- + +### Check.LogWarn + +**LogWarn** - Emits a log entry at the *warn* level associated with a specific `Check` instance. + +#### 1. Signature (Go) + +```go +func (check *Check) LogWarn(msg string, args ...any) +``` + +#### 2. Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Emits a log entry at the *warn* level associated with a specific `Check` instance. | +| **Parameters** | `msg` – format string; `args` – optional arguments for formatting (variadic). | +| **Return value** | None (void). | +| **Key dependencies** | Calls `log.Logf` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`. | +| **Side effects** | Writes a log record to the logger configured for the check; may trigger program termination if logging configuration fails. | +| **How it fits the package** | Provides a convenient wrapper so that checks can emit warnings without exposing the underlying logging implementation directly. | + +#### 3. 
Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check.LogWarn(msg, args...)"] --> B["log.Logf(check.logger, log.LevelWarn, msg, args...)"] +``` + +#### 4. Function dependencies (Mermaid) + +```mermaid +graph TD + func_Check.LogWarn --> func_log.Logf +``` + +#### 5. Functions calling `Check.LogWarn` (Mermaid) + +```mermaid +graph TD + func_Check.SetResult --> func_Check.LogWarn + func_Check.SetResultError --> func_Check.LogWarn +``` + +#### 6. Usage example (Go) + +```go +// Minimal example invoking Check.LogWarn +check := &checksdb.Check{ID: "example-check", logger: &log.Logger{}} +check.LogWarn("This check is not applicable for the current cluster: %s", "reason") +``` + +--- + +### Check.Run + +**Run** - Runs a registered check, handling setup, execution, and cleanup while recording timing and logging. + +#### Signature (Go) + +```go +func (check *Check) Run() error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs a registered check, handling setup, execution, and cleanup while recording timing and logging. | +| **Parameters** | `check *Check` – receiver; the check instance to run. | +| **Return value** | `error` – non‑nil if the check could not be executed or one of its phases failed. | +| **Key dependencies** | • `fmt.Errorf`
• `cli.PrintCheckRunning`
• `time.Now`
• `check.LogInfo`
• `check.BeforeCheckFn`, `check.CheckFn`, `check.AfterCheckFn`
• `printCheckResult` | +| **Side effects** | • Updates `StartTime` and `EndTime`.
• Emits log entries via the check’s logger.
• Prints running, passed/failed/skipped messages through CLI utilities. | +| **How it fits the package** | Central orchestrator for a single check; called by the test runner (`runCheck`) to execute checks sequentially or in parallel. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Validate check"] --> B{"check nil?"} + B -- yes --> C["Return error check is a nil pointer"] + B -- no --> D{"error exists?"} + D -- yes --> E["Return formatted error"] + D -- no --> F["Print running status"] + F --> G["Record start time"] + G --> H["Log labels"] + H --> I{"BeforeCheckFn present?"} + I -- yes --> J["Execute BeforeCheckFn"] + J --> K{"err?"} + K -- yes --> L["Return formatted error"] + K -- no --> M{"CheckFn present?"} + M -- yes --> N["Execute CheckFn"] + N --> O{"err?"} + O -- yes --> P["Return formatted error"] + O -- no --> Q{"AfterCheckFn present?"} + Q -- yes --> R["Execute AfterCheckFn"] + R --> S{"err?"} + S -- yes --> T["Return formatted error"] + S -- no --> U["Print check result"] + U --> V["End time recorded via defer"] + V --> W["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Check.Run --> fmt.Errorf + func_Check.Run --> cli.PrintCheckRunning + func_Check.Run --> time.Now + func_Check.Run --> check.LogInfo + func_Check.Run --> BeforeCheckFn + func_Check.Run --> CheckFn + func_Check.Run --> AfterCheckFn + func_Check.Run --> printCheckResult +``` + +#### Functions calling `Check.Run` (Mermaid) + +```mermaid +graph TD + runCheck --> Check.Run +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.Run +check := &checksdb.Check{ + ID: "example", + Labels: []string{"label1", "label2"}, + BeforeCheckFn: func(c *checksdb.Check) error { return nil }, + CheckFn: func(c *checksdb.Check) error { return nil }, + AfterCheckFn: func(c *checksdb.Check) error { return nil }, +} + +if err := check.Run(); err != nil { + fmt.Printf("check failed: %v\n", err) +} +``` + +--- + +### 
Check.SetAbortChan + +**SetAbortChan** - Stores the provided `abortChan` in the `Check` instance so that the check can later signal an abort. + +#### 1) Signature + +```go +func (check *Check) SetAbortChan(abortChan chan string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores the provided `abortChan` in the `Check` instance so that the check can later signal an abort. | +| **Parameters** | *`abortChan`* (`chan string`) – Channel used by the check to notify cancellation or failure. | +| **Return value** | None (method has no return). | +| **Key dependencies** | • Assigns to the field `check.abortChan`. | +| **Side effects** | Mutates the receiver’s internal state; no external I/O or concurrency control. | +| **How it fits the package** | Enables a running check to send an abort signal back to the orchestrating `ChecksGroup` during execution. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive abortChan"] --> B["Set check.abortChan = abortChan"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 5) Functions calling `Check.SetAbortChan` (Mermaid) + +```mermaid +graph TD + func_ChecksGroup_RunChecks --> func_Check_SetAbortChan +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Check.SetAbortChan +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + check := &checksdb.Check{} + abortChan := make(chan string) + check.SetAbortChan(abortChan) // Assign the abort channel to the check +} +``` + +--- + +--- + +### Check.SetResult + +**SetResult** - Persists the lists of compliant and non‑compliant objects for a check, updates the check’s result status accordingly, and records diagnostic details. 
+ +#### Signature (Go) + +```go +func (check *Check) SetResult( + compliantObjects []*testhelper.ReportObject, + nonCompliantObjects []*testhelper.ReportObject, +) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Persists the lists of compliant and non‑compliant objects for a check, updates the check’s result status accordingly, and records diagnostic details. | +| **Parameters** | `compliantObjects []*testhelper.ReportObject` – objects that passed the check.
`nonCompliantObjects []*testhelper.ReportObject` – objects that failed the check. | +| **Return value** | None (void). The function mutates the receiver’s state. | +| **Key dependencies** | • `check.mutex.Lock/Unlock` – protects concurrent access.
• `testhelper.ResultObjectsToString` – serialises result lists to JSON for logging.
• `Check.LogError`, `Check.LogWarn` – emits diagnostics. | **Side effects** | * Thread‑safe state mutation: sets `check.details`, may change `check.Result` and `check.skipReason`.<br>
* Emits log messages on errors or when the check is skipped. | +| **How it fits the package** | Part of the runtime evaluation of a check; called after a check has gathered its result objects to finalize status before persisting to the database. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Lock mutex"] --> B{"Result == Aborted"} + B -- Yes --> C["Return"] + B -- No --> D["Serialize objects"] + D --> E{"Serialization error?"} + E -- Yes --> F["LogError"] + E -- No --> G["Store details string"] + G --> H{"Existing Result == Error"} + H -- Yes --> I["Return"] + H -- No --> J{"Non‑compliant objects present?"} + J -- Yes --> K["Set ResultFailed; clear skipReason"] + J -- No --> L{"Compliant list empty?"} + L -- Yes --> M["LogWarn; set skipReason; Set ResultSkipped"] + L -- No --> N["NOP – result remains unchanged"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Check.SetResult --> func_Lock + func_Check.SetResult --> func_Unlock + func_Check.SetResult --> func_ResultObjectsToString + func_Check.SetResult --> func_LogError + func_Check.SetResult --> func_LogWarn +``` + +#### Functions calling `Check.SetResult` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Check.SetResult +check := &Check{ID: "example-check", Result: CheckResultUnknown} +compliantObjs := []*testhelper.ReportObject{ + {Name: "obj1"}, +} +nonCompliantObjs := []*testhelper.ReportObject{ + {Name: "obj2"}, +} + +check.SetResult(compliantObjs, nonCompliantObjs) + +// After the call: +// * check.details contains a JSON string of both lists +// * check.Result is CheckResultFailed because there is at least one non‑compliant object +``` + +--- + +### Check.SetResultAborted + +**SetResultAborted** - Records that the check has been aborted, storing the supplied reason and setting its result state accordingly. 
+ +#### Signature (Go) + +```go +func (check *Check) SetResultAborted(reason string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Records that the check has been aborted, storing the supplied reason and setting its result state accordingly. | +| **Parameters** | `reason` (string) – explanation of why the check was aborted. | +| **Return value** | None | +| **Key dependencies** | *Calls `check.mutex.Lock()` to obtain exclusive access.
* Calls `check.mutex.Unlock()` via `defer`. | +| **Side effects** | Mutates the check’s internal state: sets `Result` to `CheckResultAborted` and records `skipReason`. No I/O or concurrency beyond mutex protection. | +| **How it fits the package** | Used by higher‑level orchestration (e.g., `ChecksGroup.OnAbort`) to mark a specific check as aborted when an abort event occurs during a test run. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Acquire mutex"] --> B{"Set state"} + B --> C["Result = Aborted"] + B --> D["skipReason = reason"] + C --> E["Release mutex"] + D --> E +``` + +#### Function dependencies + +```mermaid +graph TD + func_Check.SetResultAborted --> func_Lock + func_Check.SetResultAborted --> func_Unlock +``` + +#### Functions calling `Check.SetResultAborted` + +```mermaid +graph TD + func_ChecksGroup.OnAbort --> func_Check.SetResultAborted +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.SetResultAborted +check := &Check{} +abortReason := "timeout during execution" +check.SetResultAborted(abortReason) +``` + +--- + +### Check.SetResultError + +**SetResultError** - Sets the check’s result to `CheckResultError`, records a skip reason, and logs a warning if the result was already an error. It aborts if the check has been aborted. + +#### Signature (Go) + +```go +func (check *Check) SetResultError(reason string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sets the check’s result to `CheckResultError`, records a skip reason, and logs a warning if the result was already an error. It aborts if the check has been aborted. | +| **Parameters** | `reason string` – human‑readable description of why the check failed. | +| **Return value** | None | +| **Key dependencies** | *`check.mutex.Lock()` / `Unlock()` – ensures exclusive access to mutable fields.
* `Check.LogWarn(msg string, args ...any)` – logs a warning when attempting to overwrite an existing error result. | +| **Side effects** | Mutates the receiver’s `Result`, `skipReason` fields; performs thread‑safe locking/unlocking; may produce log output. | +| **How it fits the package** | Provides a safe, idempotent way for other components (e.g., failure handlers) to record an error state on a check within the checks database. | + +#### Internal workflow + +```mermaid +flowchart TD + subgraph Locking + A["check.mutex.Lock()"] --> B["Check.Result"] + end + B --> C{"Result == Aborted"} + C -- Yes --> D["Return"] + C -- No --> E{"Result == Error"} + E -- Yes --> F["LogWarn(already marked as error)"] + E -- No --> G["Set Result = Error, skipReason = reason"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Check.SetResultError --> func_Lock + func_Check.SetResultError --> func_Unlock + func_Check.SetResultError --> func_Check.LogWarn +``` + +#### Functions calling `Check.SetResultError` + +```mermaid +graph TD + func_onFailure --> func_Check.SetResultError +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.SetResultError +check := &Check{ID: "example", Result: CheckResultPass} +check.SetResultError("validation failed") +// check.Result is now CheckResultError and skipReason holds the message +``` + +--- + +### Check.SetResultSkipped + +**SetResultSkipped** - Flags the check’s result as *skipped*, recording why it was not executed. Skipping is idempotent and ignored if the check had already been aborted. + +#### Signature (Go) + +```go +func (check *Check) SetResultSkipped(reason string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Flags the check’s result as *skipped*, recording why it was not executed. Skipping is idempotent and ignored if the check had already been aborted. 
| +| **Parameters** | `reason` (string) – a human‑readable explanation for the skip (e.g., label mismatch, abort reason). | +| **Return value** | None. The function mutates the receiver’s state only. | +| **Key dependencies** | *`check.mutex.Lock()` – ensures exclusive access to the check data.
* `check.mutex.Unlock()` – releases the lock via defer. | +| **Side effects** | • Locks and unlocks the check’s mutex.
• Sets `check.Result` to `CheckResultSkipped` unless it is already `CheckResultAborted`.
• Stores the skip reason in `check.skipReason`. | +| **How it fits the package** | Within the checks database, this method centralizes state transition for skipped checks, enabling callers (e.g., group abort logic) to uniformly mark non‑run tests without duplicating locking or result handling. | + +#### Internal workflow + +```mermaid +flowchart TD + subgraph Locking["Acquire Mutex"] + A["check.mutex.Lock()"] --> B["defer check.mutex.Unlock()"] + end + B --> C{"Is Result Aborted?"} + C -- Yes --> D["Exit"] + C -- No --> E["Set Result to Skipped"] + E --> F["Store skipReason"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Check.SetResultSkipped --> func_Lock + func_Check.SetResultSkipped --> func_Unlock +``` + +#### Functions calling `Check.SetResultSkipped` + +```mermaid +graph TD + func_ChecksGroup.OnAbort --> func_Check.SetResultSkipped + func_skipCheck --> func_Check.SetResultSkipped +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.SetResultSkipped +check := &Check{ + ID: "example-check", + Result: CheckResultPending, +} +check.SetResultSkipped("not applicable in this environment") +// check.Result is now CheckResultSkipped and skipReason holds the message. +``` + +--- + +### Check.WithAfterCheckFn + +**WithAfterCheckFn** - Assigns an optional function that will be executed after the check’s main logic finishes. The callback receives the same `*Check` instance and may return an error to signal post‑check failure. + +#### Signature (Go) + +```go +func (check *Check) WithAfterCheckFn(afterCheckFn func(check *Check) error) *Check +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Assigns an optional function that will be executed after the check’s main logic finishes. The callback receives the same `*Check` instance and may return an error to signal post‑check failure. 
| +| **Parameters** | *afterCheckFn* func(check \*Check) error – a closure invoked after the primary check; can inspect or modify the check state. | +| **Return value** | The original `*Check` pointer, allowing method chaining. | +| **Key dependencies** | None beyond standard library functions (simple assignment). | +| **Side effects** | Mutates the `AfterCheckFn` field of the receiver if no prior error is present; otherwise returns unchanged. No external I/O or concurrency occurs here. | +| **How it fits the package** | Provides a fluent interface for attaching post‑check behaviour, complementing other builder methods on `Check`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"check.Error != nil"} + B -- yes --> C["Return check"] + B -- no --> D["Assign AfterCheckFn"] + D --> E["Return check"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_Check.WithAfterCheckFn +``` + +#### Functions calling `Check.WithAfterCheckFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_Check.WithAfterCheckFn +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.WithAfterCheckFn +check := NewCheck("example") +check. + WithAfterCheckFn(func(c *Check) error { + // Perform any cleanup or logging after the main check runs. + fmt.Println("Check finished:", c.Name) + return nil + }). + Run() // Assume Run executes the primary check logic. +``` + +--- + +### Check.WithBeforeCheckFn + +**WithBeforeCheckFn** - Registers a function that runs before the main check logic. If the check already contains an error, the hook is skipped. + +```go +func (check *Check) WithBeforeCheckFn(beforeCheckFn func(check *Check) error) *Check +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Registers a function that runs before the main check logic. 
If the check already contains an error, the hook is skipped. | +| **Parameters** | `beforeCheckFn` – a callback receiving the current `*Check` instance and returning an `error`. | +| **Return value** | The same `*Check` pointer to allow method chaining. | +| **Key dependencies** | Sets the field `BeforeCheckFn` on the receiver. No external calls. | +| **Side effects** | Mutates the `BeforeCheckFn` field of the `Check` struct; may short‑circuit execution if an existing error is present. | +| **How it fits the package** | Enables users to inject custom pre‑processing or validation logic into a check’s lifecycle within the *checksdb* package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Check instance"] --> B{"check.Error != nil?"} + B -- yes --> C["Return check"] + B -- no --> D["Set BeforeCheckFn"] + D --> E["Return check"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Check.WithBeforeCheckFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example + +```go +// Minimal example invoking Check.WithBeforeCheckFn +c := &Check{} +c = c.WithBeforeCheckFn(func(chk *Check) error { + // perform pre‑check logic here + return nil +}) +``` + +--- + +### Check.WithCheckFn + +**WithCheckFn** - Sets the check’s execution function (`CheckFn`) if no prior error exists, enabling custom validation logic. + +#### 1) Signature (Go) + +```go +func (check *Check) WithCheckFn(checkFn func(check *Check) error) *Check +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sets the check’s execution function (`CheckFn`) if no prior error exists, enabling custom validation logic. | +| **Parameters** | `checkFn` – a function that receives the current `*Check` and returns an `error`. | +| **Return value** | The modified `*Check`, allowing method chaining. 
| +| **Key dependencies** | • Assigns to `check.CheckFn`.
• Reads `check.Error`. | +| **Side effects** | Mutates the receiver’s `CheckFn` field; no I/O or concurrency. | +| **How it fits the package** | Provides a fluent API for configuring checks within the `checksdb` repository. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"check.Error != nil?"} + B -- Yes --> C["Return check unchanged"] + B -- No --> D["Set check.CheckFn = checkFn"] + D --> E["Return modified check"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 5) Functions calling `Check.WithCheckFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Check.WithCheckFn +check := &Check{} +customFn := func(c *Check) error { + // custom validation logic here + return nil +} +modifiedCheck := check.WithCheckFn(customFn) +// modifiedCheck.CheckFn now points to customFn +``` + +--- + +### Check.WithSkipCheckFn + +**WithSkipCheckFn** - Registers one or more functions that determine whether the check should be skipped and why. These callbacks are stored in `check.SkipCheckFns`. + +#### Signature (Go) + +```go +func (check *Check) WithSkipCheckFn(skipCheckFn ...func() (skip bool, reason string)) *Check +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Registers one or more functions that determine whether the check should be skipped and why. These callbacks are stored in `check.SkipCheckFns`. | +| **Parameters** | `skipCheckFn ...func() (skip bool, reason string)` – variadic slice of functions returning a boolean indicating skip status and a human‑readable reason. | +| **Return value** | The modified `*Check` instance, enabling method chaining. | +| **Key dependencies** | • `append` – adds callbacks to the internal slice.
• Accesses the receiver’s fields (`Error`, `SkipCheckFns`). | +| **Side effects** | Mutates `check.SkipCheckFns`; returns early if `check.Error != nil`. No I/O or concurrency. | +| **How it fits the package** | Part of the `checksdb` package, allowing callers to augment a check with custom skip logic before execution. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"check.Error != nil"} + B -- Yes --> C["Return check"] + B -- No --> D["Append skipCheckFn… to check.SkipCheckFns"] + D --> E["Return check"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Check.WithSkipCheckFn --> func_append +``` + +#### Functions calling `Check.WithSkipCheckFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Check.WithSkipCheckFn +chk := &checksdb.Check{} +chk = chk.WithSkipCheckFn( + func() (bool, string) { return false, "" }, + func() (bool, string) { return true, "test condition met" }, +) +``` + +--- + +### Check.WithSkipModeAll + +**WithSkipModeAll** - Configures the `Check` instance so that all sub‑checks are skipped during execution. If the check is already in an error state, it returns unchanged. + +#### Signature (Go) + +```go +func (check *Check) WithSkipModeAll() *Check +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Configures the `Check` instance so that all sub‑checks are skipped during execution. If the check is already in an error state, it returns unchanged. | +| **Parameters** | `check *Check` – receiver; the check object to modify. | +| **Return value** | `*Check` – the same (or modified) check instance for method chaining. | +| **Key dependencies** | • Accesses and mutates the `SkipMode` field of `Check`. | +| **Side effects** | Mutates `check.SkipMode`; no external I/O or concurrency actions. 
| +| **How it fits the package** | Provides a fluent API to alter a check’s behavior before running it, facilitating conditional test execution in the checks database. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"check.Error != nil"} + B -- Yes --> C["Return check"] + B -- No --> D["Set check.SkipMode = SkipModeAll"] + D --> E["Return check"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_Check.WithSkipModeAll --> None +``` + +#### Functions calling `Check.WithSkipModeAll` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_UnknownCaller --> func_Check.WithSkipModeAll +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.WithSkipModeAll +check := &Check{ /* initialize fields as needed */ } +check = check.WithSkipModeAll() +// Now all sub‑checks will be skipped when this check is executed. +``` + +--- + +### Check.WithSkipModeAny + +**WithSkipModeAny** - Configures a `Check` instance so that it will skip only when *any* of its conditions are met. This is the default behaviour and therefore this modifier exists mainly for API completeness. + +#### Signature (Go) + +```go +func (check *Check) WithSkipModeAny() *Check +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Configures a `Check` instance so that it will skip only when *any* of its conditions are met. This is the default behaviour and therefore this modifier exists mainly for API completeness. | +| **Parameters** | `check *Check` – receiver; the check being modified. | +| **Return value** | The same `*Check` pointer after modification, enabling method chaining. | +| **Key dependencies** | • Sets the field `SkipMode` to the constant `SkipModeAny`.
• Returns early if `check.Error != nil`. | +| **Side effects** | Mutates the receiver’s `SkipMode` field; no external I/O or concurrency actions. | +| **How it fits the package** | Part of the public API for configuring checks within the `checksdb` package, allowing callers to explicitly opt‑in to the default skip behaviour. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"check.Error != nil"} + B -- Yes --> C["Return check unchanged"] + B -- No --> D["Set check.SkipMode = SkipModeAny"] + D --> E["Return check"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Check.WithSkipModeAny` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Check.WithSkipModeAny +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + c := &checksdb.Check{} + // Explicitly set the skip mode to the default value (any) + c = c.WithSkipModeAny() +} +``` + +--- + +### Check.WithTimeout + +**WithTimeout** - Assigns a duration to the `Timeout` field of a `Check`, unless an error has already been set. + +#### Signature (Go) + +```go +func (check *Check) WithTimeout(duration time.Duration) *Check +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Assigns a duration to the `Timeout` field of a `Check`, unless an error has already been set. | +| **Parameters** | `duration time.Duration` – The timeout value to apply. | +| **Return value** | `*Check` – The same check instance, enabling method chaining. | +| **Key dependencies** | - `time.Duration` type from the standard library. | +| **Side effects** | Mutates the receiver’s `Timeout` field; no external I/O or concurrency. 
| +| **How it fits the package** | Provides a fluent interface for configuring checks within the `checksdb` package, allowing callers to specify timeouts before executing a check. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check instance"] --> B{"Error already set?"} + B -- Yes --> C["Return unchanged Check"] + B -- No --> D["Set Timeout field"] + D --> E["Return updated Check"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_Check.WithTimeout +``` + +#### Functions calling `Check.WithTimeout` + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_Check.WithTimeout +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Check.WithTimeout +package main + +import ( + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + check := &checksdb.Check{} + // Chain configuration: set timeout and then proceed with other methods + check = check.WithTimeout(30 * time.Second) +} +``` + +--- + +### CheckResult.String + +**String** - Converts the `CheckResult` value into its underlying string form. + +#### 1) Signature (Go) + +```go +func (cr CheckResult) String() string { + return string(cr) +} +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts the `CheckResult` value into its underlying string form. | +| **Parameters** | *None* – only the receiver `cr CheckResult`. | +| **Return value** | A plain `string` that is identical to the underlying byte slice of the enum. | +| **Key dependencies** | Calls the built‑in `string()` conversion; no external packages. | +| **Side effects** | None – purely functional. | +| **How it fits the package** | Provides a convenient, human‑readable representation used when logging results and generating reports (e.g., in `recordCheckResult` or `ChecksGroup.RunChecks`). 
| + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + CR_String["CheckResult.String"] --> Str(string_cr) +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_CheckResult.String --> func_string +``` + +#### 5) Functions calling `CheckResult.String` (Mermaid) + +```mermaid +graph TD + func_ChecksGroup.RunChecks --> func_CheckResult.String + func_Check.GetLogs --> func_LogArchive.String + func_recordCheckResult --> func_CheckResult.String +``` + +> **Note**: The function is referenced by multiple callers that convert the result to a string for logging or reporting. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking CheckResult.String +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Assuming checksdb defines a CheckResult type with values like Failed, Passed, etc. + var result checksdb.CheckResult = checksdb.CheckResultFailed + fmt.Println("Check status:", result.String()) // prints: "Check status: failed" +} +``` + +--- + +### ChecksGroup.Add + +**Add** - Safely appends a new `*Check` to the group's internal slice, ensuring concurrent access is protected by a global lock. + +#### Signature (Go) + +```go +func (group *ChecksGroup) Add(check *Check) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Safely appends a new `*Check` to the group's internal slice, ensuring concurrent access is protected by a global lock. | +| **Parameters** | `check *Check` – the check instance to be added. | +| **Return value** | None (void). | +| **Key dependencies** | - `dbLock.Lock()`
- `defer dbLock.Unlock()`
- `append(group.checks, check)` | +| **Side effects** | Mutates the receiver’s `checks` slice; acquires and releases a global mutex to guard against race conditions. | +| **How it fits the package** | Provides an API for building up a collection of checks that can later be queried or executed by other components of the `checksdb` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Acquire dbLock"] --> B["Defer Unlock"] + B --> C["Append check to group.checks"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ChecksGroup.Add --> func_Lock + func_ChecksGroup.Add --> func_Unlock + func_ChecksGroup.Add --> func_append +``` + +#### Functions calling `ChecksGroup.Add` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.Add +group := &checksdb.ChecksGroup{} +check := &checksdb.Check{ /* fields */ } +group.Add(check) +``` + +--- + +### ChecksGroup.OnAbort + +**OnAbort** - Marks the current and remaining checks in a group as aborted or skipped, depending on their state, when an abort event occurs. + +#### Signature (Go) + +```go +func (group *ChecksGroup) OnAbort(abortReason string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Marks the current and remaining checks in a group as aborted or skipped, depending on their state, when an abort event occurs. | +| **Parameters** | `abortReason string` – description of why the group is being aborted (e.g., timeout, signal). | +| **Return value** | `error` – always `nil`; the function performs side‑effects only. | +| **Key dependencies** | • `fmt.Printf`
• `strings.ToUpper`
• `labelsExprEvaluator.Eval`
• `check.SetResultSkipped`
• `check.SetResultAborted`
* `printCheckResult` | +| **Side effects** | Updates each `Check` in the group: sets result to *skipped* or *aborted*, records skip reason, and prints a summary via CLI. No external I/O except console output. | +| **How it fits the package** | Invoked by the orchestrator (`RunChecks`) when a global abort (timeout, signal, or internal abort) is detected, ensuring that all checks in a group are marked appropriately before recording results. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> CheckLoop["Iterate over group.checks"] + CheckLoop --> EvalLabels{"labelsExprEvaluator.Eval(check.Labels)"} + EvalLabels -- false --> SkipNotMatching["SetResultSkipped(not matching labels)"] + EvalLabels -- true --> RunningCheck{"i == currentRunningCheckIdx"} + RunningCheck -- true --> AbortCurrent["SetResultAborted(abortReason)"] + RunningCheck -- false --> AfterRunning{"i > currentRunningCheckIdx"} + AfterRunning -- true --> SkipLater["SetResultSkipped(abortReason)"] + AfterRunning -- false --> NoAction["NOP"] + SkipNotMatching --> PrintResult["printCheckResult(check)"] + AbortCurrent --> PrintResult + SkipLater --> PrintResult + NoAction --> PrintResult + PrintResult --> Next + Next --> End +``` + +#### Function dependencies + +```mermaid +graph TD + func_ChecksGroup.OnAbort --> fmt.Printf + func_ChecksGroup.OnAbort --> strings.ToUpper + func_ChecksGroup.OnAbort --> labelsExprEvaluator.Eval + func_ChecksGroup.OnAbort --> check.SetResultSkipped + func_ChecksGroup.OnAbort --> check.SetResultAborted + func_ChecksGroup.OnAbort --> printCheckResult +``` + +#### Functions calling `ChecksGroup.OnAbort` + +```mermaid +graph TD + func_RunChecks --> func_ChecksGroup.OnAbort +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.OnAbort +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Assume we have a populated ChecksGroup instance. + var group *checksdb.ChecksGroup + // Trigger abort with a reason. 
+ _ = group.OnAbort("global time-out") +} +``` + +--- + +### ChecksGroup.RecordChecksResults + +**RecordChecksResults** - Iterates over every `Check` in the receiver’s check list and persists each outcome to the global results store, emitting an informational log. + +Collects the results of all checks in a group and records them into the shared result database, logging the action. + +#### Signature (Go) + +```go +func (group *ChecksGroup) RecordChecksResults() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over every `Check` in the receiver’s check list and persists each outcome to the global results store, emitting an informational log. | +| **Parameters** | `group *ChecksGroup` – the group whose checks are being recorded (receiver). | +| **Return value** | None. | +| **Key dependencies** | • Calls `log.Info` from the internal logging package.
• Invokes the helper `recordCheckResult(check)` for each check. | +| **Side effects** | • Writes to the shared map `resultsDB`.
• Emits log entries (no external I/O). | +| **How it fits the package** | Used by `RunChecks` and abort handling to ensure that all executed checks, regardless of success or failure, are persisted for later reporting. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start RecordChecksResults"] --> B["Log group name"] + B --> C{"Iterate over group.checks"} + C --> D["Call recordCheckResult(check)"] + D --> C + C --> E["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_ChecksGroup.RecordChecksResults --> func_log.Info + func_ChecksGroup.RecordChecksResults --> func_recordCheckResult +``` + +#### Functions calling `ChecksGroup.RecordChecksResults` + +```mermaid +graph TD + func_RunChecks --> func_ChecksGroup.RecordChecksResults +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.RecordChecksResults +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Assume we have a populated ChecksGroup instance named grp. + var grp *checksdb.ChecksGroup + // ... populate grp with checks ... + grp.RecordChecksResults() +} +``` + +--- + +--- + +### ChecksGroup.RunChecks + +**RunChecks** - Runs all enabled checks in a `ChecksGroup`, respecting label filtering and lifecycle hooks (`BeforeAll`, `BeforeEach`, `AfterEach`, `AfterAll`). It returns any errors encountered and the count of failed checks. + +#### Signature (Go) + +```go +func (group *ChecksGroup) RunChecks(stopChan <-chan bool, abortChan chan string) ([]error, int) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs all enabled checks in a `ChecksGroup`, respecting label filtering and lifecycle hooks (`BeforeAll`, `BeforeEach`, `AfterEach`, `AfterAll`). It returns any errors encountered and the count of failed checks. | +| **Parameters** | *`stopChan <-chan bool`* – signals an external stop request (e.g., timeout).
*`abortChan chan string`* – channel used by individual checks to abort themselves with a message. | +| **Return value** | `[]error` – slice of errors collected during execution (may contain at most one error if a pre‑execution hook fails).
`int` – number of checks that ended with a failed result (`CheckResultFailed`). | +| **Key dependencies** | • `log.Info`, `fmt.Printf`, `strings.ToUpper` for logging.
• `labelsExprEvaluator.Eval` to filter checks by label expression.
• Helper functions: `skipCheck`, `runBeforeAllFn`, `runBeforeEachFn`, `runCheck`, `runAfterEachFn`, `runAfterAllFn`. | +| **Side effects** | • Logs progress and errors.
• Sets each check’s abort channel (`SetAbortChan`).
• Updates the group’s `currentRunningCheckIdx`.
• Invokes user‑supplied lifecycle functions which may modify global state or interact with external resources. | +| **How it fits the package** | Central orchestration routine for executing a set of CNF certification checks, integrating filtering, skipping logic, and error handling into the `checksdb` package’s test harness. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log group start"] + B --> C{"Filter by labels"} + C -- No match --> D["Skip check"] + C -- Match --> E["Collect checks"] + E --> F["Run AfterAllFn (deferred)"] + F --> G["Run BeforeAllFn"] + G --> H{"BeforeAll error?"} + H -- Yes --> I["Return errors, count=0"] + H -- No --> J["Iterate over checks"] + J --> K["Stop check?"] + K -- Stop signal --> L["Abort loop, return nil,0"] + K -- Continue --> M["Run BeforeEachFn"] + M --> N{"BeforeEach error?"} + N -- Yes --> O["Set errs={err}, break"] + N -- No --> P["Check skip conditions"] + P --> Q{"Skip?"} + Q -- Yes --> R["Call skipCheck, continue"] + Q -- No --> S["Set abort channel"] + S --> T["Run check function"] + T --> U{"Check error?"} + U -- Yes --> V["Append err"] + U -- No --> W["Continue"] + W --> X["Run AfterEachFn"] + X --> Y{"AfterEach error?"} + Y -- Yes --> Z["Append err, break"] + Y -- No --> AA["Increment failed counter if needed"] + AA --> AB["Increment idx"] + AB --> J + L --> AC["Return nil,0"] + I --> AC +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ChecksGroup.RunChecks --> func_skipCheck + func_ChecksGroup.RunChecks --> func_runBeforeAllFn + func_ChecksGroup.RunChecks --> func_runBeforeEachFn + func_ChecksGroup.RunChecks --> func_shouldSkipCheck + func_ChecksGroup.RunChecks --> func_runCheck + func_ChecksGroup.RunChecks --> func_runAfterEachFn + func_ChecksGroup.RunChecks --> func_runAfterAllFn +``` + +#### Functions calling `ChecksGroup.RunChecks` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.RunChecks +package main + +import ( + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Assume `group` has been populated elsewhere + var group *checksdb.ChecksGroup + + stopChan := make(chan bool) + abortChan := make(chan string) + + // Run checks with a timeout of 30 seconds + go func() { + time.Sleep(30 * time.Second) + close(stopChan) // signal to stop early if needed + }() + + errs, failed := group.RunChecks(stopChan, abortChan) + + // Handle results + for _, err := range errs { + println("Error:", err.Error()) + } + println("Failed checks:", failed) +} +``` + +--- + +--- + +### ChecksGroup.WithAfterAllFn + +**WithAfterAllFn** - Stores a user‑supplied function to be executed after all checks in the group have run. The callback receives the slice of `*Check` objects and may perform validation, cleanup, or aggregation. + +#### 1) Signature (Go) + +```go +func (group *ChecksGroup) WithAfterAllFn(afterAllFn func(checks []*Check) error) *ChecksGroup +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores a user‑supplied function to be executed after all checks in the group have run. The callback receives the slice of `*Check` objects and may perform validation, cleanup, or aggregation. | +| **Parameters** | `afterAllFn func(checks []*Check) error` – a closure that accepts the checks slice and returns an error if post‑processing fails. | +| **Return value** | `*ChecksGroup` – the same group instance for method chaining. | +| **Key dependencies** | *None* – only assigns to a field. | +| **Side effects** | Mutates the receiver’s internal `afterAllFn` field; no external I/O or concurrency. | +| **How it fits the package** | Part of the fluent API for configuring a `ChecksGroup`. It allows callers to hook into the lifecycle of check execution without altering core logic. 
| + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Call WithAfterAllFn"] --> B["Set group.afterAllFn"] + B --> C["Return group"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 5) Functions calling `ChecksGroup.WithAfterAllFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.WithAfterAllFn + +group := &checksdb.ChecksGroup{} +group = group.WithAfterAllFn(func(checks []*checksdb.Check) error { + // Example: ensure all checks passed before returning nil. + for _, c := range checks { + if !c.Passed() { + return fmt.Errorf("check %s failed", c.Name) + } + } + return nil +}) +``` + +--- + +### ChecksGroup.WithAfterEachFn + +**WithAfterEachFn** - Sets the callback that will be invoked after each `Check` is executed, allowing custom post‑processing or cleanup. + +#### Signature (Go) + +```go +func (group *ChecksGroup) WithAfterEachFn(afterEachFn func(check *Check) error) *ChecksGroup +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sets the callback that will be invoked after each `Check` is executed, allowing custom post‑processing or cleanup. | +| **Parameters** | `afterEachFn func(check *Check) error` – function receiving a pointer to the finished check and returning an optional error. | +| **Return value** | The same `*ChecksGroup` instance, enabling method chaining. | +| **Key dependencies** | - Assigns the provided function to `group.afterEachFn`. | +| **Side effects** | Mutates the receiver’s internal state by storing the callback; no I/O or concurrency actions occur here. | +| **How it fits the package** | Provides a fluent API for configuring a `ChecksGroup` before execution, enhancing extensibility of check workflows. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive afterEachFn"] --> B["Store in group.afterEachFn"] + B --> C["Return group"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `ChecksGroup.WithAfterEachFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.WithAfterEachFn +group := &checksdb.ChecksGroup{} +group = group.WithAfterEachFn(func(check *checksdb.Check) error { + // Perform cleanup or logging after each check + fmt.Printf("Completed check: %s\n", check.Name) + return nil +}) +``` + +--- + +### ChecksGroup.WithBeforeAllFn + +**WithBeforeAllFn** - Stores a user‑supplied function that will run once before all checks in the group are executed, allowing preparatory work or validation. + +#### Signature (Go) + +```go +func (group *ChecksGroup) WithBeforeAllFn(beforeAllFn func(checks []*Check) error) *ChecksGroup +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores a user‑supplied function that will run once before all checks in the group are executed, allowing preparatory work or validation. | +| **Parameters** | `beforeAllFn` – a callback receiving a slice of pointers to `Check` objects; it may return an error to abort the group’s execution. | +| **Return value** | The same `ChecksGroup` instance (`group`) to support method chaining. | +| **Key dependencies** | • Assigns the provided function to the receiver’s `beforeAllFn` field.
• No external package calls. | +| **Side effects** | Mutates the `beforeAllFn` field of the `ChecksGroup`; no I/O or concurrency actions occur here. | +| **How it fits the package** | Part of the builder pattern for configuring a check group; it enables users to inject logic that runs once before any checks in the group are evaluated. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive beforeAllFn"] --> B{"Assign to group.beforeAllFn"} + B --> C["Return group"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `ChecksGroup.WithBeforeAllFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.WithBeforeAllFn +group := &checksdb.ChecksGroup{} +group = group.WithBeforeAllFn(func(checks []*checksdb.Check) error { + // Perform any setup needed before all checks run. + fmt.Println("Running pre‑execution hook for", len(checks), "checks") + return nil +}) +``` + +--- + +### ChecksGroup.WithBeforeEachFn + +**WithBeforeEachFn** - Stores the supplied function to be run before each `Check` in the group, enabling pre‑processing or validation logic. + +#### Signature (Go) + +```go +func (group *ChecksGroup) WithBeforeEachFn(beforeEachFn func(check *Check) error) *ChecksGroup +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Stores the supplied function to be run before each `Check` in the group, enabling pre‑processing or validation logic. | +| **Parameters** | `beforeEachFn func(check *Check) error` – a callback that receives a pointer to a `Check` and returns an error if processing fails. | +| **Return value** | The same `*ChecksGroup` instance (the receiver), allowing method chaining. | +| **Key dependencies** | None – the function merely assigns the provided callback to an internal field. 
| **Side effects** | Mutates the `beforeEachFn` field of the `ChecksGroup` instance; no external I/O or concurrency. | +| **How it fits the package** | Part of the fluent API for configuring a group of checks, enabling users to hook into check execution without modifying individual checks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + User -->|"Calls WithBeforeEachFn"| ChecksGroup + ChecksGroup -->|"Assigns beforeEachFn"| ChecksGroup["State Updated"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `ChecksGroup.WithBeforeEachFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ChecksGroup.WithBeforeEachFn +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + group := &checksdb.ChecksGroup{} + group = group.WithBeforeEachFn(func(check *checksdb.Check) error { + // Example: log the check ID before it runs + fmt.Printf("Running check: %s\n", check.ID) + return nil + }) + + // The group now holds the callback and will invoke it before each check execution. +} +``` + +--- + +### FilterCheckIDs + +**FilterCheckIDs** - Iterates over the internal check database (`dbByGroup`), evaluates each check’s labels against a global expression evaluator (`labelsExprEvaluator.Eval`). If the expression matches, the check’s ID is collected. Returns the list of matched IDs. + +Filters all registered checks by a pre‑initialized label expression evaluator and returns their IDs. + +--- + +#### Signature (Go) + +```go +func FilterCheckIDs() ([]string, error) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over the internal check database (`dbByGroup`), evaluates each check’s labels against a global expression evaluator (`labelsExprEvaluator.Eval`). 
If the expression matches, the check’s ID is collected. Returns the list of matched IDs. | +| **Parameters** | none | +| **Return value** | `([]string, error)` – a slice of matching check IDs; an error if evaluation fails (currently always `nil` in this implementation). | +| **Key dependencies** | • `labelsExprEvaluator.Eval`
• `append` built‑in function
• `dbByGroup` global variable holding grouped checks | +| **Side effects** | No state mutation or I/O; purely functional. | +| **How it fits the package** | Provides a public API for other packages to retrieve check IDs that satisfy a user‑supplied label expression, enabling selective test execution. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph DB["Database"] + dbByGroup --> groupChecks["Group Checks"] + groupChecks --> check["Check"] + end + + evalExpr["labelsExprEvaluator.Eval"] --> match{"Matches?"} + match --yes--> collect["append to result"] + collect --> nextCheck["next Check"] + + result["Return filtered IDs"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + FilterCheckIDs --> Eval + FilterCheckIDs --> append +``` + +--- + +#### Functions calling `FilterCheckIDs` (Mermaid) + +```mermaid +graph TD + getMatchingTestIDs --> FilterCheckIDs +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking FilterCheckIDs +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Assume the label evaluator has been initialized elsewhere. + ids, err := checksdb.FilterCheckIDs() + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Println("Matching check IDs:", ids) +} +``` + +--- + +--- + +### GetReconciledResults + +**GetReconciledResults** - Consolidates the global `resultsDB` store into a plain Go map suitable for inclusion in a Claim output. + +#### Signature (Go) + +```go +func GetReconciledResults() map[string]claim.Result +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Consolidates the global `resultsDB` store into a plain Go map suitable for inclusion in a Claim output. | +| **Parameters** | None | +| **Return value** | `map[string]claim.Result` – a copy of all entries currently held in `resultsDB`. 
| +| **Key dependencies** | *`make(map[string]claim.Result)` – creates the result container.
* Iteration over the package‑wide `resultsDB` map. | +| **Side effects** | None; purely functional, no mutation of external state or I/O. | +| **How it fits the package** | Provides a public API for other packages (e.g., claim construction) to retrieve all recorded check results without exposing internal data structures. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph Init + A["Create empty resultMap"] --> B["Iterate over resultsDB"] + end + subgraph Populate + B --> C{"Key exists in resultMap?"} + C -- No --> D["Initialize claim.Result{} for key"] + D --> E["Assign value from resultsDB"] + C -- Yes --> F["Assign value from resultsDB"] + E --> G["Return resultMap"] + F --> G + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + GetReconciledResults + MakeMap + RangeResultsDB + + GetReconciledResults --> MakeMap + GetReconciledResults --> RangeResultsDB +``` + +#### Functions calling `GetReconciledResults` (Mermaid) + +```mermaid +graph TD + func_ClaimBuilder.Build --> func_GetReconciledResults +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetReconciledResults +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + results := checksdb.GetReconciledResults() + fmt.Printf("Collected %d check results\n", len(results)) +} +``` + +--- + +### GetResults + +**GetResults** - Exposes the in‑memory database of check results to callers. + +#### Signature (Go) + +```go +func GetResults() map[string]claim.Result +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Exposes the in‑memory database of check results to callers. | +| **Parameters** | None | +| **Return value** | A `map[string]claim.Result` containing all stored results keyed by string identifiers. | +| **Key dependencies** | Uses the package‑level variable `resultsDB`. 
| +| **Side effects** | No state changes; purely read‑only access to global data. | +| **How it fits the package** | Provides a public accessor for other components that need to read the results accumulated by the checks database. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetResults --> Return["Return `resultsDB`"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `GetResults` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetResults +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + results := checksdb.GetResults() + fmt.Printf("Retrieved %d check results\n", len(results)) +} +``` + +--- + +### GetTestSuites + +**GetTestSuites** - Enumerates the keys in the package‑level `resultsDB` map and returns a slice containing each distinct test suite name once. + +#### Signature (Go) + +```go +func GetTestSuites() []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates the keys in the package‑level `resultsDB` map and returns a slice containing each distinct test suite name once. | +| **Parameters** | None | +| **Return value** | A `[]string` with every unique key from `resultsDB`. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper.StringInSlice` – checks for existing entries.
• Built‑in `append` function. | +| **Side effects** | None; the function is read‑only and does not modify global state. | +| **How it fits the package** | Provides a public API for other components to discover which test suites have been recorded in the database. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> InitSlice["var suites []string"] + InitSlice --> ForLoop["for key := range resultsDB"] + ForLoop --> CheckExists["if !StringInSlice(suites, key, false)"] + CheckExists --> Append["suites = append(suites, key)"] + Append --> NextIter + NextIter --> EndLoop["end for"] + EndLoop --> Return["return suites"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetTestSuites --> func_StringInSlice + func_GetTestSuites --> func_append +``` + +#### Functions calling `GetTestSuites` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetTestSuites +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + suites := checksdb.GetTestSuites() + fmt.Println("Available test suites:", suites) +} +``` + +--- + +### GetTestsCountByState + +**GetTestsCountByState** - Returns how many test results currently hold the specified `state`. + +#### Signature (Go) + +```go +func GetTestsCountByState(state string) int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns how many test results currently hold the specified `state`. | +| **Parameters** | `state` (string) – The state to filter by (e.g., `"passed"`, `"failed"`). | +| **Return value** | `int` – Number of results whose `State` field matches the supplied `state`. | +| **Key dependencies** | *None* | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a quick lookup for statistics on test outcomes stored in the shared `resultsDB`. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over resultsDB"} + B -->|"Match state"| C["Increment counter"] + B -->|"No match"| D["Continue loop"] + C --> E["Return count"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_GetTestsCountByState --> None +``` + +#### Functions calling `GetTestsCountByState` + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + None --> func_GetTestsCountByState +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetTestsCountByState +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + passed := checksdb.GetTestsCountByState("passed") + fmt.Printf("Number of passed tests: %d\n", passed) +} +``` + +--- + +### GetTotalTests + +**GetTotalTests** - Provides a quick count of all test entries currently held in the internal `resultsDB`. + +#### Signature (Go) + +```go +func GetTotalTests() int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides a quick count of all test entries currently held in the internal `resultsDB`. | +| **Parameters** | None | +| **Return value** | An integer representing the total number of tests. | +| **Key dependencies** | Calls the built‑in `len` function to obtain the length of the `resultsDB` slice/map. | +| **Side effects** | None; purely read‑only operation. | +| **How it fits the package** | Serves as a lightweight helper for callers that need to report or validate the size of the test database without exposing internal data structures. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve resultsDB"} + B --> C["len(resultsDB)"] + C --> D["Return count"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetTotalTests --> builtin_len +``` + +#### Functions calling `GetTotalTests` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetTotalTests +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + total := checksdb.GetTotalTests() + fmt.Printf("There are %d tests in the database.\n", total) +} +``` + +--- + +### InitLabelsExprEvaluator + +**InitLabelsExprEvaluator** - Builds and stores a `labels.LabelsExprEvaluator` that can evaluate label expressions supplied to the checks database. + +Initialises a global label expression evaluator for filtering test cases. + +```go +func InitLabelsExprEvaluator(labelsFilter string) error +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and stores a `labels.LabelsExprEvaluator` that can evaluate label expressions supplied to the checks database. | +| **Parameters** | `labelsFilter string –` a comma‑separated list of labels or the special keyword `"all"` which expands to all supported tags. | +| **Return value** | `error –` nil on success, otherwise an error describing why the evaluator could not be created. | +| **Key dependencies** | • `strings.Join` – concatenates tag names.
• `github.com/redhat‑best‑practices‑for‑k8s/certsuite/pkg/labels.NewLabelsExprEvaluator` – parses the expression into an AST.
• `fmt.Errorf` – formats error messages. | +| **Side effects** | Sets the package‑level variable `labelsExprEvaluator` with the constructed evaluator; no I/O or concurrency occurs. | +| **How it fits the package** | The checks database relies on a global label evaluator to filter test IDs during discovery and execution. This function is called at application start‑up and whenever the label filter changes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"labelsFilter == all"} + B -- Yes --> C["Build allTags slice"] + C --> D["Join tags into string"] + D --> E["Call NewLabelsExprEvaluator"] + B -- No --> E + E --> F{"err != nil"} + F -- Yes --> G["Return formatted error"] + F -- No --> H["Assign to labelsExprEvaluator"] + H --> I["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_InitLabelsExprEvaluator --> func_NewLabelsExprEvaluator + func_InitLabelsExprEvaluator --> fmt_Errorf + func_InitLabelsExprEvaluator --> strings_Join +``` + +#### Functions calling `InitLabelsExprEvaluator` (Mermaid) + +```mermaid +graph TD + func_getMatchingTestIDs --> func_InitLabelsExprEvaluator + func_Checksdb.Startup --> func_InitLabelsExprEvaluator + func_runHandler --> func_InitLabelsExprEvaluator +``` + +#### Usage example (Go) + +```go +// Minimal example invoking InitLabelsExprEvaluator +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + if err := checksdb.InitLabelsExprEvaluator("common,extended"); err != nil { + fmt.Printf("Failed to initialise label evaluator: %v\n", err) + return + } + // The global evaluator is now ready for use by the checks database. +} +``` + +--- + +### NewCheck + +**NewCheck** - Instantiates a `Check` object pre‑configured for use in the checks database. It sets an initial result status and prepares a logger tied to the check’s ID. 
+ +#### 1) Signature (Go) + +```go +func NewCheck(id string, labels []string) *Check +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `Check` object pre‑configured for use in the checks database. It sets an initial result status and prepares a logger tied to the check’s ID. | +| **Parameters** | `id string –` unique identifier of the check
`labels []string –` semantic labels describing the check (e.g., severity, category) | +| **Return value** | `*Check` – pointer to the newly created check instance | +| **Key dependencies** | • `log.GetMultiLogger()` – obtains a logger that writes to an in‑memory buffer and optionally forwards to CLI sniffer.
• `cli.CliCheckLogSniffer` – logger sink used by the multi‑logger. | +| **Side effects** | *Mutates the new `Check` instance (sets fields).
* Configures a logger that captures log output into an internal string builder (`logArchive`). No external I/O or concurrency is triggered during construction. | +| **How it fits the package** | This constructor is used throughout the test suite to register checks in various check groups (`NewChecksGroup.Add(NewCheck(...))`). It centralises common initialisation logic for all checks. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Create Check struct"} + B --> C["Set ID, Labels, Default Result"] + C --> D["Prepare logArchive buffer"] + D --> E["Get multi‑logger via log.GetMultiLogger(logArchive, cli.CliCheckLogSniffer)"] + E --> F["Attach logger to check"] + F --> G["Return *Check"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCheck --> func_GetMultiLogger + func_NewCheck --> var_CliCheckLogSniffer +``` + +#### 5) Functions calling `NewCheck` (Mermaid) + +```mermaid +graph TD + func_LoadChecks_AccessControl --> func_NewCheck + func_LoadChecks_Operator --> func_NewCheck + func_LoadChecks_Performance --> func_NewCheck + func_LoadChecks_Platform --> func_NewCheck + func_LoadChecks_Networking --> func_NewCheck + func_LoadChecks_Observability --> func_NewCheck + func_LoadChecks_Certification --> func_NewCheck + func_LoadChecks_Lifecycle --> func_NewCheck + func_LoadChecks_PreflightGenerateFunctions --> func_NewCheck +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking NewCheck +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Create a check with ID and labels + chk := checksdb.NewCheck("test-id", []string{"severity:high", "category:security"}) + + // The returned object can now be added to a ChecksGroup or used directly. 
+ _ = chk // placeholder to avoid unused variable error +} +``` + +--- + +### NewChecksGroup + +**NewChecksGroup** - Returns a singleton `ChecksGroup` for the given name, creating it if absent. It ensures thread‑safe access to the global registry of groups. + +#### Signature (Go) + +```go +func NewChecksGroup(groupName string) *ChecksGroup +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a singleton `ChecksGroup` for the given name, creating it if absent. It ensures thread‑safe access to the global registry of groups. | +| **Parameters** | `groupName string` – Identifier for the checks group (e.g., “accesscontrol”). | +| **Return value** | `*ChecksGroup` – The existing or newly created group instance. | +| **Key dependencies** | *`dbLock.Lock()` / `Unlock()` – Mutex guarding the global map.
* Global variable `dbByGroup map[string]*ChecksGroup`. | +| **Side effects** | Modifies the shared `dbByGroup` map on first request; otherwise only reads it. No I/O or external state changes. | +| **How it fits the package** | Central factory for all check groups used by test suites (`LoadChecks`). It guarantees that each group name maps to a single instance, enabling consistent registration and execution of checks across the framework. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Acquire dbLock"] --> B{"Check if dbByGroup is nil"} + B --yes--> C["Initialize map"] + B --no--> D{"group exists?"} + D --yes--> E["Return existing group"] + D --no--> F["Create new ChecksGroup"] + F --> G["Store in dbByGroup"] + G --> H["Release dbLock"] + H --> I["Return group"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewChecksGroup --> func_dbLock.Lock + func_NewChecksGroup --> func_dbLock.Unlock +``` + +#### Functions calling `NewChecksGroup` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_NewChecksGroup +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewChecksGroup +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + group := checksdb.NewChecksGroup("example") + fmt.Printf("Created group: %s with %d checks\n", group.Name, len(group.Checks)) +} +``` + +--- + +### RunChecks + +**RunChecks** - Orchestrates execution of all `ChecksGroup`s, handles global aborts via timeout or OS signals, aggregates failures and errors. + +#### Signature (Go) + +```go +func RunChecks(timeout time.Duration) (failedCtr int, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Orchestrates execution of all `ChecksGroup`s, handles global aborts via timeout or OS signals, aggregates failures and errors. | +| **Parameters** | `timeout` time.Duration – maximum time allowed for the entire run. 
| +| **Return value** | `failedCtr int` – total number of failed checks.
`err error` – aggregated error if any group returned non‑nil errors. | +| **Key dependencies** | *Synchronization: `dbLock.Lock/Unlock`
* Timing: `time.After(timeout)`
*Signal handling: `signal.Notify`, `signal.Stop` with SIGINT/SIGTERM
* Group operations: `group.RunChecks`, `group.OnAbort`, `group.RecordChecksResults`
* Logging & CLI output: `log.*`, `cli.PrintResultsTable`, `printFailedChecksLog` | +| **Side effects** | Locks shared database lock, blocks until all groups finish or abort.
Prints summary table and detailed failed‑check logs to stdout. | +| **How it fits the package** | Entry point for running checks; called by higher‑level command `certsuite.Run`. Manages global coordination across multiple check groups stored in `dbByGroup`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Acquire dbLock"] --> B["Setup timeout and signal channels"] + B --> C["Iterate over dbByGroup"] + C --> D{"Abort flag?"} + D -- No --> E["Create stopChan, abortChan, groupDone"] + E --> F["Launch goroutine: group.RunChecks(stopChan, abortChan)"] + F --> G["Select on groupDone / abortChan / timeout / sigIntChan"] + G --> H["Handle outcome"] + H --> I["group.RecordChecksResults()"] + D -- Yes --> J["Call group.OnAbort(abortReason) & RecordChecksResults()"] + I --> C + C --> K["Print results table & failed logs"] + K --> L{"errs empty?"} + L -- No --> M["Return error with count"] + L -- Yes --> N["Return failedCtr, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_RunChecks --> dbLock.Lock + func_RunChecks --> time.After + func_RunChecks --> signal.Notify + func_RunChecks --> signal.Stop + func_RunChecks --> group.RunChecks + func_RunChecks --> group.OnAbort + func_RunChecks --> group.RecordChecksResults + func_RunChecks --> cli.PrintResultsTable + func_RunChecks --> printFailedChecksLog +``` + +#### Functions calling `RunChecks` (Mermaid) + +```mermaid +graph TD + func_Run --> func_RunChecks +``` + +#### Usage example (Go) + +```go +// Minimal example invoking RunChecks +package main + +import ( + "fmt" + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + timeout := 5 * time.Minute + failed, err := checksdb.RunChecks(timeout) + if err != nil { + fmt.Printf("Error running checks: %v\n", err) + } + fmt.Printf("%d checks failed.\n", failed) +} +``` + +--- + +--- + +## Local Functions + +### getResultsSummary + +**getResultsSummary** - Builds a summary map where each 
key is a group name and the value is a slice `[passed, failed, skipped]` reflecting the outcome of all checks in that group. + +Collects per‑group counts of passed, failed and skipped checks into a map. + +#### Signature (Go) + +```go +func getResultsSummary() map[string][]int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a summary map where each key is a group name and the value is a slice `[passed, failed, skipped]` reflecting the outcome of all checks in that group. | +| **Parameters** | None | +| **Return value** | `map[string][]int` – mapping from group names to their check result counts. | +| **Key dependencies** | • Calls the built‑in `make` function to create the map.
• Iterates over `dbByGroup`, a package‑level map of groups. | +| **Side effects** | None; purely functional, no I/O or state mutation. | +| **How it fits the package** | Used by `RunChecks` to produce a concise summary for CLI output after all checks have executed. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate dbByGroup"} + B --> C["Initialize groupResults = (0,0,0)"] + C --> D{"For each check in group.checks"} + D -->|"Passed"| E["groupResults PASSED++"] + D -->|"Failed"| F["groupResults FAILED++"] + D -->|"Skipped"| G["groupResults SKIPPED++"] + E --> H["Assign results for this group"] + F --> H + G --> H + H --> I{"Next group?"} + I -- Yes --> B + I -- No --> J["Return results"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getResultsSummary --> func_make +``` + +#### Functions calling `getResultsSummary` (Mermaid) + +```mermaid +graph TD + func_RunChecks --> func_getResultsSummary +``` + +#### Usage example (Go) + +```go +// Minimal example exercising getResultsSummary indirectly. +// getResultsSummary is unexported; it runs inside checksdb.RunChecks, +// which prints the per‑group summary table it produces. +package main + +import ( + "fmt" + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + failed, err := checksdb.RunChecks(30 * time.Second) // Run all checks with a timeout + if err != nil { + fmt.Println("Error:", err) + } + fmt.Printf("%d checks failed\n", failed) +} +``` + +--- + +### onFailure + +**onFailure** - Marks the current check as failed, skips all subsequent checks in the group, and returns a generic error describing the failure. + +#### Signature (Go) + +```go +func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Marks the current check as failed, skips all subsequent checks in the group, and returns a generic error describing the failure. | +| **Parameters** | `failureType string` – short description of the failure source (e.g., “check panic”).
`failureMsg string` – detailed message or stack trace.
`group *ChecksGroup` – the check group being processed.
`currentCheck *Check` – the check that failed.
`remainingChecks []*Check` – checks yet to run. | +| **Return value** | `error` containing a concise reason (`"group "`). | +| **Key dependencies** | • `fmt.Printf` (logging progress)
• `Check.SetResultError`
• `skipAll` helper
• `errors.New` | +| **Side effects** | Updates state of the failed check (`Result = Error`, records skip reason). Marks all remaining checks as skipped. Outputs a line to stdout. | +| **How it fits the package** | Central error‑handling routine used by hook wrappers and the main check runner to standardise failure reporting across `beforeAll`, `afterEach`, etc. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive failure details"] --> B["Print status line"] + B --> C["Set currentCheck ResultError"] + C --> D["Prepare skip reason"] + D --> E["Call skipAll on remainingChecks"] + E --> F["Return generic error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_onFailure --> fmt.Printf + func_onFailure --> Check.SetResultError + func_onFailure --> skipAll + func_onFailure --> errors.New +``` + +#### Functions calling `onFailure` (Mermaid) + +```mermaid +graph TD + runBeforeAllFn --> func_onFailure + runBeforeEachFn --> func_onFailure + runAfterAllFn --> func_onFailure + runAfterEachFn --> func_onFailure + runCheck --> func_onFailure +``` + +#### Usage example (Go) + +```go +// Minimal example invoking onFailure +group := &ChecksGroup{name: "example"} +check := &Check{ID: "chk1"} +remaining := []*Check{{ID: "chk2"}, {ID: "chk3"}} + +err := onFailure("panic", "stack trace here", group, check, remaining) +if err != nil { + fmt.Println("Handling failure:", err) +} +``` + +--- + +### printCheckResult + +**printCheckResult** - Outputs the result of a single check to the console, formatting the message according to whether the check passed, failed, was skipped, aborted, or errored. + +#### Signature (Go) + +```go +func printCheckResult(check *Check) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Outputs the result of a single check to the console, formatting the message according to whether the check passed, failed, was skipped, aborted, or errored. 
| +| **Parameters** | `check *Check` – reference to the check whose outcome is displayed. | +| **Return value** | none (void) | +| **Key dependencies** | Calls to `cli.PrintCheckPassed`, `cli.PrintCheckFailed`, `cli.PrintCheckSkipped`, `cli.PrintCheckAborted`, and `cli.PrintCheckErrored`. | +| **Side effects** | Writes formatted text to standard output via the CLI helpers; may stop a background goroutine that updates the check status line. No state mutation of the `check` itself occurs. | +| **How it fits the package** | Used internally by `Check.Run`, group abort logic, and skip handling to provide user‑visible feedback after each check is evaluated. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph "Determine outcome" + A["check.Result"] --> B{"Result value"} + end + B -- Passed --> C["cli.PrintCheckPassed(check.ID)"] + B -- Failed --> D["cli.PrintCheckFailed(check.ID)"] + B -- Skipped --> E["cli.PrintCheckSkipped(check.ID, check.skipReason)"] + B -- Aborted --> F["cli.PrintCheckAborted(check.ID, check.skipReason)"] + B -- Error --> G["cli.PrintCheckErrored(check.ID)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printCheckResult --> func_PrintCheckPassed + func_printCheckResult --> func_PrintCheckFailed + func_printCheckResult --> func_PrintCheckSkipped + func_printCheckResult --> func_PrintCheckAborted + func_printCheckResult --> func_PrintCheckErrored +``` + +#### Functions calling `printCheckResult` (Mermaid) + +```mermaid +graph TD + func_Check.Run --> func_printCheckResult + func_ChecksGroup.OnAbort --> func_printCheckResult + func_skipCheck --> func_printCheckResult +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printCheckResult +check := &Check{ + ID: "example-check", + Result: CheckResultPassed, +} +printCheckResult(check) // prints "[ PASS ] example-check" +``` + +--- + +### printFailedChecksLog + +**printFailedChecksLog** - Iterates over every check in the database, and 
for those that failed, outputs a formatted log header followed by the check’s archived logs. + +#### Signature (Go) + +```go +func printFailedChecksLog() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over every check in the database, and for those that failed, outputs a formatted log header followed by the check’s archived logs. | +| **Parameters** | None | +| **Return value** | None – this function performs side‑effects only. | +| **Key dependencies** | • `fmt.Sprintf`, `fmt.Println` (package `fmt`)
• `unicode/utf8.RuneCountInString` (package `unicode/utf8`)
• `strings.Repeat` (package `strings`)
• `Check.GetLogs()` method | +| **Side effects** | Writes to standard output; accesses global `dbByGroup` slice and each check’s internal log archive. | +| **How it fits the package** | Used by `RunChecks` after all checks have completed, providing a human‑readable trace of failures for CLI consumers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate dbByGroup"} + B --> C{"Iterate group.checks"} + C --> D{"check.Result == Failed"} + D -- Yes --> E["Format header"] + E --> F["Print dashes (nbSymbols)"] + F --> G["Print header"] + G --> H["Print dashes"] + H --> I["Get logs via check.GetLogs()"] + I --> J{"logs empty?"} + J -- Yes --> K["Print Empty log output"] + J -- No --> L["Print logs"] + D -- No --> M["Skip to next check"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_printFailedChecksLog --> fmt.Sprintf + func_printFailedChecksLog --> unicode/utf8.RuneCountInString + func_printFailedChecksLog --> fmt.Println + func_printFailedChecksLog --> strings.Repeat + func_printFailedChecksLog --> Check.GetLogs +``` + +#### Functions calling `printFailedChecksLog` (Mermaid) + +```mermaid +graph TD + func_RunChecks --> func_printFailedChecksLog +``` + +#### Usage example (Go) + +```go +// Minimal example invoking printFailedChecksLog +func main() { + // Assume checks have already run and dbByGroup is populated. + printFailedChecksLog() +} +``` + +--- + +### recordCheckResult + +**recordCheckResult** - Persists the outcome of a test check into the global `resultsDB`, enriching it with metadata such as timestamps, duration, and catalog classifications. + +#### Signature (Go) + +```go +func recordCheckResult(check *Check) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Persists the outcome of a test check into the global `resultsDB`, enriching it with metadata such as timestamps, duration, and catalog classifications. 
| +| **Parameters** | `check *Check` – reference to the check whose result is being recorded. | +| **Return value** | None (void). | +| **Key dependencies** | • `identifiers.TestIDToClaimID` map lookup
• `check.LogDebug`, `check.LogInfo` logging helpers
• `strings.ToUpper`
• `Check.Result.String()`
• `check.StartTime`, `check.EndTime` and their `String()`/`Sub()` methods
• `check.GetLogs()`
• `identifiers.Catalog[claimID]` for catalog metadata | +| **Side effects** | • Writes a new entry into the global map `resultsDB`.
• Emits debug or info logs. | +| **How it fits the package** | This helper is invoked by `ChecksGroup.RecordChecksResults()` to batch‑store all check results in the group, enabling later reporting and persistence layers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Lookup claimID"} + B -- not found --> C["Log debug & exit"] + B -- found --> D["Log info"] + D --> E["Build claim.Result struct"] + E --> F["Store in resultsDB"] + F --> G["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_recordCheckResult --> Check.LogDebug + func_recordCheckResult --> Check.LogInfo + func_recordCheckResult --> strings.ToUpper + func_recordCheckResult --> Check.Result.String + func_recordCheckResult --> Check.StartTime.Sub + func_recordCheckResult --> Seconds + func_recordCheckResult --> int + func_recordCheckResult --> Check.GetLogs +``` + +#### Functions calling `recordCheckResult` (Mermaid) + +```mermaid +graph TD + ChecksGroup.RecordChecksResults --> recordCheckResult +``` + +#### Usage example (Go) + +```go +// Minimal example invoking recordCheckResult +check := &Check{ + ID: "test-123", + Result: CheckSuccess, + StartTime: time.Now(), + EndTime: time.Now().Add(2 * time.Second), +} +recordCheckResult(check) +``` + +--- + +### runAfterAllFn + +**runAfterAllFn** - Runs the `afterAll` function registered on a `ChecksGroup`. If the hook panics or returns an error, it marks the last check as failed and skips any remaining checks. + +#### Signature (Go) + +```go +func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs the `afterAll` function registered on a `ChecksGroup`. If the hook panics or returns an error, it marks the last check as failed and skips any remaining checks. | +| **Parameters** | *group* `<*ChecksGroup>` – The group whose hook is to be executed.
*checks* `<[]*Check>` – Slice of all checks that were (or will be) run in this group. | +| **Return value** | `error` – Non‑nil if the hook panicked or returned an error; otherwise `nil`. | +| **Key dependencies** | • `log.Debug`, `log.Error`
• Go built‑ins: `len`, `recover`, `fmt.Sprint`, `string`, `runtime/debug.Stack`
• Internal helper: `onFailure` | +| **Side effects** | • Logs debug and error messages.
• Calls `onFailure` which sets the last check’s result to an error, skips remaining checks, and returns a generic error.| +| **How it fits the package** | Invoked by `ChecksGroup.RunChecks` in a deferred call so that the after‑all hook runs regardless of earlier panics or aborts, ensuring cleanup logic is executed. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["runAfterAllFn"] --> B{"group.afterAllFn defined?"} + B -- No --> C["return nil"] + B -- Yes --> D["call group.afterAllFn(group.checks)"] + D --> E{"error?"} + E -- Yes --> F["log error"] + E -- Yes --> G["onFailure(afterAll function unexpected error, err.Error(), ...)"] + E -- No --> H["return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_runAfterAllFn --> Logger_Debug + func_runAfterAllFn --> len + func_runAfterAllFn --> recover + func_runAfterAllFn --> fmt_Sprint + func_runAfterAllFn --> string + func_runAfterAllFn --> runtime_debug_Stack + func_runAfterAllFn --> Logger_Error + func_runAfterAllFn --> onFailure +``` + +#### Functions calling `runAfterAllFn` (Mermaid) + +```mermaid +graph TD + func_ChecksGroup_RunChecks --> func_runAfterAllFn +``` + +#### Usage example (Go) + +```go +// Minimal example invoking runAfterAllFn inside a test harness. +// Note: runAfterAllFn is unexported; normally called by RunChecks. +func example() { + group := &ChecksGroup{ + name: "example", + afterAllFn: func(checks []*Check) error { /* cleanup logic */ return nil }, + } + checks := []*Check{ /* populate with test checks */ } + + if err := runAfterAllFn(group, checks); err != nil { + fmt.Printf("afterAll failed: %v\n", err) + } +} +``` + +--- + +--- + +### runAfterEachFn + +**runAfterEachFn** - Runs a user‑defined `afterEach` callback after a check completes, handling panics and errors. 
+ +#### Signature (Go) + +```go +func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs a user‑defined `afterEach` callback after a check completes, handling panics and errors. | +| **Parameters** | `group *ChecksGroup` – the current check group.
`check *Check` – the check that just finished.
`remainingChecks []*Check` – checks yet to run. | +| **Return value** | `error` – nil on success; otherwise an error describing a panic or unexpected return from the callback. | +| **Key dependencies** | - `group.afterEachFn` (user function)
- `log.Debug`, `log.Error`
- `recover()`
- `fmt.Sprint`, `debug.Stack()`
- `onFailure()` helper | +| **Side effects** | May log debug/error messages.
Calls `onFailure` which sets the current check’s result to error and skips remaining checks. | +| **How it fits the package** | Part of the lifecycle of a `ChecksGroup`; invoked after each individual check in `RunChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"group.afterEachFn nil?"} + B -- Yes --> C["Return nil"] + B -- No --> D["Execute defer recover block"] + D --> E["Call group.afterEachFn(check)"] + E --> F{"Err?"} + F -- Yes --> G["log.Error, call onFailure"] + F -- No --> H["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_runAfterEachFn --> func_Logger.Debug + func_runAfterEachFn --> recover + func_runAfterEachFn --> fmt.Sprint + func_runAfterEachFn --> runtime/debug.Stack + func_runAfterEachFn --> func_Logger.Error + func_runAfterEachFn --> func_onFailure + func_runAfterEachFn --> func_afterEachFn + func_runAfterEachFn --> errors.New +``` + +#### Functions calling `runAfterEachFn` (Mermaid) + +```mermaid +graph TD + func_ChecksGroup.RunChecks --> func_runAfterEachFn +``` + +#### Usage example (Go) + +```go +// Minimal example invoking runAfterEachFn +group := &checksdb.ChecksGroup{ /* fields initialized */ } +check := &checksdb.Check{ID: "sample-check"} +remaining := []*checksdb.Check{} +err := runAfterEachFn(group, check, remaining) +if err != nil { + fmt.Println("afterEach error:", err) +} +``` + +--- + +### runBeforeAllFn + +**runBeforeAllFn** - Runs the `beforeAllFn` callback defined on a `ChecksGroup`. If the callback is absent, it does nothing. It also safeguards against panics and unexpected errors, converting them into a standardized failure state for the first check and skipping the rest. + +#### Signature (Go) + +```go +func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs the `beforeAllFn` callback defined on a `ChecksGroup`. 
If the callback is absent, it does nothing. It also safeguards against panics and unexpected errors, converting them into a standardized failure state for the first check and skipping the rest. | +| **Parameters** | `group *ChecksGroup` – The group whose hook will be executed.
`checks []*Check` – Slice of checks that are scheduled to run; used for reporting failures when the hook fails. | +| **Return value** | `error` – `nil` on success, otherwise an error describing why the hook failed (panic or return error). | +| **Key dependencies** | • `log.Debug`, `log.Error`
• `recover`, `fmt.Sprint`, `runtime/debug.Stack`
• `onFailure` helper
• `group.beforeAllFn(checks)` | +| **Side effects** | *Logs debug and error messages.
* May recover from a panic, log stack trace.
* Calls `onFailure`, which marks the first check as errored and skips all subsequent checks. | +| **How it fits the package** | This function is invoked by `ChecksGroup.RunChecks` before any individual checks are executed, ensuring that group‑wide preconditions are established or appropriately handled. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["log.Debug"] --> B{"group.beforeAllFn == nil"} + B -- Yes --> C["return nil"] + B -- No --> D[firstCheck := checks["0"]] + D --> E["defer recover block"] + E --> F["group.beforeAllFn(checks)"] + F --> G{"err != nil"} + G -- Yes --> H["log.Error & onFailure"] + G -- No --> I["return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_runBeforeAllFn --> Logger_Debug + func_runBeforeAllFn --> recover + func_runBeforeAllFn --> fmt_Sprint + func_runBeforeAllFn --> string + func_runBeforeAllFn --> runtime_debug_Stack + func_runBeforeAllFn --> Logger_Error + func_runBeforeAllFn --> onFailure + func_runBeforeAllFn --> group.beforeAllFn +``` + +#### Functions calling `runBeforeAllFn` (Mermaid) + +```mermaid +graph TD + func_ChecksGroup_RunChecks --> func_runBeforeAllFn +``` + +#### Usage example (Go) + +```go +// Minimal example invoking runBeforeAllFn +group := &ChecksGroup{ /* … populate fields … */ } +checks := []*Check{check1, check2} +if err := runBeforeAllFn(group, checks); err != nil { + fmt.Printf("beforeAll failed: %v\n", err) +} +``` + +--- + +--- + +### runBeforeEachFn + +**runBeforeEachFn** - Runs the optional `beforeEach` function defined on a `ChecksGroup`. Handles panics and errors by logging and marking the current check as failed while skipping subsequent checks. + +#### Signature (Go) + +```go +func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs the optional `beforeEach` function defined on a `ChecksGroup`. 
Handles panics and errors by logging and marking the current check as failed while skipping subsequent checks. | +| **Parameters** | *group* – the `ChecksGroup` containing the hook.
*check* – the `Check` currently being processed.
*remainingChecks* – slice of all remaining `Check`s after the current one. | +| **Return value** | `error` – nil if the hook succeeded; otherwise an error describing the failure (panic or unexpected return). | +| **Key dependencies** | *Logger.Debug*, *recover*, *fmt.Sprint*, *debug.Stack*, *Logger.Error*, *onFailure*, *beforeEachFn* | +| **Side effects** | • Logs debug and error messages.
• On panic or error, sets the current check’s result to `Error` and skips all remaining checks via `skipAll`.
• Does not modify global state beyond the check results. | +| **How it fits the package** | Part of the checks execution pipeline; invoked by `ChecksGroup.RunChecks` before each individual check runs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"group.beforeEachFn exists?"} + B -- No --> C["Return nil"] + B -- Yes --> D["defer recover handler"] + D --> E["group.beforeEachFn(check)"] + E --> F{"err from hook?"} + F -- Yes --> G["Log error"] + G --> H["Call onFailure(...)"] + H --> I["Set err and return"] + F -- No --> J["Return nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_runBeforeEachFn --> func_Logger.Debug + func_runBeforeEachFn --> recover + func_runBeforeEachFn --> fmt.Sprint + func_runBeforeEachFn --> debug.Stack + func_runBeforeEachFn --> func_Logger.Error + func_runBeforeEachFn --> func_onFailure + func_runBeforeEachFn --> beforeEachFn +``` + +#### Functions calling `runBeforeEachFn` (Mermaid) + +```mermaid +graph TD + ChecksGroup.RunChecks --> runBeforeEachFn +``` + +#### Usage example (Go) + +```go +// Minimal example invoking runBeforeEachFn within the same package. +func example() { + group := &ChecksGroup{beforeEachFn: func(c *Check) error { /* ... */ return nil }} + check := &Check{ID: "check-1"} + remaining := []*Check{} + if err := runBeforeEachFn(group, check, remaining); err != nil { + fmt.Println("beforeEach failed:", err) + } +} +``` + +--- + +### runCheck + +**runCheck** - Safely runs an individual `Check`, handling panics and unexpected errors by logging, aborting the current check, and marking subsequent checks as skipped. 
+ +#### Signature + +```go +func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Safely runs an individual `Check`, handling panics and unexpected errors by logging, aborting the current check, and marking subsequent checks as skipped. | +| **Parameters** | `check *Check` – the check to execute.
`group *ChecksGroup` – the group containing this check.
`remainingChecks []*Check` – checks that follow this one in execution order. | +| **Return value** | `error` – `nil` if the check ran successfully; otherwise an error describing the failure (panic or runtime error). | +| **Key dependencies** | • `recover` to catch panics
• `fmt`, `runtime/debug` for stack traces
• `log.Warn` and `check.LogError` for logging
• `onFailure` helper to set results on current and remaining checks | +| **Side effects** | *Logs warning or error messages.
* Sets the result of the current check (error) and marks all remaining checks as skipped.
* Triggers a generic error return that propagates up the call stack. | +| **How it fits the package** | `runCheck` is invoked by `ChecksGroup.RunChecks` for each eligible check, ensuring consistent failure handling across the suite. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start runCheck"] --> B{"Recover block"} + B --> C["Handle AbortPanicMsg"] + B --> D["Handle generic panic"] + D --> E["Log error & onFailure"] + B --> F["Normal flow"] + F --> G["check.Run()"] + G --> H{"Error?"} + H --> I["Unexpected error -> Log & onFailure"] + H --> J["Success"] + J --> K["Return nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_runCheck --> recover + func_runCheck --> log.Warn + func_runCheck --> fmt.Errorf + func_runCheck --> fmt.Sprint + func_runCheck --> debug.Stack + func_runCheck --> check.LogError + func_runCheck --> onFailure + func_runCheck --> fmt.Sprintf + func_runCheck --> check.Run +``` + +#### Functions calling `runCheck` + +```mermaid +graph TD + func_ChecksGroup_RunChecks --> func_runCheck +``` + +#### Usage example (Go) + +```go +// Minimal example invoking runCheck +check := &Check{ID: "sample-check"} +group := &ChecksGroup{name: "example-group", checks: []*Check{check}} +remaining := []*Check{} // no following checks + +if err := runCheck(check, group, remaining); err != nil { + fmt.Println("Check failed:", err) +} +``` + +--- + +### shouldSkipCheck + +**shouldSkipCheck** - Evaluates all skip functions attached to a `Check` instance. If any of them indicate that the check must be skipped (based on the configured `SkipMode`), it returns `true` and collects the corresponding reasons. It also safely recovers from panics in individual skip functions, logging details. + +#### Signature (Go) + +```go +func shouldSkipCheck(check *Check) (skip bool, reasons []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Evaluates all skip functions attached to a `Check` instance. 
If any of them indicate that the check must be skipped (based on the configured `SkipMode`), it returns `true` and collects the corresponding reasons. It also safely recovers from panics in individual skip functions, logging details. | +| **Parameters** | `check *Check` – The check whose skip status is to be evaluated. | +| **Return value** | `skip bool` – Whether the check should be skipped.
`reasons []string` – Human‑readable reasons for skipping (empty if not skipped). | +| **Key dependencies** | • `len`, `append` (built‑ins)
• `recover`
• `fmt.Sprint`, `fmt.Sprintf`
• `runtime/debug.Stack`
• `Check.LogError` method
• Skip functions (`skipFn`) defined on the check | +| **Side effects** | Logs an error if a skip function panics. No external I/O beyond logging. | +| **How it fits the package** | Used by `ChecksGroup.RunChecks` to decide whether a particular test should be executed or skipped before running its core logic. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Any skipFns?"} + B -- No --> C["Return false"] + B -- Yes --> D["Set currentSkipFnIndex=0"] + D --> E["Setup recover guard"] + E --> F["Iterate over skipFns"] + F --> G{"skipFn() returns true?"} + G -- Yes --> H["Append reason to list"] + G -- No --> I["Continue loop"] + I --> F + F --> J{"All skipFns processed?"} + J --> K{"reasons empty?"} + K -- Yes --> L["Return false"] + K -- No --> M{"SkipMode=Any"} + M -- True --> N["Return true"] + M -- False --> O{"All skipFns returned true?"} + O -- Yes --> P["Return true"] + O -- No --> Q["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_shouldSkipCheck --> builtin_len + func_shouldSkipCheck --> builtin_append + func_shouldSkipCheck --> builtin_recover + func_shouldSkipCheck --> fmt_Sprint + func_shouldSkipCheck --> string_conversion + func_shouldSkipCheck --> runtime_debug_Stack + func_shouldSkipCheck --> Check_LogError + func_shouldSkipCheck --> fmt_Sprintf + func_shouldSkipCheck --> skipFn +``` + +#### Functions calling `shouldSkipCheck` (Mermaid) + +```mermaid +graph TD + ChecksGroup_RunChecks --> func_shouldSkipCheck +``` + +#### Usage example (Go) + +```go +// Minimal example invoking shouldSkipCheck +check := &Check{ + SkipCheckFns: []func() (bool, string){ + func() (bool, string) { return false, "" }, + func() (bool, string) { return true, "environment not supported" }, + }, + SkipMode: SkipModeAny, +} +skip, reasons := shouldSkipCheck(check) +fmt.Printf("Should skip? %v; Reasons: %v\n", skip, reasons) +// Output: Should skip? 
true; Reasons: [environment not supported] +``` + +--- + +--- + +### skipAll + +**skipAll** - Iterates over a list of checks and marks each one as skipped, providing a uniform reason. + +#### Signature (Go) + +```go +func([]*Check, string)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over a list of checks and marks each one as skipped, providing a uniform reason. | +| **Parameters** | `checks []*Check` – the checks to skip; `reason string` – explanation for skipping. | +| **Return value** | None (void). | +| **Key dependencies** | Calls the helper function `skipCheck`. | +| **Side effects** | Mutates each check’s result state to *Skipped*, logs the action, and prints a summary line via `printCheckResult`. No external I/O beyond these internal calls. | +| **How it fits the package** | Used by higher‑level control flow (e.g., when a failure aborts a group) to cleanly mark remaining checks as skipped without further evaluation. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive slice of checks"] --> B{"For each check"} + B --> C["Call skipCheck(check, reason)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_skipAll --> func_skipCheck +``` + +#### Functions calling `skipAll` + +```mermaid +graph TD + func_onFailure --> func_skipAll +``` + +#### Usage example (Go) + +```go +// Minimal example invoking skipAll +checks := []*Check{check1, check2, check3} +reason := "group myGroup error: validation failed" +skipAll(checks, reason) +``` + +--- + +### skipCheck + +**skipCheck** - Marks a check as skipped with a given reason, logs the action, and prints the result. + +#### Signature (Go) + +```go +func skipCheck(check *Check, reason string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Marks a check as skipped with a given reason, logs the action, and prints the result. | +| **Parameters** | `check *Check` – the check to be skipped.
`reason string` – explanation for skipping. | +| **Return value** | none | +| **Key dependencies** | • `Check.LogInfo(msg string, args ...any)`
• `Check.SetResultSkipped(reason string)`
• `printCheckResult(check *Check)` | +| **Side effects** | Updates the check’s internal state (`Result` set to `Skipped`, `skipReason` stored), logs an informational message, and outputs the skipped result via CLI. No external I/O aside from logging/CLI output. | +| **How it fits the package** | Used by control flow functions (`RunChecks`, `skipAll`) to handle checks that should not execute due to label filtering or other preconditions. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive check & reason"] --> B["Log “Skipping check”"] + B --> C["Mark result as Skipped with reason"] + C --> D["Print skipped result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_skipCheck --> func_Check.LogInfo + func_skipCheck --> func_Check.SetResultSkipped + func_skipCheck --> func_printCheckResult +``` + +#### Functions calling `skipCheck` (Mermaid) + +```mermaid +graph TD + func_ChecksGroup.RunChecks --> func_skipCheck + func_skipAll --> func_skipCheck +``` + +#### Usage example (Go) + +```go +// Minimal example invoking skipCheck +check := &Check{ID: "example-check"} +reason := "no matching labels" +skipCheck(check, reason) +// The check is now marked as skipped and the result printed to CLI. 
+``` + +--- diff --git a/docs/pkg/claimhelper/claimhelper.md b/docs/pkg/claimhelper/claimhelper.md new file mode 100644 index 000000000..eb241a631 --- /dev/null +++ b/docs/pkg/claimhelper/claimhelper.md @@ -0,0 +1,1279 @@ +# Package claimhelper + +**Path**: `pkg/claimhelper` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [ClaimBuilder](#claimbuilder) + - [FailureMessage](#failuremessage) + - [SkippedMessage](#skippedmessage) + - [TestCase](#testcase) + - [TestSuitesXML](#testsuitesxml) + - [Testsuite](#testsuite) +- [Exported Functions](#exported-functions) + - [ClaimBuilder.Build](#claimbuilder.build) + - [ClaimBuilder.Reset](#claimbuilder.reset) + - [ClaimBuilder.ToJUnitXML](#claimbuilder.tojunitxml) + - [CreateClaimRoot](#createclaimroot) + - [GenerateNodes](#generatenodes) + - [GetConfigurationFromClaimFile](#getconfigurationfromclaimfile) + - [MarshalClaimOutput](#marshalclaimoutput) + - [MarshalConfigurations](#marshalconfigurations) + - [NewClaimBuilder](#newclaimbuilder) + - [ReadClaimFile](#readclaimfile) + - [SanitizeClaimFile](#sanitizeclaimfile) + - [UnmarshalClaim](#unmarshalclaim) + - [UnmarshalConfigurations](#unmarshalconfigurations) + - [WriteClaimOutput](#writeclaimoutput) +- [Local Functions](#local-functions) + - [populateXMLFromClaim](#populatexmlfromclaim) + +## Overview + +The claimhelper package builds and manages CNF certification claim files, serialising test results to JSON or JUnit‑style XML, and handling configuration extraction and sanitisation. 
+ +### Key Features + +- Creates a claim root populated with node diagnostics, version info and environment configurations +- Provides methods to serialize the claim to JSON or write it to disk +- Converts claim data into a JUnit XML report for CI integration + +### Design Notes + +- ClaimBuilder embeds a claim.Root and tracks start/end times; all output is written atomically via fatal logs on failure +- Configuration data is marshalled/unmarshalled through JSON to allow inclusion in the claim file +- JUnit XML generation builds a TestSuitesXML structure, sorting test cases by name for deterministic output + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**ClaimBuilder**](#claimbuilder) | One‑line purpose | +| [**FailureMessage**](#failuremessage) | Struct definition | +| [**SkippedMessage**](#skippedmessage) | Struct definition | +| [**TestCase**](#testcase) | Struct definition | +| [**TestSuitesXML**](#testsuitesxml) | Struct definition | +| [**Testsuite**](#testsuite) | Struct definition | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func (c *ClaimBuilder) Build(outputFile string)](#claimbuilder.build) | Completes the claim by recording its end time, aggregating check results, serialising the claim structure to JSON, writing it to a file, and logging the action. | +| [func (c *ClaimBuilder) Reset()](#claimbuilder.reset) | Initializes or reinitializes the `StartTime` field of the underlying claim metadata with the present UTC time, ensuring that subsequent claims are stamped correctly. | +| [func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Time)](#claimbuilder.tojunitxml) | Builds a JUnit‑style XML file summarizing the test results stored in the `ClaimBuilder`’s claim root. The report includes overall statistics and individual test case details, then writes it to disk. 
| +| [func CreateClaimRoot() *claim.Root](#createclaimroot) | Instantiates a `claim.Root` containing a single `claim.Claim`. The claim’s metadata records the UTC start time formatted according to `DateTimeFormatDirective`. | +| [func GenerateNodes() map[string]interface{}](#generatenodes) | Collects various node diagnostics (summary, CNI plugins, hardware info, CSI drivers) into a single map for inclusion in a claim file. | +| [func GetConfigurationFromClaimFile(claimFileName string) (*provider.TestEnvironment, error)](#getconfigurationfromclaimfile) | Reads a claim file, extracts the `Configurations` section, and converts it into a `*provider.TestEnvironment`. | +| [func MarshalClaimOutput(claimRoot *claim.Root) []byte](#marshalclaimoutput) | Converts a `claim.Root` value into an indented JSON byte slice for output. If serialization fails, the program terminates immediately with a fatal log message. | +| [func MarshalConfigurations(env *provider.TestEnvironment) ([]byte, error)](#marshalconfigurations) | Produces a JSON‑encoded byte slice of the supplied `TestEnvironment`. If the argument is nil, it falls back to the global test environment via `GetTestEnvironment()`. Any marshalling failure is reported through the logger and returned. | +| [func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error)](#newclaimbuilder) | Builds a `ClaimBuilder` that encapsulates all data required to generate a claim file. It marshals the test environment into JSON, unmarshals it back into a map for configuration inclusion, and populates the claim root with node information, version metadata, and configurations. | +| [func ReadClaimFile(claimFileName string) (data []byte, err error)](#readclaimfile) | Loads the raw byte payload from the specified claim file. It logs success or failure and returns the data or an error. 
| +| [func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error)](#sanitizeclaimfile) | Filters the claim file by removing any test results whose labels do not satisfy the supplied label expression. | +| [func([]byte, *claim.Root)()](#unmarshalclaim) | Decodes a JSON‑encoded claim into a `claim.Root` structure. If unmarshalling fails, the program logs a fatal error and exits. | +| [func UnmarshalConfigurations(configurations []byte, claimConfigurations map[string]interface{})](#unmarshalconfigurations) | Deserialises a JSON‑encoded configuration payload into the provided `map[string]interface{}`. If deserialization fails, the function logs a fatal error and terminates the process. | +| [func WriteClaimOutput(claimOutputFile string, payload []byte)](#writeclaimoutput) | Persists the serialized claim payload (`payload`) into the specified file (`claimOutputFile`). If writing fails, the program terminates with a fatal log message. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) TestSuitesXML](#populatexmlfromclaim) | Builds a `TestSuitesXML` structure representing the results of a CNF certification claim, ready for JUnit XML marshalling. | + +## Structs + +### ClaimBuilder + +Creates and writes claim files for a test environment. + +#### Fields + +| Field | Type | Description | +|------------|----------------|-------------| +| `claimRoot` | `*claim.Root` | Pointer to the root object that holds all data for a claim file. | + +#### Purpose + +`ClaimBuilder` is a helper that constructs a complete claim document based on a test environment’s state. It initializes metadata such as start and end times, injects configuration information, collects diagnostic results, and finally marshals the data into a JSON claim file or a JUnit XML report. The builder pattern allows resetting timestamps for repeated use without recreating the entire structure. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewClaimBuilder` | Constructs a new `ClaimBuilder`, populating it with environment configurations, node data, and version information. | +| `Reset` | Updates the claim’s start time to the current UTC timestamp. | +| `Build` | Finalizes the claim by setting the end time, adding reconciled results, marshaling to JSON, and writing to a specified file. | +| `ToJUnitXML` | Converts the built claim into JUnit XML format and writes it to a given file. | + +--- + +--- + +### FailureMessage + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Text` | `string` | Field documentation | +| `Message` | `string` | Field documentation | +| `Type` | `string` | Field documentation | + +--- + +### SkippedMessage + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Text` | `string` | Field documentation | +| `Messages` | `string` | Field documentation | + +--- + +### TestCase + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Failure` | `*FailureMessage` | Field documentation | +| `Text` | `string` | Field documentation | +| `Name` | `string` | Field documentation | +| `Classname` | `string` | Field documentation | +| `Status` | `string` | Field documentation | +| `Time` | `string` | Field documentation | +| `SystemErr` | `string` | Field documentation | +| `Skipped` | `*SkippedMessage` | Field documentation | + +--- + +### TestSuitesXML + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `XMLName` | `xml.Name` | Root element name (`testsuites`) used by the encoding/xml package. | +| `Text` | `string` | Character data directly under `<testsuites>`; typically unused. | +| `Tests` | `string` | Total number of test cases in the suite (attribute). 
| +| `Disabled` | `string` | Number of tests that were disabled or skipped (attribute). | +| `Errors` | `string` | Count of error‑type failures (always `"0"` in this code). | +| `Failures` | `string` | Number of test cases that failed (attribute). | +| `Time` | `string` | Total elapsed time for the suite, formatted as a decimal number of seconds. | +| `Testsuite` | `Testsuite` | Nested `<testsuite>` element containing detailed information about each test case. | + +--- + +### Testsuite + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Package` | `string` | Field documentation | +| `Tests` | `string` | Field documentation | +| `Disabled` | `string` | Field documentation | +| `Skipped` | `string` | Field documentation | +| `Errors` | `string` | Field documentation | +| `Time` | `string` | Field documentation | +| `Timestamp` | `string` | Field documentation | +| `Properties` | `struct{Text string; Property []struct{Text string; Name string; Value string}}` | Field documentation | +| `Text` | `string` | Field documentation | +| `Name` | `string` | Field documentation | +| `Failures` | `string` | Field documentation | +| `Testcase` | `[]TestCase` | Field documentation | + +--- + +## Exported Functions + +### ClaimBuilder.Build + +**Build** - Completes the claim by recording its end time, aggregating check results, serialising the claim structure to JSON, writing it to a file, and logging the action. + +#### Signature (Go) + +```go +func (c *ClaimBuilder) Build(outputFile string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Completes the claim by recording its end time, aggregating check results, serialising the claim structure to JSON, writing it to a file, and logging the action. | +| **Parameters** | `outputFile string` – Path where the claim JSON should be written. | +| **Return value** | None (void). The function logs any fatal errors internally. 
| +| **Key dependencies** | • `time.Now()` – obtains current timestamp.
• `checksdb.GetReconciledResults()` – fetches consolidated check results.
• `MarshalClaimOutput(*claim.Root)` – serialises the claim structure to indented JSON.
• `WriteClaimOutput(string, []byte)` – writes the JSON payload to disk.
• `log.Info(...)` – records a success message. | +| **Side effects** | *Mutates the receiver’s `claimRoot.Claim.Metadata.EndTime`.
* Populates `claimRoot.Claim.Results` with reconciled data.
*Generates and writes a JSON file to `outputFile`.
* Emits an informational log entry. | +| **How it fits the package** | Acts as the terminal step of the claim‑building workflow, turning an in‑memory claim representation into a persisted artifact ready for downstream consumption or archival. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get current time"} + B --> C["Set EndTime"] + C --> D{"Retrieve results"} + D --> E["Assign to Claim.Results"] + E --> F{"Marshal claim"} + F --> G["Write JSON file"] + G --> H["Log creation"] + H --> I["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ClaimBuilder.Build --> time.Now + func_ClaimBuilder.Build --> checksdb.GetReconciledResults + func_ClaimBuilder.Build --> MarshalClaimOutput + func_ClaimBuilder.Build --> WriteClaimOutput + func_ClaimBuilder.Build --> log.Info +``` + +#### Functions calling `ClaimBuilder.Build` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ClaimBuilder.Build +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" +) + +func main() { + // Assume a pre‑configured ClaimBuilder instance `builder` + var builder claimhelper.ClaimBuilder + + // Build and persist the claim to a JSON file + builder.Build("/tmp/my-claim.json") +} +``` + +--- + +### ClaimBuilder.Reset + +**Reset** - Initializes or reinitializes the `StartTime` field of the underlying claim metadata with the present UTC time, ensuring that subsequent claims are stamped correctly. + +Resets the claim builder’s start time to the current UTC timestamp formatted per the package directive. + +```go +func (c *ClaimBuilder) Reset() +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Initializes or reinitializes the `StartTime` field of the underlying claim metadata with the present UTC time, ensuring that subsequent claims are stamped correctly. 
| +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | • `time.Now()` – obtains current local time.
• `UTC()` – converts to Coordinated Universal Time.
• `Format(DateTimeFormatDirective)` – formats the timestamp according to the package’s directive. | +| **Side effects** | Mutates the internal `claimRoot` structure of the receiver, specifically setting `Claim.Metadata.StartTime`. No I/O or concurrency operations are performed. | +| **How it fits the package** | The function is part of the claim construction workflow in *claimhelper*, allowing callers to reset the timestamp before building a new claim or reusing an existing builder instance. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start Reset"] --> B["Get current time with time.Now()"] + B --> C["Convert to UTC via .UTC()"] + C --> D["Format using DateTimeFormatDirective"] + D --> E["Assign result to c.claimRoot.Claim.Metadata.StartTime"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_ClaimBuilder.Reset --> func_Format + func_ClaimBuilder.Reset --> func_UTC + func_ClaimBuilder.Reset --> func_Now +``` + +#### Functions calling `ClaimBuilder.Reset` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example + +```go +// Minimal example invoking ClaimBuilder.Reset +builder := claimhelper.NewClaimBuilder() +builder.Reset() // sets StartTime to now UTC +``` + +--- + +### ClaimBuilder.ToJUnitXML + +**ToJUnitXML** - Builds a JUnit‑style XML file summarizing the test results stored in the `ClaimBuilder`’s claim root. The report includes overall statistics and individual test case details, then writes it to disk. + +#### 1) Signature (Go) + +```go +func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Time) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a JUnit‑style XML file summarizing the test results stored in the `ClaimBuilder`’s claim root. The report includes overall statistics and individual test case details, then writes it to disk. | +| **Parameters** | `outputFile string – path where the XML should be written`
`startTime time.Time – timestamp marking the beginning of the test run`
`endTime time.Time – timestamp marking the end of the test run` | +| **Return value** | None (writes to file or terminates on error) | +| **Key dependencies** | • `populateXMLFromClaim(c.claimRoot.Claim, startTime, endTime)` – converts claim data into an XML‑compatible struct.
• `xml.MarshalIndent` from the standard library – serialises the struct into formatted XML.
• `log.Fatal` and `log.Info` from the internal logging package – report progress or abort on failure.
• `os.WriteFile` – writes the final payload to disk. | +| **Side effects** | *Mutates* the filesystem by creating/overwriting `outputFile`.
*Logs* informational messages and may terminate the program with `os.Exit(1)` if XML generation or file writing fails. | +| **How it fits the package** | The `claimhelper` package is responsible for converting claim data into various formats. `ToJUnitXML` is one of those conversion methods, providing a standard JUnit report that can be consumed by CI systems or other tooling. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Generate XML"} + B -->|"Success"| C["Marshal to bytes"] + B -->|"Error"| D["log.Fatal"] + C --> E{"Write file"} + E -->|"Success"| F["Done"] + E -->|"Error"| G["log.Fatal"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_ClaimBuilder.ToJUnitXML --> func_populateXMLFromClaim + func_ClaimBuilder.ToJUnitXML --> func_xml.MarshalIndent + func_ClaimBuilder.ToJUnitXML --> func_log.Fatal + func_ClaimBuilder.ToJUnitXML --> func_os.WriteFile +``` + +#### 5) Functions calling `ClaimBuilder.ToJUnitXML` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking ClaimBuilder.ToJUnitXML +package main + +import ( + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" +) + +func main() { + // Assume builder is already populated with claim data. + var builder *claimhelper.ClaimBuilder + + start := time.Now() + // ... run tests, update claim ... + end := time.Now() + + // Write a JUnit XML report to the desired location + builder.ToJUnitXML("report.xml", start, end) +} +``` + +--- + +### CreateClaimRoot + +**CreateClaimRoot** - Instantiates a `claim.Root` containing a single `claim.Claim`. The claim’s metadata records the UTC start time formatted according to `DateTimeFormatDirective`. + +Creates the initial claim structure with a timestamped start time for CertSuite reports. 
+ +#### Signature (Go) + +```go +func CreateClaimRoot() *claim.Root +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `claim.Root` containing a single `claim.Claim`. The claim’s metadata records the UTC start time formatted according to `DateTimeFormatDirective`. | +| **Parameters** | none | +| **Return value** | `*claim.Root` – the newly created root object, ready for further population. | +| **Key dependencies** | • `time.Now()` – obtains current wall‑clock time.
• `time.Time.UTC()` – converts to UTC.
• `time.Time.Format(DateTimeFormatDirective)` – formats the timestamp. | +| **Side effects** | No state mutation outside of the returned object; no I/O or concurrency. | +| **How it fits the package** | Provides a foundational claim root used by builders (e.g., `NewClaimBuilder`) and test utilities to seed claim data before adding configurations, nodes, and version information. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Get current time with time.Now()"] + B --> C["Convert to UTC via .UTC()"] + C --> D["Format timestamp using DateTimeFormatDirective"] + D --> E["Create claim.Root struct with formatted StartTime"] + E --> F["Return *claim.Root"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CreateClaimRoot --> func_time.Now + func_CreateClaimRoot --> func_Time.UTC + func_CreateClaimRoot --> func_Time.Format +``` + +#### Functions calling `CreateClaimRoot` (Mermaid) + +```mermaid +graph TD + func_NewClaimBuilder --> func_CreateClaimRoot +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CreateClaimRoot +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" +) + +func main() { + root := claimhelper.CreateClaimRoot() + fmt.Printf("Claim start time: %s\n", root.Claim.Metadata.StartTime) +} +``` + +--- + +### GenerateNodes + +**GenerateNodes** - Collects various node diagnostics (summary, CNI plugins, hardware info, CSI drivers) into a single map for inclusion in a claim file. + +#### 1) Signature (Go) + +```go +func GenerateNodes() map[string]interface{} +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects various node diagnostics (summary, CNI plugins, hardware info, CSI drivers) into a single map for inclusion in a claim file. | +| **Parameters** | none | +| **Return value** | `map[string]interface{}` – key/value pairs of diagnostic data. 
| +| **Key dependencies** | • `diagnostics.GetNodeJSON()`
• `diagnostics.GetCniPlugins()`
• `diagnostics.GetHwInfoAllNodes()`
• `diagnostics.GetCsiDriver()` | +| **Side effects** | none (pure function) | +| **How it fits the package** | Called by `NewClaimBuilder` to populate the `Claim.Nodes` field of a claim root. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + GenerateNodes --> GetNodeJSON["diagnostics.GetNodeJSON()"] + GenerateNodes --> GetCniPlugins["diagnostics.GetCniPlugins()"] + GenerateNodes --> GetHwInfoAllNodes["diagnostics.GetHwInfoAllNodes()"] + GenerateNodes --> GetCsiDriver["diagnostics.GetCsiDriver()"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GenerateNodes --> func_GetNodeJSON + func_GenerateNodes --> func_GetCniPlugins + func_GenerateNodes --> func_GetHwInfoAllNodes + func_GenerateNodes --> func_GetCsiDriver +``` + +#### 5) Functions calling `GenerateNodes` (Mermaid) + +```mermaid +graph TD + func_NewClaimBuilder --> func_GenerateNodes +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GenerateNodes +nodes := GenerateNodes() +fmt.Printf("Collected %d diagnostic entries\n", len(nodes)) +``` + +--- + +### GetConfigurationFromClaimFile + +**GetConfigurationFromClaimFile** - Reads a claim file, extracts the `Configurations` section, and converts it into a `*provider.TestEnvironment`. + +#### Signature (Go) + +```go +func GetConfigurationFromClaimFile(claimFileName string) (*provider.TestEnvironment, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a claim file, extracts the `Configurations` section, and converts it into a `*provider.TestEnvironment`. | +| **Parameters** | `claimFileName string` – path to the claim file. | +| **Return value** | `(*provider.TestEnvironment, error)` – populated environment struct or an error if reading/parsing fails. | +| **Key dependencies** | • `ReadClaimFile` (file I/O)
• `UnmarshalClaim` (JSON unmarshal into `claim.Root`)
• JSON marshal/unmarshal (`encoding/json`)
• Logging via `log.Error` and `fmt.Printf` | +| **Side effects** | Writes to standard output (via `Printf`) and logs errors; does not modify global state. | +| **How it fits the package** | Core helper that converts a claim file into a configuration object used by tests in the `claimhelper` package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Read file"} + B -->|"Success"| C["Unmarshal claim root"] + C --> D{"Marshal Configurations"} + D -->|"Success"| E["Unmarshal into env"] + E --> F["Return env, nil"] + B -->|"Error"| G["Log error & return"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetConfigurationFromClaimFile --> func_ReadClaimFile + func_GetConfigurationFromClaimFile --> func_UnmarshalClaim + func_GetConfigurationFromClaimFile --> encoding/json.Marshal + func_GetConfigurationFromClaimFile --> encoding/json.Unmarshal + func_GetConfigurationFromClaimFile --> fmt.Printf +``` + +#### Functions calling `GetConfigurationFromClaimFile` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetConfigurationFromClaimFile +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" +) + +func main() { + env, err := claimhelper.GetConfigurationFromClaimFile("path/to/claim.json") + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + fmt.Printf("Loaded environment: %+v\n", env) +} +``` + +--- + +### MarshalClaimOutput + +**MarshalClaimOutput** - Converts a `claim.Root` value into an indented JSON byte slice for output. If serialization fails, the program terminates immediately with a fatal log message. + +#### Signature (Go) + +```go +func MarshalClaimOutput(claimRoot *claim.Root) []byte +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts a `claim.Root` value into an indented JSON byte slice for output. 
If serialization fails, the program terminates immediately with a fatal log message. | +| **Parameters** | `claimRoot *claim.Root` – pointer to the claim structure that should be marshalled. | +| **Return value** | `[]byte` – pretty‑printed JSON representation of the claim; returned only when marshalling succeeds. | +| **Key dependencies** | • `encoding/json.MarshalIndent`
• `github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logger.Fatal` | +| **Side effects** | *Fatal log and program exit on marshalling error.* No other state is mutated. | +| **How it fits the package** | Utility used by higher‑level claim construction and sanitisation routines to persist claims in a human‑readable JSON format. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call json.MarshalIndent"} + B -- Success --> C["Return payload"] + B -- Failure --> D["log.Fatal & exit"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_MarshalClaimOutput --> func_MarshalIndent["encoding/json.MarshalIndent"] + func_MarshalClaimOutput --> func_Fatal["github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logger.Fatal"] +``` + +#### Functions calling `MarshalClaimOutput` (Mermaid) + +```mermaid +graph TD + func_ClaimBuilder_Build["ClaimBuilder.Build"] --> func_MarshalClaimOutput + func_SanitizeClaimFile["SanitizeClaimFile"] --> func_MarshalClaimOutput +``` + +#### Usage example (Go) + +```go +// Minimal example invoking MarshalClaimOutput +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/claim" +) + +func main() { + // Assume we have a populated claim.Root instance + var root *claim.Root + + // Marshal the claim to JSON bytes + payload := claimhelper.MarshalClaimOutput(root) + + fmt.Println(string(payload)) +} +``` + +--- + +--- + +### MarshalConfigurations + +**MarshalConfigurations** - Produces a JSON‑encoded byte slice of the supplied `TestEnvironment`. If the argument is nil, it falls back to the global test environment via `GetTestEnvironment()`. Any marshalling failure is reported through the logger and returned. 
+ +#### Signature (Go) + +```go +func MarshalConfigurations(env *provider.TestEnvironment) ([]byte, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a JSON‑encoded byte slice of the supplied `TestEnvironment`. If the argument is nil, it falls back to the global test environment via `GetTestEnvironment()`. Any marshalling failure is reported through the logger and returned. | +| **Parameters** | `env *provider.TestEnvironment` – pointer to the environment to serialise (may be nil). | +| **Return value** | `([]byte, error)` – JSON bytes or an error if marshalling fails. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider.GetTestEnvironment()`
• `encoding/json.Marshal`
• `github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logger.Error` | +| **Side effects** | Logs an error on marshalling failure; otherwise no state mutation. | +| **How it fits the package** | Used by claim builders to embed current test configuration into a generated claim file. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"env nil?"} + B -- Yes --> C["config = GetTestEnvironment()"] + B -- No --> D["config = env"] + C & D --> E["Marshal(config) → configurations, err"] + E --> F{"err?"} + F -- Yes --> G["log.Error(...) ; return configurations, err"] + F -- No --> H["return configurations, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_MarshalConfigurations --> func_GetTestEnvironment + func_MarshalConfigurations --> func_Marshal + func_MarshalConfigurations --> func_Error +``` + +#### Functions calling `MarshalConfigurations` + +```mermaid +graph TD + func_NewClaimBuilder --> func_MarshalConfigurations +``` + +#### Usage example (Go) + +```go +// Minimal example invoking MarshalConfigurations +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + env := provider.GetTestEnvironment() + data, err := claimhelper.MarshalConfigurations(env) + if err != nil { + fmt.Println("Failed to marshal configurations:", err) + return + } + fmt.Printf("JSON configuration: %s\n", data) +} +``` + +--- + +### NewClaimBuilder + +**NewClaimBuilder** - Builds a `ClaimBuilder` that encapsulates all data required to generate a claim file. It marshals the test environment into JSON, unmarshals it back into a map for configuration inclusion, and populates the claim root with node information, version metadata, and configurations. 
+ +#### Signature (Go) + +```go +func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ClaimBuilder` that encapsulates all data required to generate a claim file. It marshals the test environment into JSON, unmarshals it back into a map for configuration inclusion, and populates the claim root with node information, version metadata, and configurations. | +| **Parameters** | `env *provider.TestEnvironment – The current test environment containing cluster and test settings.` | +| **Return value** | `(*ClaimBuilder, error) – A populated builder or an error if configuration marshalling/unmarshalling fails.` | +| **Key dependencies** | • `os.Getenv("UNIT_TEST")`
• `log.Debug()`
• `MarshalConfigurations(env)`
• `UnmarshalConfigurations(configurations, claimConfigurations)`
• `CreateClaimRoot()`
• `GenerateNodes()`
• Diagnostics helpers (`GetVersionOcClient`, `GetVersionOcp`, `GetVersionK8s`) | +| **Side effects** | No global state is modified. The function only reads environment variables, logs a debug message, and constructs new data structures in memory. | +| **How it fits the package** | It is the primary entry point for creating a claim builder used by higher‑level orchestration (e.g., `certsuite.Run`). It prepares all information that will later be marshalled into a JSON claim file or converted to JUnit XML. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Check UNIT_TEST env"] -->|"true"| B["Return minimal ClaimBuilder"] + A -->|"false"| C["Log debug message"] + C --> D["Marshal environment → configurations"] + D --> E{"Error?"} + E -->|"yes"| F["Return error"] + E -->|"no"| G["Unmarshal into claimConfigurations map"] + G --> H["Create new ClaimRoot"] + H --> I["Set Configurations, Nodes, Versions"] + I --> J["Return ClaimBuilder with root"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewClaimBuilder --> os.Getenv + func_NewClaimBuilder --> log.Debug + func_NewClaimBuilder --> MarshalConfigurations + func_NewClaimBuilder --> UnmarshalConfigurations + func_NewClaimBuilder --> CreateClaimRoot + func_NewClaimBuilder --> GenerateNodes + func_NewClaimBuilder --> diagnostics.GetVersionOcClient + func_NewClaimBuilder --> diagnostics.GetVersionOcp + func_NewClaimBuilder --> diagnostics.GetVersionK8s +``` + +#### Functions calling `NewClaimBuilder` + +```mermaid +graph TD + certsuite.Run --> claimhelper.NewClaimBuilder +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewClaimBuilder +env := provider.GetTestEnvironment() +builder, err := claimhelper.NewClaimBuilder(env) +if err != nil { + log.Fatalf("Failed to create claim builder: %v", err) +} +claimFilePath := "results/claim.json" +// Build returns nothing; it writes the claim file and logs fatal errors internally. +builder.Build(claimFilePath) +``` + +--- + +### ReadClaimFile + 
+**ReadClaimFile** - Loads the raw byte payload from the specified claim file. It logs success or failure and returns the data or an error. + +#### Signature (Go) + +```go +func ReadClaimFile(claimFileName string) (data []byte, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Loads the raw byte payload from the specified claim file. It logs success or failure and returns the data or an error. | +| **Parameters** | `claimFileName string` – path to the claim file on disk. | +| **Return value** | `data []byte` – file contents; `err error` – non‑nil if reading fails. | +| **Key dependencies** | • `os.ReadFile` (file I/O)
• `log.Error`, `log.Info` from internal logging package | +| **Side effects** | Writes log entries to the application logger; no state mutation beyond I/O. | +| **How it fits the package** | Core helper for other claim‑handling functions (`GetConfigurationFromClaimFile`, `SanitizeClaimFile`) that need raw file data before unmarshalling or processing. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Read file"} + B -->|"Success"| C["Log success"] + B -->|"Error"| D["Log error"] + C --> E["Return data, nil"] + D --> F["Return data (empty), err"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ReadClaimFile --> func_ReadFile + func_ReadClaimFile --> func_Error + func_ReadClaimFile --> func_Info +``` + +#### Functions calling `ReadClaimFile` (Mermaid) + +```mermaid +graph TD + func_GetConfigurationFromClaimFile --> func_ReadClaimFile + func_SanitizeClaimFile --> func_ReadClaimFile +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ReadClaimFile +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" +) + +func main() { + data, err := claimhelper.ReadClaimFile("example.claim") + if err != nil { + fmt.Printf("Failed to read claim file: %v\n", err) + return + } + fmt.Printf("Claim file contents (%d bytes)\n%s\n", len(data), string(data)) +} +``` + +--- + +### SanitizeClaimFile + +**SanitizeClaimFile** - Filters the claim file by removing any test results whose labels do not satisfy the supplied label expression. + +#### Signature (Go) + +```go +func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters the claim file by removing any test results whose labels do not satisfy the supplied label expression. 
| +| **Parameters** | `claimFileName` (string) – path to the claim JSON file; `labelsFilter` (string) – a logical expression describing which tests to keep. | +| **Return value** | The same `claimFileName` on success, or an empty string and an error if reading, unmarshalling, or filtering fails. | +| **Key dependencies** | • `ReadClaimFile` (reads the file)
• `UnmarshalClaim` (parses JSON into a `claim.Root`)
• `labels.NewLabelsExprEvaluator` (creates an evaluator for the filter)
• `identifiers.GetTestIDAndLabels` (retrieves test labels)
• `MarshalClaimOutput` & `WriteClaimOutput` (writes back the sanitized claim) | +| **Side effects** | Reads from disk, writes a potentially modified claim file back to the same path, logs progress and errors. | +| **How it fits the package** | Part of the public API used by the test runner (`Run`) after generating a claim; ensures only relevant results are kept before further processing or submission. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Read file"] + B --> C{"Error?"} + C -- Yes --> D["Log error & return"] + C -- No --> E["Unmarshal JSON"] + E --> F["Iterate over results"] + F --> G["Create label evaluator"] + G --> H{"Evaluator error?"} + H -- Yes --> I["Log error & return"] + H -- No --> J["Get labels for testID"] + J --> K{"Eval matches?"} + K -- False --> L["Delete result"] + K -- True --> M["Keep"] + F --> N["Marshal back to JSON"] + N --> O["Write file"] + O --> P["Return claimFileName"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_SanitizeClaimFile --> func_ReadClaimFile + func_SanitizeClaimFile --> func_UnmarshalClaim + func_SanitizeClaimFile --> func_NewLabelsExprEvaluator + func_SanitizeClaimFile --> func_GetTestIDAndLabels + func_SanitizeClaimFile --> func_MarshalClaimOutput + func_SanitizeClaimFile --> func_WriteClaimOutput +``` + +#### Functions calling `SanitizeClaimFile` (Mermaid) + +```mermaid +graph TD + func_Run --> func_SanitizeClaimFile +``` + +#### Usage example (Go) + +```go +// Minimal example invoking SanitizeClaimFile +package main + +import ( + "fmt" + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" +) + +func main() { + out, err := claimhelper.SanitizeClaimFile("claims.json", "env==prod && type!=regression") + if err != nil { + log.Fatalf("sanitize failed: %v", err) + } + fmt.Printf("Sanitized file written to %s\n", out) +} +``` + +--- + +### UnmarshalClaim + +**UnmarshalClaim** - Decodes a JSON‑encoded claim into a `claim.Root` 
structure. If unmarshalling fails, the program logs a fatal error and exits. + +#### 1) Signature (Go) + +```go +func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Decodes a JSON‑encoded claim into a `claim.Root` structure. If unmarshalling fails, the program logs a fatal error and exits. | +| **Parameters** | `claimFile []byte` – raw JSON data; `claimRoot *claim.Root` – pointer to the target struct. | +| **Return value** | None (the function writes directly into the provided pointer). | +| **Key dependencies** | 
• `log.Fatal` from the internal logging package | +| **Side effects** | • Mutates the object pointed to by `claimRoot`.
• May terminate the process via `log.Fatal` on error. | +| **How it fits the package** | Central helper for reading claim files; used by higher‑level functions that need configuration data from a claim. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph UnmarshalClaim + A["Receive `claimFile` and `claimRoot`"] --> B["Call json.Unmarshal"] + B --> C{"Error?"} + C -- Yes --> D["log.Fatal (terminates)"] + C -- No --> E["Populate `claimRoot`"] + end +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_UnmarshalClaim --> func_encoding/json.Unmarshal + func_UnmarshalClaim --> func_log.Logger.Fatal +``` + +#### 5) Functions calling `UnmarshalClaim` (Mermaid) + +```mermaid +graph TD + func_GetConfigurationFromClaimFile --> func_UnmarshalClaim + func_SanitizeClaimFile --> func_UnmarshalClaim +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking UnmarshalClaim +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/claim" +) + +func main() { + data := []byte(`{"some":"json"}`) // replace with real claim data + var root claim.Root + claimhelper.UnmarshalClaim(data, &root) + // root is now populated; proceed with further logic +} +``` + +--- + +### UnmarshalConfigurations + +**UnmarshalConfigurations** - Deserialises a JSON‑encoded configuration payload into the provided `map[string]interface{}`. If deserialization fails, the function logs a fatal error and terminates the process. + +#### Signature (Go) + +```go +func UnmarshalConfigurations(configurations []byte, claimConfigurations map[string]interface{}) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Deserialises a JSON‑encoded configuration payload into the provided `map[string]interface{}`. If deserialization fails, the function logs a fatal error and terminates the process. 
| +| **Parameters** | - `configurations []byte` – raw JSON data.
- `claimConfigurations map[string]interface{}` – destination map to populate. | +| **Return value** | None (function has no return values). | +| **Key dependencies** | • `encoding/json.Unmarshal`
• `github.com/redhat-best-practices-for-k8s/certsuite/internal/log.Logger.Fatal` | +| **Side effects** | Mutates the supplied map with parsed data; may exit the program via a fatal log on error. No other I/O or concurrency. | +| **How it fits the package** | Used by `NewClaimBuilder` to populate claim configuration fields from a marshalled byte slice before constructing the claim root. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive configurations byte slice"] --> B["Call json.Unmarshal"] + B --> C{"Unmarshal succeeded?"} + C -- Yes --> D["Populate claimConfigurations map"] + C -- No --> E["log.Fatal error, exit process"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_UnmarshalConfigurations --> func_EncodingJSON_Unmarshal + func_UnmarshalConfigurations --> func_Log_Fatal +``` + +#### Functions calling `UnmarshalConfigurations` (Mermaid) + +```mermaid +graph TD + func_NewClaimBuilder --> func_UnmarshalConfigurations +``` + +#### Usage example (Go) + +```go +// Minimal example invoking UnmarshalConfigurations +package main + +import ( + "encoding/json" + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" +) + +func main() { + // Example JSON configuration + data := []byte(`{"key":"value","number":42}`) + + configMap := map[string]interface{}{} + claimhelper.UnmarshalConfigurations(data, configMap) + + fmt.Printf("Parsed config: %#v\n", configMap) +} +``` + +This example demonstrates how to supply a JSON byte slice and receive the parsed configuration in a map. If `data` were malformed, the program would terminate with a fatal log message. + +--- + +### WriteClaimOutput + +**WriteClaimOutput** - Persists the serialized claim payload (`payload`) into the specified file (`claimOutputFile`). If writing fails, the program terminates with a fatal log message. 
+ +#### Signature (Go) + +```go +func WriteClaimOutput(claimOutputFile string, payload []byte) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Persists the serialized claim payload (`payload`) into the specified file (`claimOutputFile`). If writing fails, the program terminates with a fatal log message. | +| **Parameters** | `claimOutputFile string` – Path where the claim should be written.
`payload []byte` – Serialized claim data to write. | +| **Return value** | None (the function has side‑effects only). | +| **Key dependencies** | • `log.Info` – logs normal operation
• `os.WriteFile` – writes bytes to disk
• `log.Fatal` – logs fatal error and exits on failure | +| **Side effects** | Writes a file with permissions defined by `claimFilePermissions`. On error, the program exits. | +| **How it fits the package** | Core helper used by claim construction (`ClaimBuilder.Build`) and sanitization (`SanitizeClaimFile`) to persist claim data. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Log Writing claim data"] + B --> C{"Write file"} + C -- Success --> D["Return"] + C -- Failure --> E["Log fatal and exit"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_WriteClaimOutput --> func_log.Info + func_WriteClaimOutput --> os.WriteFile + func_WriteClaimOutput --> func_log.Fatal +``` + +#### Functions calling `WriteClaimOutput` + +```mermaid +graph TD + ClaimBuilder.Build --> WriteClaimOutput + SanitizeClaimFile --> WriteClaimOutput +``` + +#### Usage example (Go) + +```go +// Minimal example invoking WriteClaimOutput +package main + +import ( + "log" +) + +func main() { + payload := []byte(`{"example":"data"}`) + WriteClaimOutput("claim.json", payload) +} +``` + +--- + +## Local Functions + +### populateXMLFromClaim + +**populateXMLFromClaim** - Builds a `TestSuitesXML` structure representing the results of a CNF certification claim, ready for JUnit XML marshalling. + +#### Signature (Go) + +```go +func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) TestSuitesXML +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `TestSuitesXML` structure representing the results of a CNF certification claim, ready for JUnit XML marshalling. | +| **Parameters** | `c claim.Claim` – the claim containing test results.
`startTime time.Time` – overall test run start.
`endTime time.Time` – overall test run end. | +| **Return value** | `TestSuitesXML` – fully populated XML data structure. | +| **Key dependencies** | *`sort.Strings` – sorts test IDs.
* `strconv.Itoa`, `strconv.FormatFloat` – convert numbers to strings.
*`time.Parse`, `time.Sub`, `time.Now().UTC()` – handle timestamps and durations.
* `strings.Split` – strip nanosecond suffix from claim times. | +| **Side effects** | No external I/O or concurrency; only creates in‑memory data structures. | +| **How it fits the package** | Internally used by `ClaimBuilder.ToJUnitXML` to convert a `claim.Claim` into JUnit XML format for reporting. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Collect test IDs"] --> B["Sort IDs"] + B --> C["Initialize TestSuitesXML"] + C --> D["Count failures & skips"] + D --> E["Set suite-level counters"] + E --> F["Loop over sorted IDs"] + F --> G["Parse start/end times"] + G --> H["Compute duration"] + H --> I["Populate skipped/failure messages"] + I --> J["Append TestCase to suite"] + J --> K["Return xmlOutput"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_populateXMLFromClaim --> func_append + func_populateXMLFromClaim --> sort.Strings + func_populateXMLFromClaim --> strconv.Itoa + func_populateXMLFromClaim --> strconv.FormatFloat + func_populateXMLFromClaim --> time.Parse + func_populateXMLFromClaim --> time.Sub + func_populateXMLFromClaim --> strings.Split + func_populateXMLFromClaim --> log.Error +``` + +#### Functions calling `populateXMLFromClaim` (Mermaid) + +```mermaid +graph TD + func_ClaimBuilder_ToJUnitXML --> func_populateXMLFromClaim +``` + +#### Usage example (Go) + +```go +// Minimal example invoking populateXMLFromClaim +import ( + "time" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claimhelper/claim" +) + +func main() { + // Assume `c` is a populated claim.Claim + var c claim.Claim + start := time.Now() + // ... run tests, populate c ... + end := time.Now() + + xmlData := claimhelper.populateXMLFromClaim(c, start, end) + // xmlData can now be marshalled to XML and written to file. 
+} +``` + +--- diff --git a/docs/pkg/collector/collector.md b/docs/pkg/collector/collector.md new file mode 100644 index 000000000..0e5fd68fc --- /dev/null +++ b/docs/pkg/collector/collector.md @@ -0,0 +1,353 @@ +# Package collector + +**Path**: `pkg/collector` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [SendClaimFileToCollector](#sendclaimfiletocollector) +- [Local Functions](#local-functions) + - [addClaimFileToPostRequest](#addclaimfiletopostrequest) + - [addVarFieldsToPostRequest](#addvarfieldstopostrequest) + - [createSendToCollectorPostRequest](#createsendtocollectorpostrequest) + +## Overview + +The collector package builds and transmits an HTTP POST request that uploads a claim file together with execution metadata to a collector service. It is used by Certsuite when reporting the results of a certification run. + +### Key Features + +- Creates a multipart/form-data request containing the claim file, executed_by, partner_name, and decoded_password fields +- Handles timeouts via the collectorUploadTimeout constant and encapsulates request construction and sending logic +- Separates request creation from execution to allow reuse or testing of individual steps + +### Design Notes + +- The package deliberately splits request assembly (addClaimFileToPostRequest, addVarFieldsToPostRequest) from the high‑level SendClaimFileToCollector function for easier unit testing and potential future extensions +- There is no retry logic; a single HTTP attempt is made which may be insufficient in flaky network environments +- Best practice: validate file existence before calling SendClaimFileToCollector and handle returned errors appropriately + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func SendClaimFileToCollector(endPoint, claimFilePath, executedBy, partnerName, password string) error](#sendclaimfiletocollector) | Builds and sends an HTTP `POST` request containing the claim file 
and associated metadata to a collector service. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func addClaimFileToPostRequest(w *multipart.Writer, claimFilePath string) error](#addclaimfiletopostrequest) | Reads the file at `claimFilePath` and appends it to a multipart form writer as a file field named “claimFile”. | +| [func addVarFieldsToPostRequest(w *multipart.Writer, executedBy, partnerName, password string) error](#addvarfieldstopostrequest) | Appends three text fields (`executed_by`, `partner_name`, `decoded_password`) to a multipart form‑data writer. | +| [func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password string) (*http.Request, error)](#createsendtocollectorpostrequest) | Constructs an `*http.Request` that posts a claim file along with metadata (executed by, partner name, decoded password) to the collector endpoint. | + +## Exported Functions + +### SendClaimFileToCollector + +**SendClaimFileToCollector** - Builds and sends an HTTP `POST` request containing the claim file and associated metadata to a collector service. + +#### Signature (Go) + +```go +func SendClaimFileToCollector(endPoint, claimFilePath, executedBy, partnerName, password string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and sends an HTTP `POST` request containing the claim file and associated metadata to a collector service. | +| **Parameters** | `endPoint string` – URL of the collector; `claimFilePath string` – local path to the claim file; `executedBy string` – user/agent that ran the test; `partnerName string` – name of the partner; `password string` – authentication credential for the collector. | +| **Return value** | `error` – non‑nil if request creation or execution fails. | +| **Key dependencies** | • `createSendToCollectorPostRequest` (constructs multipart form data)
• `http.Client.Do` (executes request)
• `http.Client{Timeout: collectorUploadTimeout}` (configures client) | +| **Side effects** | Performs network I/O; does not modify global state or the file system. | +| **How it fits the package** | Part of the `collector` sub‑package, used by higher‑level test orchestration to transmit results to an external collector service. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Build multipart POST request via createSendToCollectorPostRequest"] + B --> C["Create HTTP client with timeout"] + C --> D["Execute request using client.Do"] + D --> E["Close response body"] + E --> F["Return nil or error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_SendClaimFileToCollector --> func_createSendToCollectorPostRequest + func_SendClaimFileToCollector --> func_Do + func_SendClaimFileToCollector --> func_Close +``` + +#### Functions calling `SendClaimFileToCollector` (Mermaid) + +```mermaid +graph TD + func_Run --> func_SendClaimFileToCollector +``` + +#### Usage example (Go) + +```go +// Minimal example invoking SendClaimFileToCollector +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector" +) + +func main() { + endpoint := "https://example.com/collect" + filePath := "/tmp/claim.json" + executedBy := "certsuite-runner" + partnerName := "AcmeCorp" + password := "secret" + + if err := collector.SendClaimFileToCollector(endpoint, filePath, executedBy, partnerName, password); err != nil { + log.Fatalf("Failed to send claim: %v", err) + } +} +``` + +--- + +--- + +## Local Functions + +### addClaimFileToPostRequest + +**addClaimFileToPostRequest** - Reads the file at `claimFilePath` and appends it to a multipart form writer as a file field named “claimFile”. 
+ +#### Signature (Go) + +```go +func addClaimFileToPostRequest(w *multipart.Writer, claimFilePath string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads the file at `claimFilePath` and appends it to a multipart form writer as a file field named “claimFile”. | +| **Parameters** | *`w`* – pointer to `multipart.Writer`; *`claimFilePath`* – path of the claim file to upload. | +| **Return value** | `error` – non‑nil if opening the file, creating the form part, or copying data fails. | +| **Key dependencies** | • `os.Open` – opens the claim file.
• `w.CreateFormFile` – creates a new multipart form field for the file.
• `io.Copy` – streams the file contents into the form writer.
• `defer f.Close()` – ensures the file descriptor is released. | +| **Side effects** | I/O: opens and reads a file; writes its bytes to the provided `multipart.Writer`. No global state mutation. | +| **How it fits the package** | Used by `createSendToCollectorPostRequest` to embed a claim file in an HTTP POST request sent to the collector endpoint. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + OpenFile --> CreateFormField + CreateFormField --> CopyContents + CopyContents --> ReturnSuccess +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_addClaimFileToPostRequest --> os.Open + func_addClaimFileToPostRequest --> multipart.Writer.CreateFormFile + func_addClaimFileToPostRequest --> io.Copy +``` + +#### Functions calling `addClaimFileToPostRequest` (Mermaid) + +```mermaid +graph TD + createSendToCollectorPostRequest --> addClaimFileToPostRequest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking addClaimFileToPostRequest +var buf bytes.Buffer +w := multipart.NewWriter(&buf) + +// Path to the claim file on disk +claimPath := "/tmp/claim.json" + +if err := addClaimFileToPostRequest(w, claimPath); err != nil { + log.Fatalf("failed to attach claim file: %v", err) +} +w.Close() + +// buf now contains a multipart/form-data body that can be used in an HTTP request +``` + +--- + +### addVarFieldsToPostRequest + +**addVarFieldsToPostRequest** - Appends three text fields (`executed_by`, `partner_name`, `decoded_password`) to a multipart form‑data writer. + +#### 1) Signature (Go) + +```go +func addVarFieldsToPostRequest(w *multipart.Writer, executedBy, partnerName, password string) error +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Appends three text fields (`executed_by`, `partner_name`, `decoded_password`) to a multipart form‑data writer. | +| **Parameters** | `w *multipart.Writer` – the multipart writer to write into.
`executedBy string` – value for “executed_by”.
`partnerName string` – value for “partner_name”.
`password string` – value for “decoded_password”. | +| **Return value** | `error` – non‑nil if any form field creation or write fails. | +| **Key dependencies** | • `w.CreateFormField(name)`
• `fw.Write([]byte(value))` | +| **Side effects** | Writes three fields to the provided multipart writer; does not close the writer. | +| **How it fits the package** | Used by `createSendToCollectorPostRequest` to populate metadata needed by the collector endpoint. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create form field executed_by"] --> B{"Write executedBy"} + B -->|"success"| C["Create form field partner_name"] + C --> D{"Write partnerName"} + D -->|"success"| E["Create form field decoded_password"] + E --> F{"Write password"} + F -->|"success"| G["Return nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_addVarFieldsToPostRequest --> func_CreateFormField + func_addVarFieldsToPostRequest --> func_Write +``` + +#### 5) Functions calling `addVarFieldsToPostRequest` (Mermaid) + +```mermaid +graph TD + func_createSendToCollectorPostRequest --> func_addVarFieldsToPostRequest +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking addVarFieldsToPostRequest +package main + +import ( + "bytes" + "mime/multipart" +) + +func main() { + var buf bytes.Buffer + w := multipart.NewWriter(&buf) + + err := addVarFieldsToPostRequest(w, "alice", "partnerX", "s3cr3t") + if err != nil { + panic(err) + } + // Remember to close the writer before using the buffer. + _ = w.Close() +} +``` + +--- + +### createSendToCollectorPostRequest + +**createSendToCollectorPostRequest** - Constructs an `*http.Request` that posts a claim file along with metadata (executed by, partner name, decoded password) to the collector endpoint. + +```go +func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password string) (*http.Request, error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs an `*http.Request` that posts a claim file along with metadata (executed by, partner name, decoded password) to the collector endpoint. 
| +| **Parameters** | `endPoint string` – URL of the collector service.
`claimFilePath string` – Local path to the claim file.
`executedBy string` – Identifier of the user/process performing the upload.
`partnerName string` – Name of the partner organization.
`password string` – Decoded password for authentication. | +| **Return value** | `*http.Request` containing the multipart form data, or an error if any step fails. | +| **Key dependencies** | • `mime/multipart.NewWriter` – creates a writer for multipart content.
• `addClaimFileToPostRequest` – inserts the claim file into the form.
• `addVarFieldsToPostRequest` – adds textual fields to the form.
• `http.NewRequest` – builds the HTTP request.
• `w.FormDataContentType()` – sets the appropriate `Content-Type`. | +| **Side effects** | No global state is modified. The function writes to a local buffer and returns the constructed request; no network I/O occurs until the caller executes it. | +| **How it fits the package** | This helper encapsulates the repetitive logic of preparing multipart POST bodies used by `SendClaimFileToCollector`. It isolates file handling, field addition, and header configuration, promoting reuse within the collector client. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Create buffer"} + B --> C["multipart.NewWriter(&buffer)"] + C --> D["addClaimFileToPostRequest"] + D --> E["addVarFieldsToPostRequest"] + E --> F["w.Close()"] + F --> G["http.NewRequest(POST, endPoint, &buffer)"] + G --> H["Set Content-Type header"] + H --> I["Return request or error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_createSendToCollectorPostRequest --> mime/multipart.NewWriter + func_createSendToCollectorPostRequest --> addClaimFileToPostRequest + func_createSendToCollectorPostRequest --> addVarFieldsToPostRequest + func_createSendToCollectorPostRequest --> http.NewRequest + func_createSendToCollectorPostRequest --> FormDataContentType +``` + +#### Functions calling `createSendToCollectorPostRequest` + +```mermaid +graph TD + SendClaimFileToCollector --> createSendToCollectorPostRequest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createSendToCollectorPostRequest +package main + +import ( + "log" + "net/http" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/collector" +) + +func main() { + endPoint := "https://collector.example.com/upload" + claimFilePath := "/tmp/claim.zip" + executedBy := "admin" + partnerName := "ExamplePartner" + password := "secret" + + req, err := collector.CreateSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password) + if err != nil { + log.Fatalf("Failed to 
create request: %v", err) + } + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + log.Fatalf("Upload failed: %v", err) + } + defer resp.Body.Close() + + log.Printf("Response status: %s", resp.Status) +} +``` + +*Note:* The exported name `CreateSendToCollectorPostRequest` is used in the example to illustrate public access; internally, the function is unexported (`createSendToCollectorPostRequest`). + +--- diff --git a/docs/pkg/compatibility/compatibility.md b/docs/pkg/compatibility/compatibility.md new file mode 100644 index 000000000..c1ef41e21 --- /dev/null +++ b/docs/pkg/compatibility/compatibility.md @@ -0,0 +1,522 @@ +# Package compatibility + +**Path**: `pkg/compatibility` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [VersionInfo](#versioninfo) +- [Exported Functions](#exported-functions) + - [BetaRHCOSVersionsFoundToMatch](#betarhcosversionsfoundtomatch) + - [DetermineOCPStatus](#determineocpstatus) + - [FindMajorMinor](#findmajorminor) + - [GetLifeCycleDates](#getlifecycledates) + - [IsRHCOSCompatible](#isrhcoscompatible) + - [IsRHELCompatible](#isrhelcompatible) + +## Overview + +The compatibility package supplies utilities for determining whether a given RHCOS or RHEL machine version can run on an OpenShift (OCP) release and classifies OCP releases into lifecycle states (Pre‑GA, GA, Maintenance Support, EOL). It also exposes helper functions for extracting major‑minor versions and mapping static lifecycle dates. 
+ +### Key Features + +- Classifies OCP releases into lifecycle stages via DetermineOCPStatus using a predefined date map +- Provides compatibility checks for RHCOS and RHEL machine versions against an OpenShift release +- Utility to trim semantic versions to major.minor components + +### Design Notes + +- Lifecycle data is stored in a static map; adding new releases requires updating the map +- Beta releases are handled specially: they must match exactly on major‑minor before being considered compatible +- Compatibility checks use hashicorp/go-version for robust version comparisons + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**VersionInfo**](#versioninfo) | One-line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool](#betarhcosversionsfoundtomatch) | Determines if the supplied machine and OCP versions are identical beta releases, using only their major‑minor components. | +| [func DetermineOCPStatus(version string, date time.Time) string](#determineocpstatus) | Classifies an OpenShift release (`version`) into one of four lifecycle states—Pre‑GA, GA, Maintenance Support (MS), or End‑of‑Life (EOL)—based on the supplied `date`. Returns `OCPStatusUnknown` if inputs are invalid or no matching lifecycle data exists. | +| [func FindMajorMinor(version string) string](#findmajorminor) | Returns a string containing only the major and minor parts of a semantic‑style version, e.g. `"4.12"` from `"4.12.3"`. | +| [func GetLifeCycleDates() map[string]VersionInfo](#getlifecycledates) | Returns the static mapping (`ocpLifeCycleDates`) that contains lifecycle dates and related metadata for each OpenShift version. | +| [func IsRHCOSCompatible(machineVersion, ocpVersion string) bool](#isrhcoscompatible) | Determines whether the supplied RHCOS machine version can run on a given OpenShift (OCP) release. 
It handles normal releases, beta versions, and validates against lifecycle data. | +| [func IsRHELCompatible(machineVersion, ocpVersion string) bool](#isrhelcompatible) | Checks whether the supplied RHEL `machineVersion` is compatible with the specified OpenShift (`ocpVersion`) based on internal lifecycle data. | + +## Structs + +### VersionInfo + +Represents lifecycle dates and supported operating system versions for a specific product release. + +#### Fields + +| Field | Type | Description | +|----------------------|---------------|-------------| +| `GADate` | `time.Time` | General Availability Date – the date the product was first publicly released. | +| `FSEDate` | `time.Time` | Full Support Ends Date – the last day full support is provided. | +| `MSEDate` | `time.Time` | Maintenance Support Ends Date – the final day maintenance updates are issued. | +| `MinRHCOSVersion` | `string` | Minimum RHCOS (Red Hat CoreOS) version that can run this release. | +| `RHELVersionsAccepted` | `[]string` | List of supported RHEL versions, expressed either as specific releases or ranges such as “7.9 or later” or “7.9 and 8.4”. | + +#### Purpose + +`VersionInfo` holds the key dates for a product’s support lifecycle along with the operating‑system compatibility constraints. It is used to determine whether a given installation falls within an active support window and which OS versions are allowed. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetLifeCycleDates()` | Returns a map of release identifiers to their corresponding `VersionInfo` objects, providing access to the lifecycle data for all supported releases. | + +--- + +## Exported Functions + +### BetaRHCOSVersionsFoundToMatch + +**BetaRHCOSVersionsFoundToMatch** - Determines if the supplied machine and OCP versions are identical beta releases, using only their major‑minor components. + +Checks whether two beta RHCOS version strings match after normalising to major.minor form. 
+ +```go +func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the supplied machine and OCP versions are identical beta releases, using only their major‑minor components. | +| **Parameters** | `machineVersion` (string) – version of the node; `ocpVersion` (string) – version of the OpenShift cluster. | +| **Return value** | `bool` – true if both versions exist in the beta list and match after trimming to major.minor, otherwise false. | +| **Key dependencies** | • `FindMajorMinor(version string)` – extracts major.minor.
• `stringhelper.StringInSlice(s []T, str T, containsCheck bool)` – checks membership in the beta‑version slice. | +| **Side effects** | None; pure function with no I/O or state mutation. | +| **How it fits the package** | Supports compatibility checks for RHCOS by handling special cases where beta releases should be considered equivalent to their corresponding OCP beta releases. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Normalise ocpVersion"} + B --> C["ocpVersion = FindMajorMinor(ocpVersion)"] + C --> D{"Normalise machineVersion"} + D --> E["machineVersion = FindMajorMinor(machineVersion)"] + E --> F{"Check beta list membership"} + F -->|"Both present"| G{"Compare versions"} + G --> H{"Match?"} + H -- Yes --> I["Return true"] + H -- No --> J["Return false"] + F -- Missing --> K["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_BetaRHCOSVersionsFoundToMatch --> func_FindMajorMinor + func_BetaRHCOSVersionsFoundToMatch --> func_StringInSlice +``` + +#### Functions calling `BetaRHCOSVersionsFoundToMatch` + +```mermaid +graph TD + func_IsRHCOSCompatible --> func_BetaRHCOSVersionsFoundToMatch +``` + +#### Usage example + +```go +// Minimal example invoking BetaRHCOSVersionsFoundToMatch +import "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility" + +func main() { + machineVer := "4.12.0-rc2" + ocpVer := "4.12.0-rc2" + + if compatibility.BetaRHCOSVersionsFoundToMatch(machineVer, ocpVer) { + println("Beta versions match") + } else { + println("Beta versions do not match") + } +} +``` + +--- + +### DetermineOCPStatus + +**DetermineOCPStatus** - Classifies an OpenShift release (`version`) into one of four lifecycle states—Pre‑GA, GA, Maintenance Support (MS), or End‑of‑Life (EOL)—based on the supplied `date`. Returns `OCPStatusUnknown` if inputs are invalid or no matching lifecycle data exists. 
+ +#### Signature (Go) + +```go +func DetermineOCPStatus(version string, date time.Time) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Classifies an OpenShift release (`version`) into one of four lifecycle states—Pre‑GA, GA, Maintenance Support (MS), or End‑of‑Life (EOL)—based on the supplied `date`. Returns `OCPStatusUnknown` if inputs are invalid or no matching lifecycle data exists. | +| **Parameters** | `version string` – OpenShift release in *major.minor.patch* form.
`date time.Time` – The date to compare against lifecycle milestones. | +| **Return value** | `string` – One of the constants: `OCPStatusPreGA`, `OCPStatusGA`, `OCPStatusMS`, `OCPStatusEOL`, or `OCPStatusUnknown`. | +| **Key dependencies** | • `strings.Split`
• `GetLifeCycleDates()`
• `time.Time.IsZero`, `Before`, `Equal`, `After`
• `IsZero` helper (from standard library) | +| **Side effects** | No mutation of external state; purely functional. | +| **How it fits the package** | Provides lifecycle‑aware decision logic used by autodiscovery to label clusters with their OpenShift support status. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Validate inputs"] --> B{"Empty or zero?"} + B -- Yes --> C["Return OCPStatusUnknown"] + B -- No --> D["Split version into major.minor"] + D --> E["Lookup lifecycleDates map"] + E -- Not found --> F["Return OCPStatusUnknown"] + E -- Found --> G["Ensure FSEDate fallback to MSEDate if missing"] + G --> H{"date < GADate?"} + H -- Yes --> I["Return OCPStatusPreGA"] + H -- No --> J{"GADate <= date < FSEDate?"} + J -- Yes --> K["Return OCPStatusGA"] + J -- No --> L{"FSEDate <= date < MSEDate?"} + L -- Yes --> M["Return OCPStatusMS"] + L -- No --> N["Return OCPStatusEOL"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_DetermineOCPStatus --> strings.Split + func_DetermineOCPStatus --> GetLifeCycleDates + func_DetermineOCPStatus --> time.Time.IsZero + func_DetermineOCPStatus --> time.Time.Before + func_DetermineOCPStatus --> time.Time.Equal + func_DetermineOCPStatus --> time.Time.After +``` + +#### Functions calling `DetermineOCPStatus` + +```mermaid +graph TD + DoAutoDiscover --> DetermineOCPStatus +``` + +#### Usage example (Go) + +```go +// Minimal example invoking DetermineOCPStatus +import ( + "fmt" + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility" +) + +func main() { + version := "4.12.0" + date := time.Date(2023, 7, 1, 0, 0, 0, 0, time.UTC) + status := compatibility.DetermineOCPStatus(version, date) + fmt.Println("OpenShift", version, "status:", status) +} +``` + +--- + +### FindMajorMinor + +**FindMajorMinor** - Returns a string containing only the major and minor parts of a semantic‑style version, e.g. `"4.12"` from `"4.12.3"`. 
+ +#### Signature (Go) + +```go +func FindMajorMinor(version string) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a string containing only the major and minor parts of a semantic‑style version, e.g. `"4.12"` from `"4.12.3"`. | +| **Parameters** | `version` string – a dotted version such as `"X.Y.Z"`. | +| **Return value** | A new string formatted as `"X.Y"`, where X is the major and Y the minor component. | +| **Key dependencies** | * `strings.Split` – splits the input on dots to isolate components. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Normalises version strings for compatibility checks (used by functions like `IsRHCOSCompatible`). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive input string"] --> B{"Split on ."} + B --> C["Take first two elements"] + C --> D["Concatenate with ."] + D --> E["Return result"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_FindMajorMinor --> func_Split +``` + +#### Functions calling `FindMajorMinor` + +```mermaid +graph TD + func_BetaRHCOSVersionsFoundToMatch --> func_FindMajorMinor + func_IsRHCOSCompatible --> func_FindMajorMinor +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FindMajorMinor +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility" +) + +func main() { + version := "4.12.3" + fmt.Println(compatibility.FindMajorMinor(version)) // Output: 4.12 +} +``` + +--- + +### GetLifeCycleDates + +**GetLifeCycleDates** - Returns the static mapping (`ocpLifeCycleDates`) that contains lifecycle dates and related metadata for each OpenShift version. 
+ +#### Signature (Go) + +```go +func GetLifeCycleDates() map[string]VersionInfo +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the static mapping (`ocpLifeCycleDates`) that contains lifecycle dates and related metadata for each OpenShift version. | +| **Parameters** | None | +| **Return value** | `map[string]VersionInfo` – keys are “major.minor” strings (e.g., `"4.12"`) and values hold dates such as GA, FSE, MSE along with supported RHEL/RHCOS versions. | +| **Key dependencies** | *Uses the package‑level variable `ocpLifeCycleDates`. No external calls.* | +| **Side effects** | None; purely read‑only data access. | +| **How it fits the package** | Provides a central lookup for lifecycle dates used by other compatibility checks (e.g., determining OCP status or verifying RHEL/RHCOS support). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetLifeCycleDates --> Return(ocpLifeCycleDates) +``` + +#### Function dependencies (Mermaid) + +None – this function calls no other functions; it only returns the package‑level `ocpLifeCycleDates` variable. + +#### Functions calling `GetLifeCycleDates` (Mermaid) + +```mermaid +graph TD + func_DetermineOCPStatus --> func_GetLifeCycleDates + func_IsRHCOSCompatible --> func_GetLifeCycleDates + func_IsRHELCompatible --> func_GetLifeCycleDates +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetLifeCycleDates +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility" +) + +func main() { + lc := compatibility.GetLifeCycleDates() + // Print the GA date for OpenShift 4.12, if available. + if info, ok := lc["4.12"]; ok { + fmt.Printf("GA date for 4.12: %s\n", info.GADate) + } else { + fmt.Println("Lifecycle data for 4.12 not found.") + } +} +``` + +--- + +### IsRHCOSCompatible + +**IsRHCOSCompatible** - Determines whether the supplied RHCOS machine version can run on a given OpenShift (OCP) release. 
It handles normal releases, beta versions, and validates against lifecycle data. + +#### Signature (Go) + +```go +func IsRHCOSCompatible(machineVersion, ocpVersion string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the supplied RHCOS machine version can run on a given OpenShift (OCP) release. It handles normal releases, beta versions, and validates against lifecycle data. | +| **Parameters** | `machineVersion string` – RHCOS version reported by the node.
`ocpVersion string` – OCP cluster version to compare against. | +| **Return value** | `bool` – `true` if the machine version is allowed for the specified OpenShift release; otherwise `false`. | +| **Key dependencies** | • `BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion)`
• `FindMajorMinor(ocpVersion)`
• `GetLifeCycleDates()`
• `gv.NewVersion` (github.com/hashicorp/go-version)
• `log.Error` (internal logger) | +| **Side effects** | None – pure comparison logic; only logs errors via the internal logger. | +| **How it fits the package** | Core compatibility check used by tests and runtime validation to enforce that control‑plane nodes run RHCOS compatible with the cluster release. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Validate inputs"} + B -- invalid --> C["Return false"] + B -- valid --> D{"Beta check"} + D -- true --> E["Return true"] + D -- false --> F["Parse OCP version"] + F --> G{"Lifecycle lookup"} + G -- not found --> H["Return false"] + G -- found --> I["Create gv.Version objects"] + I --> J{"Compare machine ≥ minRHCOS"} + J -- true --> K["Return true"] + J -- false --> L["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsRHCOSCompatible --> func_BetaRHCOSVersionsFoundToMatch + func_IsRHCOSCompatible --> func_FindMajorMinor + func_IsRHCOSCompatible --> func_GetLifeCycleDates + func_IsRHCOSCompatible --> gv.NewVersion + func_IsRHCOSCompatible --> log.Error +``` + +#### Functions calling `IsRHCOSCompatible` (Mermaid) + +```mermaid +graph TD + testNodeOperatingSystemStatus --> IsRHCOSCompatible +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsRHCOSCompatible +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility" +) + +func main() { + machine := "4.12.1" // RHCOS node version + cluster := "4.12.0" // OpenShift release + + if compatibility.IsRHCOSCompatible(machine, cluster) { + fmt.Println("Node is compatible with the cluster.") + } else { + fmt.Println("Node is NOT compatible with the cluster.") + } +} +``` + +--- + +--- + +### IsRHELCompatible + +**IsRHELCompatible** - Checks whether the supplied RHEL `machineVersion` is compatible with the specified OpenShift (`ocpVersion`) based on internal lifecycle data. 
+ +#### 1) Signature (Go) + +```go +func IsRHELCompatible(machineVersion, ocpVersion string) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the supplied RHEL `machineVersion` is compatible with the specified OpenShift (`ocpVersion`) based on internal lifecycle data. | +| **Parameters** | `machineVersion string –` RHEL version to validate (e.g., `"8.6"`).
`ocpVersion string –` OpenShift release identifier (e.g., `"4.12"`). | +| **Return value** | `bool` – `true` if the machine version is supported for that OpenShift release, otherwise `false`. | +| **Key dependencies** | • `GetLifeCycleDates()` – fetches lifecycle mapping.
• `len()` – checks slice length.
• `github.com/hashicorp/go-version.NewVersion` – parses semantic versions.
• `GreaterThanOrEqual` method of the parsed version. | +| **Side effects** | None; pure function with no I/O or state mutation. | +| **How it fits the package** | Provides core compatibility logic used by tests and potentially runtime checks for node operating systems within CertSuite’s compatibility module. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check empty inputs"] -->|"false"| B{"Retrieve lifecycle info"} + B --> C{"OCP version present?"} + C -->|"yes"| D{"≥2 accepted RHEL versions?"} + D -->|"yes"| E["Exact match loop"] + E --> F["Return true if found"] + D -->|"no"| G["Parse machine & entry versions"] + G --> H["Compare mv >= ev"] + H --> I["Return comparison result"] + C -->|"no"| J["Return false"] + A --> K["Return false"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsRHELCompatible --> func_GetLifeCycleDates + func_IsRHELCompatible --> func_len + func_IsRHELCompatible --> gv_NewVersion + func_IsRHELCompatible --> func_GreaterThanOrEqual +``` + +#### 5) Functions calling `IsRHELCompatible` (Mermaid) + +```mermaid +graph TD + testNodeOperatingSystemStatus --> IsRHELCompatible +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking IsRHELCompatible +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/compatibility" +) + +func main() { + machine := "8.6" + ocp := "4.12" + + if compatibility.IsRHELCompatible(machine, ocp) { + fmt.Printf("RHEL %s is compatible with OpenShift %s\n", machine, ocp) + } else { + fmt.Printf("RHEL %s is NOT compatible with OpenShift %s\n", machine, ocp) + } +} +``` + +--- diff --git a/docs/pkg/configuration/configuration.md b/docs/pkg/configuration/configuration.md new file mode 100644 index 000000000..39e4ea257 --- /dev/null +++ b/docs/pkg/configuration/configuration.md @@ -0,0 +1,422 @@ +# Package configuration + +**Path**: `pkg/configuration` + +## Table of Contents + +- [Overview](#overview) +- 
[Structs](#structs) + - [AcceptedKernelTaintsInfo](#acceptedkerneltaintsinfo) + - [ConnectAPIConfig](#connectapiconfig) + - [CrdFilter](#crdfilter) + - [ManagedDeploymentsStatefulsets](#manageddeploymentsstatefulsets) + - [Namespace](#namespace) + - [SkipHelmChartList](#skiphelmchartlist) + - [SkipScalingTestDeploymentsInfo](#skipscalingtestdeploymentsinfo) + - [SkipScalingTestStatefulSetsInfo](#skipscalingteststatefulsetsinfo) + - [TestConfiguration](#testconfiguration) + - [TestParameters](#testparameters) +- [Exported Functions](#exported-functions) + - [GetTestParameters](#gettestparameters) + - [LoadConfiguration](#loadconfiguration) + +## Overview + +The configuration package centralizes reading, parsing and caching test‑suite settings from a YAML file and exposes them through read‑only accessors for the rest of CertSuite. + +### Key Features + +- Loads and unmarshals a YAML config into structured types, applying defaults on first load +- Caches the parsed configuration so repeated calls are cheap +- Provides global read‑only access to runtime parameters via GetTestParameters + +### Design Notes + +- Configuration is loaded lazily; subsequent calls return the cached instance, ensuring consistency across packages +- The package deliberately hides mutable globals (confLoaded, configuration) to prevent accidental changes +- Users should call LoadConfiguration once early in the program and then rely on GetTestParameters for read‑only access + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**AcceptedKernelTaintsInfo**](#acceptedkerneltaintsinfo) | Struct definition | +| [**ConnectAPIConfig**](#connectapiconfig) | Struct definition | +| [**CrdFilter**](#crdfilter) | Struct definition | +| [**ManagedDeploymentsStatefulsets**](#manageddeploymentsstatefulsets) | Identifies a managed Deployment or StatefulSet by its Kubernetes name | +| [**Namespace**](#namespace) | Represents a target namespace by its name | +| [**SkipHelmChartList**](#skiphelmchartlist) | Struct definition | +| 
[**SkipScalingTestDeploymentsInfo**](#skipscalingtestdeploymentsinfo) | Struct definition | +| [**SkipScalingTestStatefulSetsInfo**](#skipscalingteststatefulsetsinfo) | Struct definition | +| [**TestConfiguration**](#testconfiguration) | Holds all test‑suite settings loaded from the YAML configuration file | +| [**TestParameters**](#testparameters) | Runtime options that drive test execution and output behaviour | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetTestParameters() *TestParameters](#gettestparameters) | Provides read‑only access to the global `parameters` instance that holds all runtime options for the test suite. | +| [func LoadConfiguration(filePath string) (TestConfiguration, error)](#loadconfiguration) | Reads a YAML configuration file, unmarshals it into `TestConfiguration`, applies defaults, and caches the result so subsequent calls return the same instance. | + +## Structs + +### AcceptedKernelTaintsInfo + + +**Purpose**: AcceptedKernelTaintsInfo contains all certified operator request info + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Module` | `string` | Field documentation | + +--- + +### ConnectAPIConfig + + +**Purpose**: ConnectAPIConfig contains the configuration for the Red Hat Connect API + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `APIKey` | `string` | Field documentation | +| `ProjectID` | `string` | Field documentation | +| `BaseURL` | `string` | Field documentation | +| `ProxyURL` | `string` | Field documentation | +| `ProxyPort` | `string` | Field documentation | + +--- + +### CrdFilter + + +**Purpose**: CrdFilter defines a CustomResourceDefinition config filter. + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Scalable` | `bool` | Field documentation | +| `NameSuffix` | `string` | Field documentation | + +--- + +### ManagedDeploymentsStatefulsets + +A lightweight representation of a StatefulSet managed by CertSuite, identified by its Kubernetes name. 
+ +#### Fields + +| Field | Type | Description | +|-------|--------|-------------| +| Name | string | The fully qualified or namespace‑scoped name of the StatefulSet. It is used to locate and reference the resource within Kubernetes API calls. | + +#### Purpose + +`ManagedDeploymentsStatefulsets` encapsulates a single StatefulSet that CertSuite will monitor, configure, or modify. By storing only the name, it keeps the configuration minimal while still enabling precise targeting of resources in the cluster. + +#### Related functions (if any) + +| Function | Purpose | +|----------|---------| +| *none* | No direct methods are defined for this struct. | + +--- + +### Namespace + +#### Fields + +| Field | Type | Description | +|-------|--------|-------------| +| Name | string | The namespace name as used in Kubernetes and referenced by YAML/JSON configuration. | + +#### Purpose + +The `Namespace` struct represents a single namespace declaration within the CertSuite configuration. It holds only the namespace identifier, which is serialized/deserialized with `yaml:"name"` and `json:"name"`. This struct is typically instantiated when parsing a configuration file to determine where certificates or resources should be applied. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| *(none)* | | + +--- + +--- + +### SkipHelmChartList + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Name` | `string` | Field documentation | + +--- + +### SkipScalingTestDeploymentsInfo + + +**Purpose**: SkipScalingTestDeploymentsInfo contains a list of names of deployments that should be skipped by the scaling tests to prevent issues + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Name` | `string` | Field documentation | +| `Namespace` | `string` | Field documentation | + +--- + +### SkipScalingTestStatefulSetsInfo + + +**Purpose**: SkipScalingTestStatefulSetsInfo contains a list of names of statefulsets that should be skipped by the scaling tests to prevent issues + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Name` | `string` | Field documentation | +| `Namespace` | `string` | Field documentation | + +--- + +### TestConfiguration + +The configuration holder for test‑execution parameters, including namespaces, labels, CRD filters, deployment states, kernel taint acceptance rules, collector credentials, and API settings. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `TargetNameSpaces` | `[]Namespace` | Namespaces targeted during testing. | +| `PodsUnderTestLabels` | `[]string` | Labels that identify pods to be examined. | +| `OperatorsUnderTestLabels` | `[]string` | Labels identifying operators involved in the test. | +| `CrdFilters` | `[]CrdFilter` | Filters applied to Custom Resource Definitions during discovery. | +| `ManagedDeployments` | `[]ManagedDeploymentsStatefulsets` | State information for managed deployments (including statefulsets). | +| `ManagedStatefulsets` | `[]ManagedDeploymentsStatefulsets` | Dedicated list of statefulset deployment states. 
| +| `AcceptedKernelTaints` | `[]AcceptedKernelTaintsInfo` | Kernel taints that are considered acceptable during test runs. | +| `SkipHelmChartList` | `[]SkipHelmChartList` | Helm charts to skip when provisioning resources. | +| `SkipScalingTestDeployments` | `[]SkipScalingTestDeploymentsInfo` | Deployments excluded from scaling tests. | +| `SkipScalingTestStatefulSets` | `[]SkipScalingTestStatefulSetsInfo` | Statefulsets excluded from scaling tests. | +| `ValidProtocolNames` | `[]string` | Protocol names that are permitted for use in the test environment. | +| `ServicesIgnoreList` | `[]string` | Services to ignore during discovery or validation steps. | +| `ProbeDaemonSetNamespace` | `string` | Namespace where the probe DaemonSet runs; defaults if empty. | +| `ExecutedBy` | `string` | Identifier of the entity executing the test (e.g., CI system). | +| `PartnerName` | `string` | Name of the partner organization, used for reporting or tagging. | +| `CollectorAppPassword` | `string` | Password credential for the collector application. | +| `CollectorAppEndpoint` | `string` | Endpoint URL for the collector service. | +| `ConnectAPIConfig` | `ConnectAPIConfig` | Configuration block for interacting with the Red Hat Connect API. | + +#### Purpose + +`TestConfiguration` encapsulates all runtime options and environment settings required by Certsuite to perform its tests. It is loaded once from a YAML file (via `LoadConfiguration`) and then consulted throughout the test lifecycle to determine which resources to probe, what labels to match, how to handle scaling scenarios, and where to send results. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `LoadConfiguration` | Reads a YAML configuration file into a `TestConfiguration` instance, applies defaults (e.g., for the probe DaemonSet namespace), and caches the result for subsequent calls. 
| + +--- + +--- + +### TestParameters + +A configuration holder used by CertSuite to drive test execution and output behaviour. + +--- + +#### Fields + +| Field | Type | Description | +|---------------------------|-----------------|-------------| +| `Kubeconfig` | `string` | Path to the kubeconfig file that identifies the target cluster. | +| `ConfigFile` | `string` | Path to a YAML/JSON configuration file overriding defaults. | +| `PfltDockerconfig` | `string` | Docker config used for pulling pre‑flight images. | +| `OutputDir` | `string` | Directory where test results and artifacts are written. | +| `LabelsFilter` | `string` | Comma‑separated list of pod labels to include in autodiscovery. | +| `LogLevel` | `string` | Logging verbosity (`debug`, `info`, etc.). | +| `OfflineDB` | `string` | Path to a local database used when the cluster is offline. | +| `DaemonsetCPUReq` | `string` | CPU request for the CertSuite DaemonSet (e.g., `"200m"`). | +| `DaemonsetCPULim` | `string` | CPU limit for the CertSuite DaemonSet. | +| `DaemonsetMemReq` | `string` | Memory request for the CertSuite DaemonSet (e.g., `"256Mi"`). | +| `DaemonsetMemLim` | `string` | Memory limit for the CertSuite DaemonSet. | +| `SanitizeClaim` | `bool` | If true, removes sensitive data from test claims before storage. | +| `CertSuiteImageRepo` | `string` | Repository URL for pulling the CertSuite container image. | +| `CertSuiteProbeImage` | `string` | Image used for health‑probe checks during tests. | +| `Intrusive` | `bool` | Enables tests that modify cluster state (e.g., creating resources). | +| `AllowPreflightInsecure` | `bool` | Allows pre‑flight checks against insecure registries. | +| `IncludeWebFilesInOutputFolder` | `bool` | If true, copies web assets into the output directory. | +| `OmitArtifactsZipFile` | `bool` | Skips creation of a ZIP archive containing artifacts. | +| `EnableDataCollection` | `bool` | Enables collection of telemetry data during tests. 
| +| `EnableXMLCreation` | `bool` | Generates an XML report in addition to JSON output. | +| `ServerMode` | `bool` | Runs CertSuite in a long‑lived server mode instead of one‑shot. | +| `Timeout` | `time.Duration` | Global timeout applied to test execution phases. | +| `ConnectAPIKey` | `string` | API key for authentication with the Connect service. | +| `ConnectProjectID` | `string` | Identifier of the project in Connect that receives results. | +| `ConnectAPIBaseURL` | `string` | Base URL of the Connect REST API. | +| `ConnectAPIProxyURL` | `string` | Proxy server URL used for outbound Connect traffic. | +| `ConnectAPIProxyPort` | `string` | Port number of the proxy server. | +| `AllowNonRunning` | `bool` | When true, autodiscovery includes pods not in a `Running` state. | + +--- + +#### Purpose + +`TestParameters` aggregates all runtime options that influence how CertSuite discovers workloads, configures resources, interacts with external services, and formats its output. The struct is typically populated from command‑line flags, environment variables, or a configuration file, and then passed to the test harness to orchestrate execution. + +--- + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetTestParameters` | Returns a pointer to the global `parameters` instance that holds current test settings. | + +--- + +--- + +## Exported Functions + +### GetTestParameters + +**GetTestParameters** - Provides read‑only access to the global `parameters` instance that holds all runtime options for the test suite. + +#### Signature (Go) + +```go +func GetTestParameters() *TestParameters +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides read‑only access to the global `parameters` instance that holds all runtime options for the test suite. 
| **Parameters** | None | +| **Return value** | A pointer to the package’s `TestParameters` struct, which contains fields such as `OutputDir`, `LabelsFilter`, `Timeout`, etc. | +| **Key dependencies** | *None* – simply returns a reference to an already‑initialised variable. | +| **Side effects** | None; no state mutation or I/O occurs. | +| **How it fits the package** | Serves as the central accessor for configuration values used throughout `certsuite`, `autodiscover`, and command‑line initialization logic. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetTestParameters --> Return(parameters) +``` + +#### Function dependencies (Mermaid) + +None – this function calls no other functions; it simply returns the package‑level `parameters` variable. + +#### Functions calling `GetTestParameters` (Mermaid) + +```mermaid +graph TD + initTestParamsFromFlags --> GetTestParameters + runTestSuite --> GetTestParameters + FindPodsByLabels --> GetTestParameters + Run --> GetTestParameters + Startup --> GetTestParameters + getK8sClientsConfigFileNames --> GetTestParameters + buildTestEnvironment --> GetTestParameters + deployDaemonSet --> GetTestParameters + ShouldRun --> GetTestParameters +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetTestParameters +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/configuration" +) + +func main() { + params := configuration.GetTestParameters() + fmt.Printf("Output directory: %s\n", params.OutputDir) +} +``` + +--- + +### LoadConfiguration + +**LoadConfiguration** - Reads a YAML configuration file, unmarshals it into `TestConfiguration`, applies defaults, and caches the result so subsequent calls return the same instance. 
+ +#### Signature (Go) + +```go +func LoadConfiguration(filePath string) (TestConfiguration, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a YAML configuration file, unmarshals it into `TestConfiguration`, applies defaults, and caches the result so subsequent calls return the same instance. | +| **Parameters** | `filePath string` – Path to the YAML configuration file. | +| **Return value** | `TestConfiguration` – Parsed configuration; `error` if reading or unmarshalling fails. | +| **Key dependencies** | • `log.Debug`, `log.Info`, `log.Warn` (logging)
• `os.ReadFile` (file I/O)
• `yaml.Unmarshal` (YAML parsing) | +| **Side effects** | • Caches the loaded configuration in package‑level variables (`confLoaded`, `configuration`).
• Logs informational and warning messages. | +| **How it fits the package** | Provides a singleton access point to the test configuration used by the rest of the `configuration` package and other packages such as the provider’s test environment builder. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check if config already loaded"] -->|"Yes"| B["Return cached configuration"] + A -->|"No"| C["Log “Loading config from file”"] + C --> D["Read file contents with os.ReadFile"] + D -- Success --> E["Unmarshal YAML into configuration"] + D -- Error --> F["Return error"] + E -- Success --> G["Set default namespace if missing"] + G --> H["Mark config as loaded"] + H --> I["Return configuration"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadConfiguration --> func_log.Debug + func_LoadConfiguration --> func_log.Info + func_LoadConfiguration --> func_os.ReadFile + func_LoadConfiguration --> func_yaml.Unmarshal + func_LoadConfiguration --> func_log.Warn +``` + +#### Functions calling `LoadConfiguration` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_LoadConfiguration +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LoadConfiguration +config, err := configuration.LoadConfiguration("/etc/certsuite/config.yaml") +if err != nil { + log.Fatalf("Failed to load config: %v", err) +} +fmt.Printf("Loaded configuration: %+v\n", config) +``` + +--- diff --git a/docs/pkg/diagnostics/diagnostics.md b/docs/pkg/diagnostics/diagnostics.md new file mode 100644 index 000000000..7dfc9ca98 --- /dev/null +++ b/docs/pkg/diagnostics/diagnostics.md @@ -0,0 +1,721 @@ +# Package diagnostics + +**Path**: `pkg/diagnostics` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [NodeHwInfo](#nodehwinfo) +- [Exported Functions](#exported-functions) + - [GetCniPlugins](#getcniplugins) + - [GetCsiDriver](#getcsidriver) + - [GetHwInfoAllNodes](#gethwinfoallnodes) + - 
[GetNodeJSON](#getnodejson) + - [GetVersionK8s](#getversionk8s) + - [GetVersionOcClient](#getversionocclient) + - [GetVersionOcp](#getversionocp) +- [Local Functions](#local-functions) + - [getHWJsonOutput](#gethwjsonoutput) + - [getHWTextOutput](#gethwtextoutput) + +## Overview + +The diagnostics package gathers node‑level information for a Kubernetes/Openshift cluster – from CNI plugins and CSI drivers to CPU, memory, network and block device stats – and exposes it in JSON‑serialisable maps. + +### Key Features + +- Collects and serialises hardware metrics (CPU, memory, NICs, disks) per node via probe pods +- Retrieves the list of installed CNI plugins and CSI drivers from the cluster +- Provides utility functions to fetch Kubernetes and OpenShift version strings + +### Design Notes + +- Uses a lightweight probe pod approach instead of direct kubelet API calls for compatibility with restricted clusters +- All data is returned as generic maps or structs; callers must unmarshal into concrete types if needed +- Error handling logs via internal logger but propagates errors for caller awareness + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**NodeHwInfo**](#nodehwinfo) | Node Hardware Information | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetCniPlugins() (out map[string][]interface{})](#getcniplugins) | Executes a command inside probe pods to obtain the list of CNI plugins present on each node and returns it as a JSON‑serialisable map keyed by node name. | +| [func GetCsiDriver() (out map[string]interface{})](#getcsidriver) | Fetches the list of Container Storage Interface (CSI) drivers from a Kubernetes cluster and returns it as a generic `map[string]interface{}` suitable for JSON serialization. | +| [func GetHwInfoAllNodes() (out map[string]NodeHwInfo)](#gethwinfoallnodes) | Gathers CPU, memory, network and block device data from each probe pod running on cluster nodes. 
The result is a map keyed by node name containing a `NodeHwInfo` struct per node. | +| [func GetNodeJSON() (out map[string]interface{})](#getnodejson) | Returns a map representation of the current Kubernetes nodes, mirroring the output of `oc get nodes -json`. | +| [func GetVersionK8s() (out string)](#getversionk8s) | Returns the Kubernetes (`k8s`) version that is configured in the current test environment. | +| [func GetVersionOcClient() (out string)](#getversionocclient) | Provides a static string indicating that the package does not use an OC or kubectl client. | +| [func GetVersionOcp() (out string)](#getversionocp) | Returns a human‑readable representation of the OpenShift (OCP) cluster version, or an explanatory placeholder if not running on an OCP cluster. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string)(interface{}, error)](#gethwjsonoutput) | Executes a shell command inside a probe pod, captures its stdout, and unmarshals the output into an `interface{}` (typically a map). | +| [func getHWTextOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) ([]string, error)](#gethwtextoutput) | Runs a shell command inside the first container of `probePod` and returns its standard output as an array of lines. | + +## Structs + +### NodeHwInfo + +#### Fields + +| Field | Type | Description | +|---------|---------------|-------------| +| Lscpu | interface{} | Raw output from the `lscpu` command; expected to be a JSON‑serializable map containing CPU details. | +| IPconfig| interface{} | Raw output from the `ip` command; contains network configuration information, typically parsed as a JSON object. | +| Lsblk | interface{} | Raw output from the `lsblk` command; holds block device data, usually in JSON form. | +| Lspci | []string | Slice of strings representing the text output lines from the `lspci` command (PCI device list). 
| + +#### Purpose + +`NodeHwInfo` aggregates low‑level hardware inspection results for a Kubernetes node. Each field holds the raw result of a specific system command (`lscpu`, `ip`, `lsblk`, `lspci`). The struct is populated during diagnostics collection and later used to report or analyze node hardware characteristics. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetHwInfoAllNodes` | Iterates over probe pods, runs system commands on each node, fills a `NodeHwInfo` instance per node, and returns a map keyed by node name. | + +--- + +--- + +## Exported Functions + +### GetCniPlugins + +**GetCniPlugins** - Executes a command inside probe pods to obtain the list of CNI plugins present on each node and returns it as a JSON‑serialisable map keyed by node name. + +#### 1. Signature (Go) + +```go +func GetCniPlugins() (out map[string][]interface{}) +``` + +#### 2. Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes a command inside probe pods to obtain the list of CNI plugins present on each node and returns it as a JSON‑serialisable map keyed by node name. | +| **Parameters** | None | +| **Return value** | `map[string][]interface{}` – keys are node names; values are slices containing decoded JSON objects describing each plugin. | +| **Key dependencies** | • `provider.GetTestEnvironment` to obtain probe pod information.
• `clientsholder.GetClientsHolder` and `clientsholder.NewContext` for Kubernetes client interactions.
• `o.ExecCommandContainer` to run the command inside a pod.
• `encoding/json.Unmarshal` to parse the command output. | +| **Side effects** | Writes error logs via `log.Error` when command execution or JSON decoding fails; otherwise only returns data. No state mutation in global variables. | +| **How it fits the package** | Part of diagnostics utilities that gather runtime information from a cluster, used by higher‑level claim generation to include CNI plugin details in node summaries. | + +#### 3. Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetCniPlugins"] --> B["Retrieve TestEnvironment"] + B --> C["Iterate over ProbePods"] + C --> D["Create Context for each pod"] + D --> E["Execute cniPluginsCommand in container"] + E -- Success --> F["Unmarshal JSON output"] + F --> G["Store decoded slice in out map keyed by node name"] + E -- Failure --> H["Log error & continue"] + H --> C +``` + +#### 4. Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetCniPlugins --> func_GetTestEnvironment + func_GetCniPlugins --> func_GetClientsHolder + func_GetCniPlugins --> func_NewContext + func_GetCniPlugins --> func_ExecCommandContainer + func_GetCniPlugins --> func_Unmarshal +``` + +#### 5. Functions calling `GetCniPlugins` (Mermaid) + +```mermaid +graph TD + func_GenerateNodes --> func_GetCniPlugins +``` + +#### 6. Usage example (Go) + +```go +// Minimal example invoking GetCniPlugins +pluginsByNode := diagnostics.GetCniPlugins() +for node, plugins := range pluginsByNode { + fmt.Printf("Node %s has %d CNI plugins\n", node, len(plugins)) +} +``` + +--- + +--- + +### GetCsiDriver + +**GetCsiDriver** - Fetches the list of Container Storage Interface (CSI) drivers from a Kubernetes cluster and returns it as a generic `map[string]interface{}` suitable for JSON serialization. 
+ +#### 1) Signature (Go) + +```go +func GetCsiDriver() (out map[string]interface{}) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches the list of Container Storage Interface (CSI) drivers from a Kubernetes cluster and returns it as a generic `map[string]interface{}` suitable for JSON serialization. | +| **Parameters** | None | +| **Return value** | A map containing the CSI driver data; empty if an error occurs. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains a Kubernetes client.
• `K8sClient.StorageV1().CSIDrivers().List(...)` – lists CSI drivers.
• `runtime.NewScheme()`, `storagev1.AddToScheme()` – set up a runtime scheme for the CSI driver type.
• `serializer.NewCodecFactory(...).LegacyCodec(...)` – creates a codec to encode objects into JSON‑compatible bytes.
• `json.Unmarshal` – converts encoded bytes back into a map. | +| **Side effects** | No state mutation; performs network I/O to the Kubernetes API and logs errors via the package logger. | +| **How it fits the package** | Provides CSI driver data for diagnostic reports, used by higher‑level functions such as `claimhelper.GenerateNodes`. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetCsiDriver"] --> B["GetClientsHolder"] + B --> C["List CSIDrivers"] + C --> D{"err?"} + D -- Yes --> E["log.Error & return empty map"] + D -- No --> F["Create runtime scheme"] + F --> G["Add CSI driver type to scheme"] + G --> H{"err?"} + H -- Yes --> I["log.Error & return empty map"] + H -- No --> J["Encode CSIDriver list to JSON bytes"] + J --> K{"err?"} + K -- Yes --> L["log.Error & return empty map"] + K -- No --> M["Unmarshal JSON into map"] + M --> N{"err?"} + N -- Yes --> O["log.Error & return empty map"] + N -- No --> P["return populated map"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetCsiDriver --> func_GetClientsHolder + func_GetCsiDriver --> func_List + func_GetCsiDriver --> func_AddToScheme + func_GetCsiDriver --> func_LegacyCodec + func_GetCsiDriver --> func_Encode + func_GetCsiDriver --> func_Unmarshal +``` + +#### 5) Functions calling `GetCsiDriver` (Mermaid) + +```mermaid +graph TD + func_GenerateNodes --> func_GetCsiDriver +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetCsiDriver +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics" +) + +func main() { + csiDrivers := diagnostics.GetCsiDriver() + fmt.Printf("CSI Drivers: %+v\n", csiDrivers) +} +``` + +--- + +### GetHwInfoAllNodes + +**GetHwInfoAllNodes** - Gathers CPU, memory, network and block device data from each probe pod running on cluster nodes. The result is a map keyed by node name containing a `NodeHwInfo` struct per node. 
+ +#### Signature (Go) + +```go +func GetHwInfoAllNodes() (out map[string]NodeHwInfo) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers CPU, memory, network and block device data from each probe pod running on cluster nodes. The result is a map keyed by node name containing a `NodeHwInfo` struct per node. | +| **Parameters** | None | +| **Return value** | `map[string]NodeHwInfo` – mapping of node names to their hardware information. | +| **Key dependencies** | • `provider.GetTestEnvironment()` – obtains the test environment configuration.
• `clientsholder.GetClientsHolder()` – provides a client holder for executing commands in pods.
• `getHWJsonOutput(...)` – runs JSON‑producing commands (`lscpu`, `ip`) inside probe pods.
• `getHWTextOutput(...)` – runs text‑producing command (`lspci`).
• `log.Error(...)` – logs errors during data collection. | +| **Side effects** | Executes commands in each probe pod; writes error messages to the logger; no global state is modified. | +| **How it fits the package** | Supplies hardware information for diagnostics and reporting, used by higher‑level helpers such as `GenerateNodes`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get test env"} + B --> C["Retrieve probe pods"] + C --> D{"For each pod"} + D --> E["Run lscpu JSON"] + E --> F["Parse or log error"] + D --> G["Run ip JSON"] + G --> H["Parse or log error"] + D --> I["Run lsblk JSON"] + I --> J["Parse or log error"] + D --> K["Run lspci text"] + K --> L["Parse or log error"] + L --> M["Store NodeHwInfo in map"] + M --> N{"Next pod"} + N --> O["End loop"] + O --> P["Return map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetHwInfoAllNodes --> func_provider.GetTestEnvironment + func_GetHwInfoAllNodes --> func_clientsholder.GetClientsHolder + func_GetHwInfoAllNodes --> func_getHWJsonOutput + func_GetHwInfoAllNodes --> func_getHWTextOutput + func_GetHwInfoAllNodes --> func_log.Error +``` + +#### Functions calling `GetHwInfoAllNodes` (Mermaid) + +```mermaid +graph TD + func_GenerateNodes --> func_GetHwInfoAllNodes +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetHwInfoAllNodes +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics" +) + +func main() { + hwInfo := diagnostics.GetHwInfoAllNodes() + for node, info := range hwInfo { + fmt.Printf("Node: %s\n", node) + fmt.Printf(" CPU: %v\n", info.Lscpu) + fmt.Printf(" IP config: %v\n", info.IPconfig) + fmt.Printf(" Block devices: %v\n", info.Lsblk) + fmt.Printf(" PCI devices: %v\n", info.Lspci) + } +} +``` + +--- + +### GetNodeJSON + +**GetNodeJSON** - Returns a map representation of the current Kubernetes nodes, mirroring the output of `oc get nodes -json`. 
+ +#### Signature (Go) + +```go +func GetNodeJSON() (out map[string]interface{}) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a map representation of the current Kubernetes nodes, mirroring the output of `oc get nodes -o json`. | +| **Parameters** | None. | +| **Return value** | `map[string]interface{}` containing node details; empty on error. | +| **Key dependencies** | • `provider.GetTestEnvironment` – obtains test environment data.
• `encoding/json.Marshal` / `Unmarshal` – serialises/deserialises the nodes.
• `log.Error` – logs marshalling errors. | +| **Side effects** | Logs error messages if JSON operations fail; otherwise no external state changes. | +| **How it fits the package** | Supplies node information for diagnostic reports and claim generation within the diagnostics subsystem. | + +#### Internal workflow + +```mermaid +flowchart TD + A["GetNodeJSON"] --> B{"Retrieve env"} + B --> C["provider.GetTestEnvironment"] + C --> D["env.Nodes"] + D --> E["json.Marshal(env.Nodes)"] + E --> F{"Marshal success?"} + F -- No --> G["log.Error(Could not Marshall...)"] + F -- Yes --> H["json.Unmarshal(nodesJSON, &out)"] + H --> I{"Unmarshal success?"} + I -- No --> J["log.Error(Could not unMarshall...)"] + I -- Yes --> K["out"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetNodeJSON --> func_GetTestEnvironment + func_GetNodeJSON --> func_Marshal + func_GetNodeJSON --> func_Unmarshal + func_GetNodeJSON --> func_Error +``` + +#### Functions calling `GetNodeJSON` + +```mermaid +graph TD + func_GenerateNodes --> func_GetNodeJSON +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetNodeJSON +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics" +) + +func main() { + nodes := diagnostics.GetNodeJSON() + fmt.Printf("Cluster nodes: %+v\n", nodes) +} +``` + +--- + +### GetVersionK8s + +**GetVersionK8s** - Returns the Kubernetes (`k8s`) version that is configured in the current test environment. + +#### 1) Signature (Go) + +```go +func GetVersionK8s() (out string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the Kubernetes (`k8s`) version that is configured in the current test environment. | +| **Parameters** | None | +| **Return value** | `string` – the Kubernetes version string (e.g., `"v1.24.0"`). | +| **Key dependencies** | • Calls `provider.GetTestEnvironment()` to obtain the `TestEnvironment`.
• Reads the `K8sVersion` field of that environment. | +| **Side effects** | None – purely read‑only operation; no state changes or I/O. | +| **How it fits the package** | Provides a lightweight accessor used by claim construction and diagnostic utilities to embed the Kubernetes version into claims or logs. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetVersionK8s"] --> B{"Call provider.GetTestEnvironment"} + B --> C["TestEnvironment"] + C --> D["Return env.K8sVersion"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetVersionK8s --> func_GetTestEnvironment +``` + +#### 5) Functions calling `GetVersionK8s` (Mermaid) + +```mermaid +graph TD + func_NewClaimBuilder --> func_GetVersionK8s +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetVersionK8s +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics" +) + +func main() { + k8sVer := diagnostics.GetVersionK8s() + fmt.Println("Current Kubernetes version:", k8sVer) +} +``` + +--- + +### GetVersionOcClient + +**GetVersionOcClient** - Provides a static string indicating that the package does not use an OC or kubectl client. + +```go +func GetVersionOcClient() (out string) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides a static string indicating that the package does not use an OC or kubectl client. | +| **Parameters** | None | +| **Return value** | `string` – `"n/a, (not using oc or kubectl client)"` | +| **Key dependencies** | *none* | +| **Side effects** | No state changes; purely functional. | +| **How it fits the package** | Supplies a version placeholder used when constructing claim metadata in diagnostics. 
| + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Return value"} + B --> C["n/a, (not using oc or kubectl client)"] +``` + +#### Function dependencies + +None – this function does not call any other functions. + +#### Functions calling `GetVersionOcClient` + +```mermaid +graph TD + func_NewClaimBuilder --> func_GetVersionOcClient +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetVersionOcClient +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics" +) + +func main() { + version := diagnostics.GetVersionOcClient() + fmt.Println("OC client version:", version) +} +``` + +--- + +### GetVersionOcp + +**GetVersionOcp** - Returns a human‑readable representation of the OpenShift (OCP) cluster version, or an explanatory placeholder if not running on an OCP cluster. + +#### Signature (Go) + +```go +func GetVersionOcp() (out string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a human‑readable representation of the OpenShift (OCP) cluster version, or an explanatory placeholder if not running on an OCP cluster. | +| **Parameters** | None | +| **Return value** | `string` – the OCP version string or `"n/a, (non-OpenShift cluster)"`. | +| **Key dependencies** | • `provider.GetTestEnvironment()`
• `provider.IsOCPCluster()` | +| **Side effects** | No state mutation; purely read‑only. | +| **How it fits the package** | Part of the diagnostics utilities that expose environment information to higher‑level components (e.g., claim generation). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Is OCP cluster?"} + B -- Yes --> C["Return env.OpenshiftVersion"] + B -- No --> D["Return n/a, (non-OpenShift cluster)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetVersionOcp --> func_provider.GetTestEnvironment + func_GetVersionOcp --> func_provider.IsOCPCluster +``` + +#### Functions calling `GetVersionOcp` + +```mermaid +graph TD + func_claimhelper.NewClaimBuilder --> func_GetVersionOcp +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetVersionOcp +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/diagnostics" +) + +func main() { + version := diagnostics.GetVersionOcp() + fmt.Println("OpenShift Version:", version) +} +``` + +--- + +## Local Functions + +### getHWJsonOutput + +**getHWJsonOutput** - Executes a shell command inside a probe pod, captures its stdout, and unmarshals the output into an `interface{}` (typically a map). + +#### 1) Signature (Go) + +```go +func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string)(interface{}, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes a shell command inside a probe pod, captures its stdout, and unmarshals the output into an `interface{}` (typically a map). | +| **Parameters** | *`probePod`* – pointer to the Kubernetes Pod used for probing.
*`o`* – executor interface that can run commands in containers.
*`cmd`* – command string to run inside the pod. | +| **Return value** | `out interface{}` – decoded JSON payload; `err error` – non‑nil if execution or decoding fails. | +| **Key dependencies** | • `clientsholder.NewContext`
• `o.ExecCommandContainer`
• `fmt.Errorf`
• `encoding/json.Unmarshal` | +| **Side effects** | No state mutation; only performs I/O by running a command in a pod. | +| **How it fits the package** | Used by higher‑level diagnostics to collect hardware information (e.g., lscpu, ip, lsblk) from each node’s probe pod. | + +#### 3) Internal workflow + +```mermaid +flowchart TD + A["Create Context"] --> B{"Run cmd in container"} + B -->|"stdout, stderr"| C["Check errors"] + C --> D{"stderr empty?"} + D -- yes --> E["Unmarshal JSON"] + E --> F["Return result"] + D -- no --> G["Return error with stderr"] +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_getHWJsonOutput --> clientsholder.NewContext + func_getHWJsonOutput --> o.ExecCommandContainer + func_getHWJsonOutput --> fmt.Errorf + func_getHWJsonOutput --> json.Unmarshal +``` + +#### 5) Functions calling `getHWJsonOutput` + +```mermaid +graph TD + func_GetHwInfoAllNodes --> func_getHWJsonOutput +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getHWJsonOutput +pod := &corev1.Pod{ /* populated elsewhere */ } +cmdExecutor := clientsholder.NewCommandExecutor() // hypothetical constructor +jsonData, err := getHWJsonOutput(pod, cmdExecutor, "lscpu -J") +if err != nil { + log.Fatalf("failed to fetch hardware info: %v", err) +} +fmt.Printf("Hardware JSON: %+v\n", jsonData) +``` + +--- + +### getHWTextOutput + +**getHWTextOutput** - Runs a shell command inside the first container of `probePod` and returns its standard output as an array of lines. + +#### 1) Signature (Go) + +```go +func getHWTextOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) ([]string, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs a shell command inside the first container of `probePod` and returns its standard output as an array of lines. | +| **Parameters** | `probePod *corev1.Pod` – target pod;
`o clientsholder.Command` – executor interface;
`cmd string` – command to run. | +| **Return value** | `[]string` – split stdout by newline; `error` – non‑nil if execution fails or stderr is not empty. | +| **Key dependencies** | • `clientsholder.NewContext`
• `clientsholder.Command.ExecCommandContainer`
• `fmt.Errorf`
• `strings.Split` | +| **Side effects** | No state mutations; performs network I/O to the pod’s container. | +| **How it fits the package** | Used by higher‑level diagnostics functions (e.g., `GetHwInfoAllNodes`) to obtain plain‑text hardware information from probe pods. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create exec context"] --> B["Execute command"] + B --> C{"Success?"} + C -- No --> D["Return error"] + C -- Yes --> E["Split stdout into lines"] + E --> F["Return lines"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getHWTextOutput --> clientsholder.NewContext + func_getHWTextOutput --> clientsholder.Command.ExecCommandContainer + func_getHWTextOutput --> fmt.Errorf + func_getHWTextOutput --> strings.Split +``` + +#### 5) Functions calling `getHWTextOutput` (Mermaid) + +```mermaid +graph TD + func_GetHwInfoAllNodes --> func_getHWTextOutput +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getHWTextOutput +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" +) + +// Assume pod and command are already defined. +func example(pod *corev1.Pod, cmd string) { + var exec clientsholder.Command + lines, err := getHWTextOutput(pod, exec, cmd) + if err != nil { + fmt.Printf("error: %v\n", err) + return + } + for _, line := range lines { + fmt.Println(line) + } +} +``` + +--- diff --git a/docs/pkg/junit/junit.md b/docs/pkg/junit/junit.md new file mode 100644 index 000000000..558b6a0d5 --- /dev/null +++ b/docs/pkg/junit/junit.md @@ -0,0 +1,23 @@ +# Package junit + +**Path**: `pkg/junit` + +## Table of Contents + +- [Overview](#overview) + +## Overview + +This package provides functionality related to handling JUnit test results within the CertSuite project. 
+ +### Key Features + +- Parsing and generating JUnit XML files +- Aggregating test outcomes from multiple sources +- Filtering or transforming test data + +### Design Notes + +- Assumes standard JUnit XML schema for compatibility with CI tools +- Handles missing fields by providing defaults, which may lead to incomplete reports +- Recommended to use helper functions for creating new test suites rather than manipulating structs directly diff --git a/docs/pkg/labels/labels.md b/docs/pkg/labels/labels.md new file mode 100644 index 000000000..fd0deb68c --- /dev/null +++ b/docs/pkg/labels/labels.md @@ -0,0 +1,211 @@ +# Package labels + +**Path**: `pkg/labels` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) +- [Interfaces](#interfaces) + - [LabelsExprEvaluator](#labelsexprevaluator) +- [Exported Functions](#exported-functions) + - [NewLabelsExprEvaluator](#newlabelsexprevaluator) + - [labelsExprParser.Eval](#labelsexprparser.eval) + +## Overview + +Transforms a user‑supplied comma‑separated list of label filters into a Go AST that can be evaluated against test labels, normalizing hyphens to underscores and interpreting commas as logical OR. 
+ +### Key Features + +- Parses comma‑separated expressions into an AST using the standard go/parser package +- Evaluates the resulting AST against a slice of labels via a visitor pattern +- Normalizes label names by replacing hyphens with underscores before parsing + +### Design Notes + +- Assumes each comma represents a disjunction (logical OR) between filters +- Fails with an error if the expression cannot be parsed into a valid Go expression +- Should be invoked once per test suite to avoid repeated parsing overhead + +### Structs Summary + +| Name | Purpose | +|------|----------| + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewLabelsExprEvaluator(labelsExpr string) (LabelsExprEvaluator, error)](#newlabelsexprevaluator) | Transforms a user‑supplied comma‑separated list of label filters into a Go AST that can be evaluated against test labels. It replaces hyphens with underscores and commas with logical OR (`\|\|`), then parses the resulting expression. | +| [func (exprParser labelsExprParser) Eval(labels []string) bool](#labelsexprparser.eval) | Determines whether the provided `labels` satisfy the logical expression stored in `exprParser.astRootNode`. | + +## Structs + +## Interfaces + +### LabelsExprEvaluator + + +**Purpose**: Evaluates a parsed label expression against a list of test labels. + +**Methods**: + +| Method | Description | +|--------|--------------| +| `Eval` | Reports whether the given labels satisfy the parsed label expression. | + +--- + +## Exported Functions + +### NewLabelsExprEvaluator + +**NewLabelsExprEvaluator** - Transforms a user‑supplied comma‑separated list of label filters into a Go AST that can be evaluated against test labels. It replaces hyphens with underscores and commas with logical OR (`||`), then parses the resulting expression. + +#### Signature (Go) + +```go +func NewLabelsExprEvaluator(labelsExpr string) (LabelsExprEvaluator, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a user‑supplied comma‑separated list of label filters into a Go AST that can be evaluated against test labels. 
It replaces hyphens with underscores and commas with logical OR (`\|\|`), then parses the resulting expression. | +| **Parameters** | `labelsExpr string` – raw label filter expression (e.g., `"tag-a,tag-b"`) | +| **Return value** | `LabelsExprEvaluator` – an evaluator capable of checking label sets; `error` – non‑nil if parsing fails | +| **Key dependencies** | • `strings.ReplaceAll`
• `go/parser.ParseExpr`
• `fmt.Errorf` | +| **Side effects** | None – purely functional. | +| **How it fits the package** | Provides the core mechanism for converting label filter strings into evaluators used by checks initialization and claim sanitization. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Input: labelsExpr"] --> B{"Replace hyphens"} + B --> C["Replace commas with ||"] + C --> D{"Parse Go expression"} + D -- success --> E["Return labelsExprParser"] + D -- error --> F["Return fmt.Errorf"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewLabelsExprEvaluator --> func_strings.ReplaceAll + func_NewLabelsExprEvaluator --> func_go/parser.ParseExpr + func_NewLabelsExprEvaluator --> func_fmt.Errorf +``` + +#### Functions calling `NewLabelsExprEvaluator` (Mermaid) + +```mermaid +graph TD + func_InitLabelsExprEvaluator --> func_NewLabelsExprEvaluator + func_SanitizeClaimFile --> func_NewLabelsExprEvaluator +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewLabelsExprEvaluator +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels" +) + +func main() { + evaluator, err := labels.NewLabelsExprEvaluator("tag-a,tag_b") + if err != nil { + fmt.Printf("Error creating evaluator: %v\n", err) + return + } + // Assume LabelsExprEvaluator has an Eval method: + match := evaluator.Eval([]string{"tag_a"}) // example usage + fmt.Printf("Match result: %t\n", match) +} +``` + +--- + +### labelsExprParser.Eval + +**Eval** - Determines whether the provided `labels` satisfy the logical expression stored in `exprParser.astRootNode`. + +#### Signature (Go) + +```go +func (exprParser labelsExprParser) Eval(labels []string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the provided `labels` satisfy the logical expression stored in `exprParser.astRootNode`. 
| +| **Parameters** | *labels* (`[]string`) – The list of label names to test against. | +| **Return value** | `bool` – `true` if the expression evaluates to true, otherwise `false`. | +| **Key dependencies** | • `make` (to create a map)
• `strings.ReplaceAll` (normalizes label names)
• Recursive `visit` function that traverses an AST of type `ast.Expr`
• `log.Logger.Error` for reporting unsupported expression nodes | +| **Side effects** | None; the function only reads inputs and returns a value. | +| **How it fits the package** | Provides the core evaluation logic used by higher‑level label filtering utilities in the `labels` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create labelsMap"] --> B{"Iterate over input labels"} + B --> C["Replace - with _"] + C --> D["Populate map"] + D --> E["Define visit function"] + E --> F{"AST node type"} + F -- Ident --> G["Check existence in map"] + F -- ParenExpr --> H["Recurse on inner expression"] + F -- UnaryExpr (NOT) --> I["Negate child result"] + F -- BinaryExpr (AND/OR) --> J["Evaluate left and right operands"] + J -- AND --> K["Return left && right"] + J -- OR --> L["Return left || right"] + F -- Default --> M["log.Error & return false"] + E --> N["Start traversal at exprParser.astRootNode"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_labelsExprParser.Eval --> make + func_labelsExprParser.Eval --> strings.ReplaceAll + func_labelsExprParser.Eval --> visit + func_labelsExprParser.Eval --> log.Logger.Error +``` + +#### Functions calling `labelsExprParser.Eval` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking labelsExprParser.Eval +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/labels" +) + +func main() { + // Assume exprParser has been initialized with an AST representing `foo && !bar` + var exprParser labels.LabelsExprParser // placeholder for actual type + result := exprParser.Eval([]string{"foo", "baz"}) + fmt.Println("Expression matches:", result) // Output depends on the AST +} +``` + +--- + +--- diff --git a/docs/pkg/podhelper/podhelper.md b/docs/pkg/podhelper/podhelper.md new file mode 100644 index 000000000..5648171d9 --- /dev/null +++ b/docs/pkg/podhelper/podhelper.md @@ -0,0 +1,336 @@ +# Package podhelper + +**Path**: `pkg/podhelper` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [TopOwner](#topowner) +- [Exported Functions](#exported-functions) + - [GetPodTopOwner](#getpodtopowner) +- [Local Functions](#local-functions) + - [followOwnerReferences](#followownerreferences) + - [searchAPIResource](#searchapiresource) + +## Overview + +The podhelper package provides utilities for determining the top‑level ownership of Kubernetes Pods, enabling tests and tooling to identify which custom resources or deployments ultimately own a given pod. 
+ +### Key Features + +- Walks OwnerReferences chains to map pods to their highest‑level owners +- Provides helper functions to resolve API resources dynamically +- Exposes TopOwner struct for concise owner representation + +### Design Notes + +- Relies on dynamic client access via internal client holder; callers must have proper permissions +- Handles missing API resources gracefully by returning informative errors +- Best practice: use GetPodTopOwner as the public entry point, keeping traversal logic private + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**TopOwner**](#topowner) | Represents the highest‑level owner of a Pod | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error)](#getpodtopowner) | Walks a pod’s `OwnerReferences` chain to identify the top‑level resource(s) that ultimately own the pod (e.g., a custom resource or deployment). Returns them as a map keyed by owner name. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func followOwnerReferences( resourceList []*metav1.APIResourceList, dynamicClient dynamic.Interface, topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference, ) (err error)](#followownerreferences) | Walks an ownership chain of Kubernetes objects, starting from a set of `ownerRefs`, and records the highest‑level owners in `topOwners`. | +| [func searchAPIResource(kind, apiVersion string, apis []*metav1.APIResourceList) (*metav1.APIResource, error)](#searchapiresource) | Looks through a slice of `*metav1.APIResourceList` to find the `APIResource` that matches both the specified `kind` and `apiVersion`. Returns an error if no match is found. 
| + +## Structs + +### TopOwner + +#### Fields + +| Field | Type | Description | +|-------|--------|-------------| +| `APIVersion` | `string` | API version of the owning resource (e.g., `"v1"` or `"apps/v1"`). | +| `Kind` | `string` | Kind of the owner resource (`Deployment`, `ReplicaSet`, etc.). | +| `Name` | `string` | Name of the owner object. | +| `Namespace` | `string` | Namespace where the owner resides; empty for cluster‑scoped resources. | + +#### Purpose + +The `TopOwner` struct encapsulates the essential identifying information of a pod’s top‑level controller or owning resource. It is constructed during recursive traversal of owner references (via `followOwnerReferences`) and returned by `GetPodTopOwner`. This allows callers to determine which higher‑level object ultimately governs a given pod, regardless of intermediate layers such as ReplicaSets or StatefulSets. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetPodTopOwner` | Initiates the owner reference traversal for a pod and returns a map of top owners keyed by name. | +| `followOwnerReferences` | Recursively follows owner references, populating the `topOwners` map with `TopOwner` instances when a resource has no further owners. | + +--- + +--- + +## Exported Functions + +### GetPodTopOwner + +**GetPodTopOwner** - Walks a pod’s `OwnerReferences` chain to identify the top‑level resource(s) that ultimately own the pod (e.g., a custom resource or deployment). Returns them as a map keyed by owner name. + +#### Signature (Go) + +```go +func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Walks a pod’s `OwnerReferences` chain to identify the top‑level resource(s) that ultimately own the pod (e.g., a custom resource or deployment). Returns them as a map keyed by owner name. 
| +| **Parameters** | *`podNamespace string`* – namespace of the pod.
*`podOwnerReferences []metav1.OwnerReference`* – list of `OwnerReference` objects attached to the pod. | +| **Return value** | *`topOwners map[string]TopOwner`* – mapping from owner name to a `TopOwner` struct containing API version, kind, name and namespace.
*`err error`* – error if traversal fails or required clients cannot be obtained. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` (to obtain dynamic client and resource list)
• `followOwnerReferences` (recursive helper that follows owner refs) | +| **Side effects** | None beyond reading Kubernetes resources; no state mutation. | +| **How it fits the package** | Provides a public entry point for other packages to resolve pod ownership without exposing the internal recursion logic. | + +#### Internal workflow + +```mermaid +flowchart TD + Start["Start"] + InitMap["Init map"] + CallFollow["Call followOwnerReferences"] + CheckErr{"err?"} + ReturnErr["Return error"] + ReturnOK["Return ok"] + + Start --> InitMap + InitMap --> CallFollow + CallFollow --> CheckErr + CheckErr -- Yes --> ReturnErr + CheckErr -- No --> ReturnOK +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetPodTopOwner --> func_followOwnerReferences + func_GetPodTopOwner --> func_clientsholder.GetClientsHolder +``` + +#### Functions calling `GetPodTopOwner` + +```mermaid +graph TD + func_getOperandPodsFromTestCsvs --> func_GetPodTopOwner + func_getPodsOwnedByCsv --> func_GetPodTopOwner + func_Pod.GetTopOwner --> func_GetPodTopOwner + func_findPodsNotBelongingToOperators --> func_GetPodTopOwner +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetPodTopOwner +import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + podhelper "github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper" +) + +func main() { + // Example owner references (normally from a Pod object) + refs := []metav1.OwnerReference{ + {Kind: "Deployment", APIVersion: "apps/v1", Name: "my-deploy"}, + } + + owners, err := podhelper.GetPodTopOwner("default", refs) + if err != nil { + fmt.Printf("Error retrieving top owners: %v\n", err) + return + } + for name, owner := range owners { + fmt.Printf("Top owner: %s (%s/%s)\n", name, owner.Kind, owner.APIVersion) + } +} +``` + +--- + +--- + +## Local Functions + +### followOwnerReferences + +**followOwnerReferences** - Walks an ownership chain of Kubernetes objects, starting from a set of `ownerRefs`, and records the 
highest‑level owners in `topOwners`. + +```go +func followOwnerReferences( + resourceList []*metav1.APIResourceList, + dynamicClient dynamic.Interface, + topOwners map[string]TopOwner, + namespace string, + ownerRefs []metav1.OwnerReference, +) (err error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Walks an ownership chain of Kubernetes objects, starting from a set of `ownerRefs`, and records the highest‑level owners in `topOwners`. | +| **Parameters** | *`resourceList []*metav1.APIResourceList`* – Available API resources.
*`dynamicClient dynamic.Interface`* – Client for unstructured resource access.
*`topOwners map[string]TopOwner`* – Map to accumulate discovered top owners.
*`namespace string`* – Current namespace context (may be cleared for non‑namespaced resources).
*`ownerRefs []metav1.OwnerReference`* – Immediate owner references to resolve. | +| **Return value** | `error` – Nil on success; descriptive error if any lookup or parsing fails. | +| **Key dependencies** | • `searchAPIResource` (locates API resource for a kind/version)
• `schema.ParseGroupVersion` (parses APIVersion string)
• `dynamicClient.Resource(...).Get()` (fetches the owner object)
• `k8serrors.IsNotFound` (ignores missing objects)
• Recursive call to itself | +| **Side effects** | Mutates `topOwners` by adding entries for top‑level owners.
No I/O beyond API queries. | +| **How it fits the package** | Used by `GetPodTopOwner` to resolve the ultimate owning resources of a pod, enabling higher‑level analysis and reporting. | + +```mermaid +flowchart TD + A["Start with ownerRefs"] --> B{"For each ownerRef"} + B --> C["Find APIResource via searchAPIResource"] + C --> D["Parse APIVersion → gv"] + D --> E["Build GVR from gv & apiResource.Name"] + E --> F{"Non‑namespaced?"} + F -- yes --> G["namespace ="] + G --> H + F -- no --> H["continue"] + H --> I["Get owner object via dynamicClient"] + I --> J{"NotFound?"} + J -- yes --> K["Skip error, continue"] + J -- no --> L["Retrieve ownerReferences from object"] + L --> M{"Has ownerRefs?"} + M -- no --> N["Add to topOwners"] + N --> O["Continue loop"] + M -- yes --> P["Recursive call followOwnerReferences"] + P --> Q{"Error?"} + Q -- yes --> R["Return error"] + Q -- no --> S["Continue loop"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_followOwnerReferences --> func_searchAPIResource + func_followOwnerReferences --> schema_ParseGroupVersion + func_followOwnerReferences --> dynamicClient_Get + func_followOwnerReferences --> k8serrors_IsNotFound + func_followOwnerReferences --> func_followOwnerReferences +``` + +#### Functions calling `followOwnerReferences` + +```mermaid +graph TD + func_GetPodTopOwner --> func_followOwnerReferences +``` + +#### Usage example (Go) + +```go +// Minimal example invoking followOwnerReferences +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" +) + +func main() { + // Assume resourceList, dynamicClient, and podOwnerRefs are initialized elsewhere. + topOwners := make(map[string]TopOwner) + err := followOwnerReferences(resourceList, dynamicClient, topOwners, "default", podOwnerRefs) + if err != nil { + log.Fatalf("failed to resolve owners: %v", err) + } + // topOwners now contains the highest‑level owners of the pod. 
+} +``` + +--- + +### searchAPIResource + +**searchAPIResource** - Looks through a slice of `*metav1.APIResourceList` to find the `APIResource` that matches both the specified `kind` and `apiVersion`. Returns an error if no match is found. + +#### Signature (Go) + +```go +func searchAPIResource(kind, apiVersion string, apis []*metav1.APIResourceList) (*metav1.APIResource, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Looks through a slice of `*metav1.APIResourceList` to find the `APIResource` that matches both the specified `kind` and `apiVersion`. Returns an error if no match is found. | +| **Parameters** | `kind string` – Kubernetes object kind.
`apiVersion string` – Full API version (e.g., `"v1"` or `"apps/v1"`).
`apis []*metav1.APIResourceList` – List of resource lists to search. | +| **Return value** | `(*metav1.APIResource, error)` – Pointer to the matching resource or an error if not found. | +| **Key dependencies** | * `fmt.Errorf` – for constructing error messages.
* `metav1.APIResourceList` and `metav1.APIResource` types from Kubernetes API machinery. | +| **Side effects** | None. Pure function; no mutation of arguments or external state. | +| **How it fits the package** | Used by higher‑level helper functions (e.g., `followOwnerReferences`) to translate owner references into concrete resource definitions required for dynamic client lookups. | + +#### Internal workflow + +```mermaid +flowchart TD + A["searchAPIResource"] --> B{"for each apiList"} + B -->|"matches GroupVersion?"| C{"found?"} + C -->|"yes"| D["return APIResource"] + C -->|"no"| E["continue loop"] + B --> F["loop finished"] + F --> G["return error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_searchAPIResource --> fmt.Errorf +``` + +#### Functions calling `searchAPIResource` + +```mermaid +graph TD + func_followOwnerReferences --> func_searchAPIResource +``` + +#### Usage example (Go) + +```go +// Minimal example invoking searchAPIResource +package main + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func main() { + // Pretend we have a list of API resources (normally fetched from the server) + apiList := &metav1.APIResourceList{ + GroupVersion: "v1", + APIResources: []metav1.APIResource{ + {Name: "pods", Kind: "Pod", Namespaced: true}, + }, + } + resources := []*metav1.APIResourceList{apiList} + + resource, err := searchAPIResource("Pod", "v1", resources) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + fmt.Printf("Found resource: %+v\n", resource) +} +``` + +--- diff --git a/docs/pkg/postmortem/postmortem.md b/docs/pkg/postmortem/postmortem.md new file mode 100644 index 000000000..c171169ca --- /dev/null +++ b/docs/pkg/postmortem/postmortem.md @@ -0,0 +1,102 @@ +# Package postmortem + +**Path**: `pkg/postmortem` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [Log](#log) + +## Overview + +The 
postmortem package generates a human‑readable report summarizing the state of a test environment after a run. It gathers node taints, pending pods, and abnormal events to help diagnose failures. + +### Key Features + +- Collects diagnostic data from the latest test environment via provider.GetTestEnvironment +- Formats the collected information into a readable string +- Allows optional refresh of cached environment data + +### Design Notes + +- Assumes provider exposes current test environment; errors are ignored in Log for simplicity +- Log does not expose detailed structs, keeping API surface minimal +- Best practice: call Log after each test run to capture state before cleanup + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func Log() (out string)](#log) | Builds a human‑readable report containing node taints, pending pods, and abnormal events from the latest test environment. | + +## Exported Functions + +### Log + +**Log** - Builds a human‑readable report containing node taints, pending pods, and abnormal events from the latest test environment. + +#### Signature (Go) + +```go +func Log() (out string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a human‑readable report containing node taints, pending pods, and abnormal events from the latest test environment. | +| **Parameters** | None | +| **Return value** | `string` – multiline diagnostic text | +| **Key dependencies** | • `provider.GetTestEnvironment()`
• `env.SetNeedsRefresh()`
• `fmt.Sprintf`
• `p.String()`, `e.String()` methods | +| **Side effects** | * Reads the current environment (no mutation).
* Calls `SetNeedsRefresh` to flag that a fresh snapshot is required. | +| **How it fits the package** | Supplies post‑mortem data for test diagnostics, invoked by tests when they need detailed context after failures. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Get current environment"] --> B["Mark needs refresh"] + B --> C["Retrieve fresh environment"] + C --> D["Build node status section"] + D --> E["Build pending pods section"] + E --> F["Build abnormal events section"] + F --> G["Return concatenated string"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Log --> func_GetTestEnvironment + func_Log --> func_SetNeedsRefresh + func_Log --> func_Sprintf + func_Log --> func_String +``` + +#### Functions calling `Log` + +```mermaid +graph TD + func_testPodsRecreation --> func_Log +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Log() +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/postmortem" +) + +func main() { + report := postmortem.Log() + fmt.Println(report) +} +``` + +--- diff --git a/docs/pkg/provider/provider.md b/docs/pkg/provider/provider.md new file mode 100644 index 000000000..1f157397f --- /dev/null +++ b/docs/pkg/provider/provider.md @@ -0,0 +1,9170 @@ +# Package provider + +**Path**: `pkg/provider` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [CniNetworkInterface](#cninetworkinterface) + - [Container](#container) + - [ContainerImageIdentifier](#containerimageidentifier) + - [CrScale](#crscale) + - [CsvInstallPlan](#csvinstallplan) + - [Deployment](#deployment) + - [Event](#event) + - [MachineConfig](#machineconfig) + - [Node](#node) + - [Operator](#operator) + - [Pod](#pod) + - [PreflightResultsDB](#preflightresultsdb) + - [PreflightTest](#preflighttest) + - [ScaleObject](#scaleobject) + - [StatefulSet](#statefulset) + - [TestEnvironment](#testenvironment) +- [Exported Functions](#exported-functions) + - 
[AreCPUResourcesWholeUnits](#arecpuresourceswholeunits) + - [AreResourcesIdentical](#areresourcesidentical) + - [Container.GetUID](#container.getuid) + - [Container.HasExecProbes](#container.hasexecprobes) + - [Container.HasIgnoredContainerName](#container.hasignoredcontainername) + - [Container.IsContainerRunAsNonRoot](#container.iscontainerrunasnonroot) + - [Container.IsContainerRunAsNonRootUserID](#container.iscontainerrunasnonrootuserid) + - [Container.IsIstioProxy](#container.isistioproxy) + - [Container.IsReadOnlyRootFilesystem](#container.isreadonlyrootfilesystem) + - [Container.IsTagEmpty](#container.istagempty) + - [Container.SetPreflightResults](#container.setpreflightresults) + - [Container.String](#container.string) + - [Container.StringLong](#container.stringlong) + - [ConvertArrayPods](#convertarraypods) + - [CrScale.IsScaleObjectReady](#crscale.isscaleobjectready) + - [CrScale.ToString](#crscale.tostring) + - [CsvToString](#csvtostring) + - [Deployment.IsDeploymentReady](#deployment.isdeploymentready) + - [Deployment.ToString](#deployment.tostring) + - [Event.String](#event.string) + - [GetAllOperatorGroups](#getalloperatorgroups) + - [GetCatalogSourceBundleCount](#getcatalogsourcebundlecount) + - [GetPciPerPod](#getpciperpod) + - [GetPodIPsPerNet](#getpodipspernet) + - [GetPreflightResultsDB](#getpreflightresultsdb) + - [GetRuntimeUID](#getruntimeuid) + - [GetTestEnvironment](#gettestenvironment) + - [GetUpdatedCrObject](#getupdatedcrobject) + - [GetUpdatedDeployment](#getupdateddeployment) + - [GetUpdatedStatefulset](#getupdatedstatefulset) + - [IsOCPCluster](#isocpcluster) + - [LoadBalancingDisabled](#loadbalancingdisabled) + - [NewContainer](#newcontainer) + - [NewEvent](#newevent) + - [NewPod](#newpod) + - [Node.GetCSCOSVersion](#node.getcscosversion) + - [Node.GetRHCOSVersion](#node.getrhcosversion) + - [Node.GetRHELVersion](#node.getrhelversion) + - [Node.HasWorkloadDeployed](#node.hasworkloaddeployed) + - [Node.IsCSCOS](#node.iscscos) + - 
[Node.IsControlPlaneNode](#node.iscontrolplanenode) + - [Node.IsHyperThreadNode](#node.ishyperthreadnode) + - [Node.IsRHCOS](#node.isrhcos) + - [Node.IsRHEL](#node.isrhel) + - [Node.IsRTKernel](#node.isrtkernel) + - [Node.IsWorkerNode](#node.isworkernode) + - [Node.MarshalJSON](#node.marshaljson) + - [Operator.SetPreflightResults](#operator.setpreflightresults) + - [Operator.String](#operator.string) + - [Pod.AffinityRequired](#pod.affinityrequired) + - [Pod.CheckResourceHugePagesSize](#pod.checkresourcehugepagessize) + - [Pod.ContainsIstioProxy](#pod.containsistioproxy) + - [Pod.CreatedByDeploymentConfig](#pod.createdbydeploymentconfig) + - [Pod.GetRunAsNonRootFalseContainers](#pod.getrunasnonrootfalsecontainers) + - [Pod.GetTopOwner](#pod.gettopowner) + - [Pod.HasHugepages](#pod.hashugepages) + - [Pod.HasNodeSelector](#pod.hasnodeselector) + - [Pod.IsAffinityCompliant](#pod.isaffinitycompliant) + - [Pod.IsAutomountServiceAccountSetOnSA](#pod.isautomountserviceaccountsetonsa) + - [Pod.IsCPUIsolationCompliant](#pod.iscpuisolationcompliant) + - [Pod.IsPodGuaranteed](#pod.ispodguaranteed) + - [Pod.IsPodGuaranteedWithExclusiveCPUs](#pod.ispodguaranteedwithexclusivecpus) + - [Pod.IsRunAsUserID](#pod.isrunasuserid) + - [Pod.IsRuntimeClassNameSpecified](#pod.isruntimeclassnamespecified) + - [Pod.IsShareProcessNamespace](#pod.isshareprocessnamespace) + - [Pod.IsUsingClusterRoleBinding](#pod.isusingclusterrolebinding) + - [Pod.IsUsingSRIOV](#pod.isusingsriov) + - [Pod.IsUsingSRIOVWithMTU](#pod.isusingsriovwithmtu) + - [Pod.String](#pod.string) + - [StatefulSet.IsStatefulSetReady](#statefulset.isstatefulsetready) + - [StatefulSet.ToString](#statefulset.tostring) + - [TestEnvironment.GetAffinityRequiredPods](#testenvironment.getaffinityrequiredpods) + - [TestEnvironment.GetBaremetalNodes](#testenvironment.getbaremetalnodes) + - [TestEnvironment.GetCPUPinningPodsWithDpdk](#testenvironment.getcpupinningpodswithdpdk) + - 
[TestEnvironment.GetDockerConfigFile](#testenvironment.getdockerconfigfile) + - [TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs](#testenvironment.getguaranteedpodcontainerswithexclusivecpus) + - [TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID](#testenvironment.getguaranteedpodcontainerswithexclusivecpuswithouthostpid) + - [TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID](#testenvironment.getguaranteedpodcontainerswithisolatedcpuswithouthostpid) + - [TestEnvironment.GetGuaranteedPods](#testenvironment.getguaranteedpods) + - [TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs](#testenvironment.getguaranteedpodswithexclusivecpus) + - [TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs](#testenvironment.getguaranteedpodswithisolatedcpus) + - [TestEnvironment.GetHugepagesPods](#testenvironment.gethugepagespods) + - [TestEnvironment.GetMasterCount](#testenvironment.getmastercount) + - [TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID](#testenvironment.getnonguaranteedpodcontainerswithouthostpid) + - [TestEnvironment.GetNonGuaranteedPods](#testenvironment.getnonguaranteedpods) + - [TestEnvironment.GetOfflineDBPath](#testenvironment.getofflinedbpath) + - [TestEnvironment.GetPodsUsingSRIOV](#testenvironment.getpodsusingsriov) + - [TestEnvironment.GetPodsWithoutAffinityRequiredLabel](#testenvironment.getpodswithoutaffinityrequiredlabel) + - [TestEnvironment.GetShareProcessNamespacePods](#testenvironment.getshareprocessnamespacepods) + - [TestEnvironment.GetWorkerCount](#testenvironment.getworkercount) + - [TestEnvironment.IsIntrusive](#testenvironment.isintrusive) + - [TestEnvironment.IsPreflightInsecureAllowed](#testenvironment.ispreflightinsecureallowed) + - [TestEnvironment.IsSNO](#testenvironment.issno) + - [TestEnvironment.SetNeedsRefresh](#testenvironment.setneedsrefresh) +- [Local Functions](#local-functions) + - [addOperandPodsToTestPods](#addoperandpodstotestpods) + - 
[addOperatorPodsToTestPods](#addoperatorpodstotestpods) + - [buildContainerImageSource](#buildcontainerimagesource) + - [buildTestEnvironment](#buildtestenvironment) + - [createNodes](#createnodes) + - [createOperators](#createoperators) + - [deployDaemonSet](#deploydaemonset) + - [filterDPDKRunningPods](#filterdpdkrunningpods) + - [filterPodsWithoutHostPID](#filterpodswithouthostpid) + - [getAtLeastOneCsv](#getatleastonecsv) + - [getAtLeastOneInstallPlan](#getatleastoneinstallplan) + - [getAtLeastOneSubscription](#getatleastonesubscription) + - [getCNCFNetworksNamesFromPodAnnotation](#getcncfnetworksnamesfrompodannotation) + - [getCatalogSourceBundleCountFromPackageManifests](#getcatalogsourcebundlecountfrompackagemanifests) + - [getCatalogSourceBundleCountFromProbeContainer](#getcatalogsourcebundlecountfromprobecontainer) + - [getCatalogSourceImageIndexFromInstallPlan](#getcatalogsourceimageindexfrominstallplan) + - [getContainers](#getcontainers) + - [getMachineConfig](#getmachineconfig) + - [getOperatorTargetNamespaces](#getoperatortargetnamespaces) + - [getPackageManifestWithSubscription](#getpackagemanifestwithsubscription) + - [getPodContainers](#getpodcontainers) + - [getSummaryAllOperators](#getsummaryalloperators) + - [getUniqueCsvListByName](#getuniquecsvlistbyname) + - [isNetworkAttachmentDefinitionConfigTypeSRIOV](#isnetworkattachmentdefinitionconfigtypesriov) + - [isNetworkAttachmentDefinitionSRIOVConfigMTUSet](#isnetworkattachmentdefinitionsriovconfigmtuset) + - [isSkipHelmChart](#isskiphelmchart) + - [searchPodInSlice](#searchpodinslice) + - [sriovNetworkUsesMTU](#sriovnetworkusesmtu) + - [updateCrUnderTest](#updatecrundertest) + +## Overview + +The provider package implements the core logic for gathering and analysing Kubernetes/Openshift cluster state used by CertSuite. 
It defines data structures that wrap API objects (Pods, Nodes, Operators, etc.), utilities for interpreting resource requests, CPU isolation rules, network attachment information, pre‑flight checks, and metrics about operator status. + +### Key Features + +- Extends native k8s types with test‑specific metadata and helper methods such as Pod.IsPodGuaranteed or Container.HasExecProbes +- Implements comprehensive discovery of cluster objects (pods, nodes, operators, CRDs) and exposes them via the TestEnvironment singleton +- Provides utilities for parsing CNI annotations, hugepage usage, SR‑IOV configuration, and running preflight checks against container images + +### Design Notes + +- The TestEnvironment is a lazily initialized global that caches discovery results; callers should invoke GetTestEnvironment to obtain it +- Pod/Container methods rely on embedded core types but expose domain logic through small, well‑named functions to aid readability +- Large amounts of string constants (e.g., label names) are defined at package level for reuse and to avoid magic strings + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**CniNetworkInterface**](#cninetworkinterface) | Struct definition | +| [**Container**](#container) | One‑line purpose | +| [**ContainerImageIdentifier**](#containerimageidentifier) | Struct definition | +| [**CrScale**](#crscale) | Struct definition | +| [**CsvInstallPlan**](#csvinstallplan) | Struct definition | +| [**Deployment**](#deployment) | Wrapper around a Kubernetes Deployment object | +| [**Event**](#event) | One-line purpose | +| [**MachineConfig**](#machineconfig) | Struct definition | +| [**Node**](#node) | Wrapper around a Kubernetes node with optional machine‑configuration data | +| [**Operator**](#operator) | One-line purpose | +| [**Pod**](#pod) | Represents a Kubernetes pod with extended test‑specific metadata | +| [**PreflightResultsDB**](#preflightresultsdb) | Container pre‑flight test results | +| 
[**PreflightTest**](#preflighttest) | Represents a single preflight test result | +| [**ScaleObject**](#scaleobject) | Holds scaling configuration for a custom resource | +| [**StatefulSet**](#statefulset) | Wrapper around a Kubernetes StatefulSet object | +| [**TestEnvironment**](#testenvironment) | Holds the state of a test run | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func AreCPUResourcesWholeUnits(p *Pod) bool](#arecpuresourceswholeunits) | Determines whether every container in the given `*Pod` specifies CPU requests and limits that are multiples of 1000 milli‑CPUs (i.e., whole cores). If any value is missing or not a multiple of 1000, it logs a debug message and returns `false`. | +| [func AreResourcesIdentical(p *Pod) bool](#areresourcesidentical) | Ensures each container in the supplied `*Pod` has matching CPU and memory requests and limits. Returns `true` only if all containers meet this condition; otherwise, it logs a debug message and returns `false`. | +| [func (c *Container) GetUID() (string, error)](#container.getuid) | Extracts the UID of a running container from its status and logs diagnostic information. | +| [func (c *Container) HasExecProbes() bool](#container.hasexecprobes) | Returns `true` when the container defines at least one probe that uses an executable command (`Exec`) for liveness, readiness, or startup checks. | +| [func (c *Container) HasIgnoredContainerName() bool](#container.hasignoredcontainername) | Determines if the current container is in an ignore list, including Istio proxy containers. | +| [func (c *Container) IsContainerRunAsNonRoot(podRunAsNonRoot *bool) (isContainerRunAsNonRoot bool, reason string)](#container.iscontainerrunasnonroot) | Returns whether the container is configured to run as a non‑root user and explains how that value was derived. | +| [func (c *Container) IsContainerRunAsNonRootUserID(podRunAsNonRootUserID *int64) (bool, string)](#container.iscontainerrunasnonrootuserid) | Checks whether the container’s `RunAsUser` security context indicates it runs as a non‑root user. 
It also explains how pod‑level defaults are applied when the container level is unspecified. | +| [func (c *Container) IsIstioProxy() bool](#container.isistioproxy) | Checks whether the container represents the Istio side‑car proxy by comparing its name to a predefined constant. | +| [func (c *Container) IsReadOnlyRootFilesystem(logger *log.Logger) bool](#container.isreadonlyrootfilesystem) | Checks whether the `SecurityContext.ReadOnlyRootFilesystem` flag of a container is set to `true`. | +| [func (c *Container) IsTagEmpty() bool](#container.istagempty) | Checks whether the `Tag` field of the container’s image identifier is an empty string, indicating that no specific tag was supplied. | +| [func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error](#container.setpreflightresults) | Runs the OpenShift‑preflight checks against a container image, caches the results per image, and stores them in the `Container` instance. | +| [func (c *Container) String() string](#container.string) | Generates a concise description of a `Container` instance, including its name, pod name, and namespace. | +| [func (c *Container) StringLong() string](#container.stringlong) | Generates a descriptive string containing key metadata about a Kubernetes container, including node, namespace, pod name, container name, UID, and runtime. | +| [func ConvertArrayPods(pods []*corev1.Pod) (out []*Pod)](#convertarraypods) | Transforms each `*corev1.Pod` from the Kubernetes API into a corresponding `*Pod` value defined by the provider package, preserving all relevant metadata and network information. | +| [func (crScale CrScale) IsScaleObjectReady() bool](#crscale.isscaleobjectready) | Determines if the scale object's `Status.Replicas` equals the desired `Spec.Replicas`, indicating readiness. 
| +| [func (crScale CrScale) ToString() string](#crscale.tostring) | Returns a formatted string that identifies the custom resource scale, showing both its name and namespace. | +| [func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string](#csvtostring) | Creates a human‑readable representation of a CSV, showing its name and namespace. | +| [func (d *Deployment) IsDeploymentReady() bool](#deployment.isdeploymentready) | Evaluates whether the deployment satisfies all readiness conditions: an `Available` condition is present, replica counts match specifications, and no replicas are marked unavailable. | +| [func (d *Deployment) ToString() string](#deployment.tostring) | Formats the deployment’s `Name` and `Namespace` into a single descriptive string. | +| [func (e *Event) String() string](#event.string) | Returns a formatted string summarizing the event’s timestamp, involved object, reason, and message. | +| [func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error)](#getalloperatorgroups) | Queries the Kubernetes API for all `OperatorGroup` objects in the default namespace and returns a slice of pointers to them. Handles “not found” cases gracefully by returning `nil` without an error. | +| [func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int](#getcatalogsourcebundlecount) | Counts the number of bundle images that belong to a given `CatalogSource`. It selects the counting strategy based on the OpenShift version: for ≤ 4.12 it queries a probe container; otherwise it tallies entries from package manifests. | +| [func GetPciPerPod(annotation string) (pciAddr []string, err error)](#getpciperpod) | Parses the JSON network‑status annotation of a pod and extracts all PCI addresses associated with its network interfaces. 
| +| [func GetPodIPsPerNet(annotation string) (ips map[string]CniNetworkInterface, err error)](#getpodipspernet) | Parses the `k8s.v1.cni.cncf.io/networks-status` annotation to extract all non‑default network interfaces and their IP addresses for a pod. | +| [func GetPreflightResultsDB(results *plibRuntime.Results) PreflightResultsDB](#getpreflightresultsdb) | Builds a `PreflightResultsDB` from the raw preflight runtime results, separating passed, failed and error tests into distinct slices. | +| [func GetRuntimeUID(cs *corev1.ContainerStatus) (runtime, uid string)](#getruntimeuid) | Parses the `ContainerID` field of a `ContainerStatus` to separate the runtime prefix and the unique identifier (UID). | +| [func GetTestEnvironment() TestEnvironment](#gettestenvironment) | Provides read‑only access to the singleton `env` that holds all runtime discovery data. It lazily builds the environment on first call and then returns the cached instance. | +| [func GetUpdatedCrObject(sg scale.ScalesGetter, namespace string, name string, groupResourceSchema schema.GroupResource) (*CrScale, error)](#getupdatedcrobject) | Obtains a `scalingv1.Scale` object for the specified custom resource and encapsulates it in a `CrScale`. | +| [func GetUpdatedDeployment(ac appv1client.AppsV1Interface, namespace, name string) (*Deployment, error)](#getupdateddeployment) | Fetches a Kubernetes Deployment by its namespace and name, wraps the result in the package‑specific `Deployment` type, and returns it. | +| [func GetUpdatedStatefulset(ac appv1client.AppsV1Interface, namespace, name string) (*StatefulSet, error)](#getupdatedstatefulset) | Fetches the latest Kubernetes StatefulSet identified by *namespace* and *name*, wrapping it in the package‑specific `StatefulSet` type for downstream logic. | +| [func IsOCPCluster() bool](#isocpcluster) | Checks whether the test environment represents an OpenShift cluster by comparing the stored version string to a sentinel value for non‑OpenShift clusters. 
| +| [func LoadBalancingDisabled(p *Pod) bool](#loadbalancingdisabled) | Determines if both the `cpu-load-balancing.crio.io` and `irq-load-balancing.crio.io` annotations on a pod are set to `"disable"`. If either annotation is missing or has an invalid value, it logs a debug message and returns `false`. | +| [func NewContainer() *Container](#newcontainer) | Initializes a fresh `Container` struct, embedding an empty `corev1.Container` to serve as the foundation for further configuration. | +| [func NewEvent(aEvent *corev1.Event) (out Event)](#newevent) | Creates an `Event` wrapper around a Kubernetes core event, preserving the original object for further use. | +| [func NewPod(aPod *corev1.Pod) (out Pod)](#newpod) | Wraps a raw Kubernetes `Pod` into the library’s `Pod` type, enriching it with network interface data and container metadata. | +| [func (node *Node) GetCSCOSVersion() (string, error)](#node.getcscosversion) | Extracts and returns the CoreOS (CentOS Stream CoreOS) version string from a node’s OS image field. It validates that the node is running a supported CoreOS distribution before parsing. | +| [func (node *Node) GetRHCOSVersion() (string, error)](#node.getrhcosversion) | Extracts the concise Red Hat Enterprise Linux CoreOS (RHCOS) version from a node’s OS image string. It validates that the node is running RHCOS and converts the full “long” version to its short form. | +| [func (node *Node) GetRHELVersion() (string, error)](#node.getrhelversion) | Returns the Red Hat Enterprise Linux (RHEL) release number extracted from the node’s OS image string. | +| [func (node *Node) HasWorkloadDeployed(podsUnderTest []*Pod) bool](#node.hasworkloaddeployed) | Returns `true` if at least one Pod in `podsUnderTest` has its `Spec.NodeName` equal to the Node’s name, indicating that a workload is running on this node. Otherwise returns `false`. 
| +| [func (node *Node) IsCSCOS() bool](#node.iscscos) | Returns `true` when the node’s operating system image indicates it is running CoreOS. The check trims surrounding whitespace and looks for a predefined identifier (`cscosName`). | +| [func (node *Node) IsControlPlaneNode() bool](#node.iscontrolplanenode) | Returns `true` if the node has at least one label that matches any of the predefined control‑plane labels. | +| [func (node *Node) IsHyperThreadNode(env *TestEnvironment) (bool, error)](#node.ishyperthreadnode) | Checks whether the node identified by `node` has more than one thread per core by inspecting its probe pod. | +| [func (node *Node) IsRHCOS() bool](#node.isrhcos) | Determines whether the operating system image of a Kubernetes node corresponds to Red Hat Enterprise Linux CoreOS (RHCOS). | +| [func (node *Node) IsRHEL() bool](#node.isrhel) | Determines whether the node’s operating system image is a Red Hat Enterprise Linux (RHEL) release. | +| [func (node *Node) IsRTKernel() bool](#node.isrtkernel) | Checks whether the node’s kernel version string contains the substring “rt”, indicating a real‑time kernel. | +| [func (node *Node) IsWorkerNode() bool](#node.isworkernode) | Returns `true` if the node’s labels contain any key that matches one of the predefined worker‑label identifiers. | +| [func (node Node) MarshalJSON() ([]byte, error)](#node.marshaljson) | Provides JSON encoding of the `Node.Data` field, enabling a `Node` value to be marshaled directly by `encoding/json`. | +| [func (op *Operator) SetPreflightResults(env *TestEnvironment) error](#operator.setpreflightresults) | Executes a Preflight container check against the operator’s bundle and index images, collects the results, logs output, and assigns them to `op.PreflightResults`. Skips execution if no install plans are present. | +| [func (op *Operator) String() string](#operator.string) | Formats the fields of an `Operator` instance into a single string for logging or debugging. 
| +| [func (p *Pod) AffinityRequired() bool](#pod.affinityrequired) | Checks the pod’s labels for the key `AffinityRequiredKey`. If present, parses its string value as a boolean and returns that result. Defaults to `false` if the label is absent or invalid. | +| [func (p *Pod) CheckResourceHugePagesSize(size string) bool](#pod.checkresourcehugepagessize) | Ensures every `hugepages-*` resource request and limit in the pod’s containers matches the supplied `size`. If any huge‑page resource differs, it returns `false`; otherwise `true`. | +| [func (p *Pod) ContainsIstioProxy() bool](#pod.containsistioproxy) | Determines whether any container in the pod has the name specified by `IstioProxyContainerName`. | +| [func (p *Pod) CreatedByDeploymentConfig() (bool, error)](#pod.createdbydeploymentconfig) | Determines whether the pod originates from an OpenShift `DeploymentConfig` by traversing owner references through a `ReplicationController`. | +| [func (p *Pod) GetRunAsNonRootFalseContainers(knownContainersToSkip map[string]bool) ([]*Container, []string)](#pod.getrunasnonrootfalsecontainers) | Returns containers that either have `securityContext.runAsNonRoot` set to false or `securityContext.runAsUser` set to 0 (both indicating a root user). Pod‑level defaults are respected if container values are missing. | +| [func (p *Pod) GetTopOwner() (topOwners map[string]podhelper.TopOwner, err error)](#pod.gettopowner) | Returns a mapping of top‑level owner identifiers to `podhelper.TopOwner` structs for the Pod instance. | +| [func (p *Pod) HasHugepages() bool](#pod.hashugepages) | Determines whether any container within the pod requests or limits a hugepage resource. | +| [func (p *Pod) HasNodeSelector() bool](#pod.hasnodeselector) | Determines if the pod’s specification includes at least one key/value pair in `Spec.NodeSelector`, indicating that it targets specific nodes. 
| +| [func (p *Pod) IsAffinityCompliant() (bool, error)](#pod.isaffinitycompliant) | Validates that a pod has appropriate affinity rules when an `AffinityRequired` flag is set; returns `true` if compliant, otherwise `false` with descriptive error. | +| [func (p *Pod) IsAutomountServiceAccountSetOnSA() (*bool, error)](#pod.isautomountserviceaccountsetonsa) | Determines whether the `AutomountServiceAccountToken` field is set for the service account associated with the pod. | +| [func (p *Pod) IsCPUIsolationCompliant() bool](#pod.iscpuisolationcompliant) | Checks that the pod has correct annotations for disabling CPU and IRQ load balancing and specifies a runtime class name, indicating compliance with CPU isolation requirements. | +| [func (p *Pod) IsPodGuaranteed() bool](#pod.ispodguaranteed) | Checks whether all containers in the pod have identical CPU and memory requests and limits, thereby qualifying the pod for the *Guaranteed* QoS class. | +| [func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool](#pod.ispodguaranteedwithexclusivecpus) | Checks that all containers in a pod request and limit the same integer number of CPUs (no fractional milli‑CPU values). | +| [func (p *Pod) IsRunAsUserID(uid int64) bool](#pod.isrunasuserid) | Determines whether the `Pod`’s security context is configured to run as a specific user ID. | +| [func (p *Pod) IsRuntimeClassNameSpecified() bool](#pod.isruntimeclassnamespecified) | Determines if the pod’s spec contains a non‑nil `RuntimeClassName`, indicating that a runtime class has been specified. | +| [func (p *Pod) IsShareProcessNamespace() bool](#pod.isshareprocessnamespace) | Determines if a pod is configured to share its process namespace with other containers in the same pod. This is used for selecting pods that have `shareProcessNamespace: true` set in their spec. 
| +| [func (p *Pod) IsUsingClusterRoleBinding(clusterRoleBindings []rbacv1.ClusterRoleBinding, logger *log.Logger) (bool, string, error)](#pod.isusingclusterrolebinding) | Checks whether the pod’s service account is referenced as a subject in any provided `ClusterRoleBinding`. Returns a flag, the name of the bound role, and an error if logging fails. | +| [func (p *Pod) IsUsingSRIOV() (bool, error)](#pod.isusingsriov) | Returns `true` when at least one of the pod’s attached networks is configured as SR‑I/O‑V. It inspects the pod’s CNCF CNI annotation and checks each referenced NetworkAttachmentDefinition (NAD). | +| [func (p *Pod) IsUsingSRIOVWithMTU() (bool, error)](#pod.isusingsriovwithmtu) | Returns `true` when any network attachment of the pod is an SR‑IOV type and its MTU has been set via the corresponding `SriovNetworkNodePolicy`. | +| [func (p *Pod) String() string](#pod.string) | Provide a concise string representation of a `Pod`, useful for logging and debugging. | +| [func (ss *StatefulSet) IsStatefulSetReady() bool](#statefulset.isstatefulsetready) | Determines if the StatefulSet’s current status matches the specified number of replicas. It ensures that all replicas are ready, running, and updated. | +| [func (ss *StatefulSet) ToString() string](#statefulset.tostring) | Produces a human‑readable representation of a StatefulSet, including its name and namespace. | +| [func (env *TestEnvironment) GetAffinityRequiredPods() []*Pod](#testenvironment.getaffinityrequiredpods) | Returns a slice of `*Pod` objects from the test environment that have an affinity requirement. | +| [func (env *TestEnvironment) GetBaremetalNodes() []Node](#testenvironment.getbaremetalnodes) | Filters the `TestEnvironment.Nodes` slice and returns only those whose provider ID indicates a bare‑metal host (`"baremetalhost://"` prefix). 
| +| [func (env *TestEnvironment) GetCPUPinningPodsWithDpdk() []*Pod](#testenvironment.getcpupinningpodswithdpdk) | Retrieves all pods in the test environment that have CPU pinning enabled and are using DPDK for networking. | +| [func (env *TestEnvironment) GetDockerConfigFile() string](#testenvironment.getdockerconfigfile) | Returns the file path to the Docker configuration JSON used for preflight checks. | +| [func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUs() []*Container](#testenvironment.getguaranteedpodcontainerswithexclusivecpus) | Returns a slice of `*Container` objects that belong to pods having exclusive CPU guarantees within the test environment. | +| [func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() []*Container](#testenvironment.getguaranteedpodcontainerswithexclusivecpuswithouthostpid) | Returns a slice of `*Container` objects belonging to pods that are guaranteed to use exclusive CPUs and have the host PID feature disabled. | +| [func (env *TestEnvironment) GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() []*Container](#testenvironment.getguaranteedpodcontainerswithisolatedcpuswithouthostpid) | Returns a slice of `*Container` objects belonging to pods that are guaranteed, use isolated CPUs, and do not set the HostPID flag. | +| [func (env *TestEnvironment) GetGuaranteedPods() []*Pod](#testenvironment.getguaranteedpods) | Filters and returns all pods in the test environment that satisfy the “guaranteed” criteria defined by `Pod.IsPodGuaranteed`. | +| [func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod](#testenvironment.getguaranteedpodswithexclusivecpus) | Filters the `TestEnvironment`’s pod list to include only those pods that are guaranteed to have exclusive CPUs. 
| +| [func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod](#testenvironment.getguaranteedpodswithisolatedcpus) | Filters the environment’s pod list to return only those that are guaranteed to run on exclusive CPU units and comply with CPU‑isolation requirements. | +| [func (env *TestEnvironment) GetHugepagesPods() []*Pod](#testenvironment.gethugepagespods) | Filters the pods stored in a `TestEnvironment` to return only those that declare huge‑page memory requests or limits. | +| [func (env *TestEnvironment) GetMasterCount() int](#testenvironment.getmastercount) | Determines how many nodes in the test environment are designated as master (control‑plane) nodes. | +| [func (env *TestEnvironment) GetNonGuaranteedPodContainersWithoutHostPID() []*Container](#testenvironment.getnonguaranteedpodcontainerswithouthostpid) | Returns a slice of `*Container` objects belonging to pods that are not guaranteed and have the `HostPID` feature disabled. | +| [func (env *TestEnvironment) GetNonGuaranteedPods() []*Pod](#testenvironment.getnonguaranteedpods) | Returns all pods in the environment that are not guaranteed, i.e., whose resource requests do not match limits. | +| [func (env *TestEnvironment) GetOfflineDBPath() string](#testenvironment.getofflinedbpath) | Returns the filesystem location of the offline database used by the test environment. | +| [func (env *TestEnvironment) GetPodsUsingSRIOV() ([]*Pod, error)](#testenvironment.getpodsusingsriov) | Returns all `Pod` instances in the test environment that are configured to use SR‑IOV networking. | +| [func (env *TestEnvironment) GetPodsWithoutAffinityRequiredLabel() []*Pod](#testenvironment.getpodswithoutaffinityrequiredlabel) | Returns a slice of `Pod` objects that do not have the affinity required label. It iterates over the pods in the `TestEnvironment`, filters out those that carry the label, and returns the remaining pods as a slice. 
| +| [func (env *TestEnvironment) GetShareProcessNamespacePods() []*Pod](#testenvironment.getshareprocessnamespacepods) | Returns all `Pod` instances within the environment whose `Spec.ShareProcessNamespace` flag is set to true. | +| [func (env *TestEnvironment) GetWorkerCount() int](#testenvironment.getworkercount) | Counts how many nodes within the `TestEnvironment` are designated as worker nodes. | +| [func (env *TestEnvironment) IsIntrusive() bool](#testenvironment.isintrusive) | Exposes whether the current `TestEnvironment` is configured to run intrusive tests. | +| [func (env *TestEnvironment) IsPreflightInsecureAllowed() bool](#testenvironment.ispreflightinsecureallowed) | Returns whether the test environment allows insecure network connections when running Preflight checks. | +| [func (env *TestEnvironment) IsSNO() bool](#testenvironment.issno) | Checks whether the current `TestEnvironment` consists of exactly one node, indicating a Single‑Node OpenShift (SNO) configuration. | +| [func (env *TestEnvironment) SetNeedsRefresh()](#testenvironment.setneedsrefresh) | Flags that the current test environment has become stale and must be refreshed before subsequent use. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func addOperandPodsToTestPods(operandPods []*Pod, env *TestEnvironment)](#addoperandpodstotestpods) | Incorporates operand pods into the environment’s pod collection, ensuring duplicates are avoided and proper flags are set. | +| [func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment)](#addoperatorpodstotestpods) | Ensures that all operator pods are represented in the environment’s pod collection. If a pod is already present it is marked as an operator; otherwise it is appended to the list. 
| +| [func buildContainerImageSource(urlImage, urlImageID string) (source ContainerImageIdentifier)](#buildcontainerimagesource) | Parses the container image URL and the runtime‑reported image ID to populate a `ContainerImageIdentifier` struct with registry, repository, tag, and digest information. | +| [func buildTestEnvironment()](#buildtestenvironment) | Initializes global test environment state, loads configuration, deploys probe daemonset (if possible), performs autodiscovery of cluster resources, and populates the `env` variable with all discovered data. | +| [func createNodes(nodes []corev1.Node) map[string]Node](#createnodes) | Converts a slice of Kubernetes `Node` objects into a map keyed by node name, optionally enriching each entry with its MachineConfig when running on an OpenShift cluster. | +| [func createOperators( csvs []*olmv1Alpha.ClusterServiceVersion, allSubscriptions []olmv1Alpha.Subscription, allPackageManifests []*olmpkgv1.PackageManifest, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource, succeededRequired bool, keepCsvDetails bool) []*Operator](#createoperators) | Builds a slice of `*Operator` objects that summarize each unique ClusterServiceVersion (CSV). It enriches operators with subscription, install‑plan, and namespace information. | +| [func deployDaemonSet(namespace string) error](#deploydaemonset) | Ensures that the CertSuite probe daemon set is running in the specified Kubernetes namespace. If it already exists and is ready, the function exits immediately; otherwise it creates the daemon set and waits for readiness. | +| [func filterDPDKRunningPods(pods []*Pod) []*Pod](#filterdpdkrunningpods) | From a list of pods, return only those whose first Multus PCI address contains the DPDK driver `vfio-pci`. 
| +| [func filterPodsWithoutHostPID(pods []*Pod) []*Pod](#filterpodswithouthostpid) | Returns a slice containing only the pods from the input list whose `Spec.HostPID` field is false, effectively excluding any pod that shares the host’s PID namespace. | +| [func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1Alpha.InstallPlan) (atLeastOneCsv bool)](#getatleastonecsv) | Checks whether the provided `InstallPlan` references the specified `ClusterServiceVersion`. It verifies that the CSV name appears in the plan’s list and that the plan has bundle lookup data. | +| [func getAtLeastOneInstallPlan( op *Operator, csv *olmv1Alpha.ClusterServiceVersion, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource, ) (atLeastOneInstallPlan bool)](#getatleastoneinstallplan) | For a given operator, identifies at least one `InstallPlan` that installs the supplied CSV in the operator’s subscription namespace and records it in the operator’s data. | +| [func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, subscriptions []olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest)(bool)](#getatleastonesubscription) | Finds the first subscription whose `InstalledCSV` matches the provided CSV and populates the `Operator` with subscription metadata; retrieves the default channel from a matching package manifest when needed. | +| [func getCNCFNetworksNamesFromPodAnnotation(networksAnnotation string) []string](#getcncfnetworksnamesfrompodannotation) | Parses the value of the `k8s.v1.cni.cncf.io/networks` annotation and returns only the network names. Supports both comma‑separated lists and JSON array of objects. 
| +| [func getCatalogSourceBundleCountFromPackageManifests(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int](#getcatalogsourcebundlecountfrompackagemanifests) | Determines the number of bundle images that belong to a specific `CatalogSource` by inspecting all package manifests in the test environment. | +| [func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int](#getcatalogsourcebundlecountfromprobecontainer) | Determines the number of bundles in a `CatalogSource` by querying its associated service through a probe pod using `grpcurl`. | +| [func getCatalogSourceImageIndexFromInstallPlan(installPlan *olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (string, error)](#getcatalogsourceimageindexfrominstallplan) | Extracts the `Spec.Image` value of the catalog source that an install plan references. This image is used as the index image for the operator bundle. | +| [func getContainers(pods []*Pod) []*Container](#getcontainers) | Gathers every `Container` object present in the supplied slice of `*Pod`. | +| [func getMachineConfig(mcName string, machineConfigs map[string]MachineConfig) (MachineConfig, error)](#getmachineconfig) | Fetches an OpenShift `MachineConfig` by name, caches it in the supplied map to avoid duplicate API calls, and parses its raw configuration into a structured `MachineConfig` value. | +| [func getOperatorTargetNamespaces(namespace string) ([]string, error)](#getoperatortargetnamespaces) | Queries the OpenShift Cluster‑Lifecycle‑Manager API to fetch the first `OperatorGroup` in a given namespace and returns its target namespaces. | +| [func getPackageManifestWithSubscription(*olmv1Alpha.Subscription, []*olmpkgv1.PackageManifest) *olmpkgv1.PackageManifest](#getpackagemanifestwithsubscription) | Finds and returns the `PackageManifest` that corresponds to a specific `Subscription`. It matches on package name, catalog source namespace, and catalog source. 
| +| [func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container)](#getpodcontainers) | Builds a slice of `*Container` objects representing each container in the supplied Pod, enriched with status and runtime information. It optionally skips containers that match an ignore list. | +| [func getSummaryAllOperators(operators []*Operator) (summary []string)](#getsummaryalloperators) | Builds a unique, human‑readable summary for each operator, including phase, package name, version, and namespace scope. Returns the summaries sorted alphabetically. | +| [func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Alpha.ClusterServiceVersion](#getuniquecsvlistbyname) | Filters a slice of CSV objects so that each distinct `csv.Name` appears only once, then returns the list sorted by name. | +| [func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error)](#isnetworkattachmentdefinitionconfigtypesriov) | Checks whether the JSON configuration of a NetworkAttachmentDefinition contains an SR‑IOV plugin. It supports both single‑plugin and multi‑plugin CNI specifications. | +| [func isNetworkAttachmentDefinitionSRIOVConfigMTUSet(nadConfig string) (bool, error)](#isnetworkattachmentdefinitionsriovconfigmtuset) | Parses a CNI configuration JSON and checks whether any SR‑IOV plugin declares an MTU value greater than zero. | +| [func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelmChartList) bool](#isskiphelmchart) | Checks whether a Helm chart identified by `helmName` appears in the provided list of charts to skip (`skipHelmChartList`). If found, logs the event and returns `true`; otherwise returns `false`. | +| [func searchPodInSlice(name, namespace string, pods []*Pod) *Pod](#searchpodinslice) | Returns the first `*Pod` from `pods` that matches the supplied `name` and `namespace`. If none match, returns `nil`. 
| +| [func sriovNetworkUsesMTU(sriovNetworks, sriovNetworkNodePolicies []unstructured.Unstructured, nadName string) bool](#sriovnetworkusesmtu) | Determines whether a SriovNetwork identified by `nadName` has an MTU value defined in any corresponding SriovNetworkNodePolicy. | +| [func updateCrUnderTest(scaleCrUnderTest []autodiscover.ScaleObject) []ScaleObject](#updatecrundertest) | Transforms a slice of `autodiscover.ScaleObject` into the package’s own `ScaleObject` representation, preserving scaling data and resource schema. | + +## Structs + +### CniNetworkInterface + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Default` | `bool` | Field documentation | +| `DNS` | `map[string]interface{}` | Field documentation | +| `DeviceInfo` | `deviceInfo` | Field documentation | +| `Name` | `string` | Field documentation | +| `Interface` | `string` | Field documentation | +| `IPs` | `[]string` | Field documentation | + +--- + +### Container + +Represents a Kubernetes container enriched with runtime information and preflight test results for use in certification checks. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `*corev1.Container` | pointer to corev1.Container | Embedded standard Kubernetes container spec, providing fields such as Name, Image, SecurityContext, Probes, etc. | +| `Status` | corev1.ContainerStatus | Current status of the container (state, ready flag, restarts, image ID). | +| `Namespace` | string | Namespace in which the pod containing this container resides. | +| `Podname` | string | Name of the pod that owns this container. | +| `NodeName` | string | Node name where the pod is scheduled. | +| `Runtime` | string | Container runtime type extracted from the container status ID (e.g., `docker`, `containerd`). | +| `UID` | string | Unique identifier for the container within the node, parsed from the status ContainerID. 
| +| `ContainerImageIdentifier` | ContainerImageIdentifier | Parsed repository, name, tag and digest of the image used by this container. | +| `PreflightResults` | PreflightResultsDB | Cached results of preflight tests performed against the container’s image. | + +#### Purpose + +The `Container` struct aggregates both declarative (spec) and imperative (status) data about a container, along with computed metadata such as runtime type, UID, and parsed image identifiers. This enriched representation enables the test suite to perform compliance checks, run preflight scans, and report results without repeatedly querying Kubernetes APIs. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetUID` | Extracts and returns the container’s unique ID from its status. | +| `HasExecProbes` | Determines if any of the liveness, readiness, or startup probes are defined as exec commands. | +| `HasIgnoredContainerName` | Checks whether the container should be skipped based on a predefined ignore list (e.g., Istio proxies). | +| `IsContainerRunAsNonRoot` | Evaluates whether the container is configured to run as non‑root, considering pod‑level defaults. | +| `IsContainerRunAsNonRootUserID` | Checks if the container’s user ID setting guarantees a non‑zero UID, again respecting pod defaults. | +| `IsIstioProxy` | Identifies the special Istio proxy container by name. | +| `IsReadOnlyRootFilesystem` | Determines whether the container is configured with a read‑only root filesystem. | +| `IsTagEmpty` | Indicates if the parsed image tag is empty (i.e., no explicit tag was supplied). | +| `SetPreflightResults` | Executes preflight tests against the container’s image, caching results to avoid repeat work. | +| `String` | Provides a short string summary of the container (name, pod, namespace). | +| `StringLong` | Gives a detailed string with node, namespace, pod name, container name, UID and runtime. 
| + +--- + +--- + +### ContainerImageIdentifier + + +**Purpose**: Tag and Digest should not be populated at the same time. Digest takes precedence if both are populated + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Digest` | `string` | Field documentation | +| `Repository` | `string` | Field documentation | +| `Registry` | `string` | Field documentation | +| `Tag` | `string` | Field documentation | + +--- + +### CrScale + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `embedded:*scalingv1.Scale` | `*scalingv1.Scale` | Field documentation | + +--- + +### CsvInstallPlan + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `BundleImage` | `string` | Field documentation | +| `IndexImage` | `string` | Field documentation | +| `Name` | `string` | Field documentation | + +--- + +### Deployment + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `*appsv1.Deployment` | *appsv1.Deployment | Embedded Kubernetes Deployment resource providing all standard fields such as Spec, Status, ObjectMeta, etc. | + +#### Purpose + +The `Deployment` struct serves as a thin wrapper around the core Kubernetes `appsv1.Deployment`. It allows provider-specific methods to be attached while still exposing every field of the underlying Deployment through embedding. This design simplifies access to deployment data and enables adding convenience functions (e.g., readiness checks) without duplicating fields. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `IsDeploymentReady` | Determines whether the wrapped deployment is fully available, matching expected replica counts and status conditions. | +| `ToString` | Returns a concise string representation containing the deployment’s name and namespace. 
| +| `GetUpdatedDeployment` | Retrieves a Deployment by name/namespace from the cluster and returns it wrapped in this struct. | + +--- + +--- + +### Event + +Represents a Kubernetes event enriched with helper methods for stringification and construction. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `*corev1.Event` | *corev1.Event | Embedded core Kubernetes event containing all standard fields such as `CreationTimestamp`, `InvolvedObject`, `Reason`, and `Message`. The embedding allows direct access to these fields on an `Event` instance. | + +#### Purpose + +The `Event` type is a thin wrapper around the native `k8s.io/api/core/v1.Event`. It exists to provide convenience methods (e.g., `String`) and factory functions (`NewEvent`) while retaining full compatibility with Kubernetes event structures. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewEvent` | Constructs an `Event` from a pointer to a native `corev1.Event`, initializing the embedded field. | +| `(*Event).String()` | Returns a formatted string summarizing key fields (`timestamp`, `involved object`, `reason`, and `message`) of the underlying event. | + +--- + +--- + +### MachineConfig + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `embedded:*mcv1.MachineConfig` | `*mcv1.MachineConfig` | Field documentation | +| `Config` | `struct{Systemd struct{Units []struct{Contents string; Name string}}}` | Field documentation | + +--- + +### Node + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Data` | `*corev1.Node` | Pointer to the underlying Kubernetes Node object, containing status, labels, annotations, etc. | +| `Mc` | `MachineConfig` | Machine‑Configuration object associated with the node (ignored during JSON marshaling). | + +#### Purpose + +`Node` encapsulates a Kubernetes node and optionally its machine‑configuration details for use in tests. 
It provides convenience methods to query operating system type, kernel properties, workload presence, and node role. The struct also implements custom JSON marshalling that serializes only the embedded `corev1.Node`. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetCSCOSVersion` | Returns the CoreOS version string if the node runs CentOS Stream CoreOS. | +| `GetRHCOSVersion` | Extracts the Red Hat Enterprise Linux CoreOS short version from the node’s OS image. | +| `GetRHELVersion` | Retrieves the RHEL release number from the node’s OS image. | +| `HasWorkloadDeployed` | Checks whether any supplied pods are scheduled on this node. | +| `IsCSCOS` | Detects if the node is running CentOS Stream CoreOS. | +| `IsControlPlaneNode` | Determines if the node has a control‑plane label. | +| `IsHyperThreadNode` | Verifies hyper‑threading support by executing a probe container command. | +| `IsRHCOS` | Detects if the node is running Red Hat Enterprise Linux CoreOS. | +| `IsRHEL` | Detects if the node is running RHEL. | +| `IsRTKernel` | Checks whether the kernel contains a real‑time (`rt`) tag. | +| `IsWorkerNode` | Determines if the node has a worker label. | +| `MarshalJSON` | Serializes only the underlying `corev1.Node` to JSON, omitting machine‑config data. | + +--- + +--- + +### Operator + +Represents an installed Kubernetes operator within a cluster, capturing its metadata, deployment status, and test results. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Name` | `string` | Operator name. | +| `Namespace` | `string` | Namespace where the operator’s resources reside. | +| `TargetNamespaces` | `[]string` | Namespaces targeted by the operator; empty if cluster‑wide. | +| `IsClusterWide` | `bool` | Indicates whether the operator is deployed to all namespaces (`true`) or only specific ones (`false`). 
| +| `Csv` | `*olmv1Alpha.ClusterServiceVersion` | Reference to the ClusterServiceVersion object that defines the operator’s bundle. | +| `Phase` | `olmv1Alpha.ClusterServiceVersionPhase` | Current lifecycle phase of the CSV (e.g., Succeeded, Failed). | +| `SubscriptionName` | `string` | Name of the subscription managing this operator. | +| `SubscriptionNamespace` | `string` | Namespace of the corresponding subscription. | +| `InstallPlans` | `[]CsvInstallPlan` | List of install plans that deployed the operator’s bundle. | +| `Package` | `string` | Package name from the subscription spec. | +| `Org` | `string` | Catalog source organization (catalog namespace). | +| `Version` | `string` | Operator version string extracted from the CSV spec. | +| `Channel` | `string` | Subscription channel used to install the operator. | +| `PackageFromCsvName` | `string` | Package name parsed from the CSV file name. | +| `PreflightResults` | `PreflightResultsDB` | Results of pre‑flight checks executed against the operator’s bundle. | +| `OperandPods` | `map[string]*Pod` | Mapping of operand pod names to their detailed information (populated elsewhere). | + +#### Purpose + +The `Operator` struct aggregates all relevant data needed for certification and validation workflows: +- Identifies which operator is under test. +- Tracks its installation status, source package, channel, and deployment scope. +- Holds references to the CSV and install plans for introspection or cleanup. +- Stores pre‑flight test outcomes to report pass/fail status. + +This struct is used throughout the provider package when collecting operator information from a cluster, performing pre‑flight checks, and generating summaries of certification results. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `SetPreflightResults(env *TestEnvironment)` | Runs pre‑flight container tests against the operator’s bundle image, captures logs, stores the results in `PreflightResults`. 
| +| `String()` | Returns a concise string representation of key fields (name, namespace, subscription, target namespaces). | +| `createOperators(...)` | Builds a slice of `Operator` instances from raw cluster resources (CSV, subscriptions, install plans, etc.). | +| `getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, ...)` | Populates subscription‑related fields (`SubscriptionName`, `Namespace`, `Package`, `Org`, `Channel`) for a given operator. | +| `getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, ...)` | Fills the `InstallPlans` slice with relevant install plan data linked to the operator. | +| `getSummaryAllOperators(operators []*Operator)` | Generates human‑readable summaries of all operators for reporting purposes. | + +--- + +--- + +### Pod + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| ` *corev1.Pod` | embedded | Inherits all fields and methods of the standard Kubernetes Pod object, providing access to spec, status, labels, annotations, etc. | +| `AllServiceAccountsMap` | `*map[string]*corev1.ServiceAccount` | Optional reference to a map that holds ServiceAccount objects keyed by namespace+name; used for validating automount settings. | +| `Containers` | `[]*Container` | Slice of wrapper structs representing each container in the pod, enriched with runtime and status information. | +| `MultusNetworkInterfaces` | `map[string]CniNetworkInterface` | Mapping of Multus network names to their corresponding interface details (IP addresses, MACs). | +| `MultusPCIs` | `[]string` | List of PCI device identifiers assigned to the pod via Multus. | +| `SkipNetTests` | `bool` | Flag indicating that network connectivity tests should be skipped for this pod. | +| `SkipMultusNetTests` | `bool` | Flag indicating that Multus‑specific connectivity tests should be skipped. 
| +| `IsOperator` | `bool` | Marks the pod as an operator component (used to distinguish from test workload pods). | +| `IsOperand` | `bool` | Marks the pod as an operand of an operator (also used for filtering). | + +#### Purpose + +The `Pod` struct extends the standard Kubernetes Pod definition with additional fields required by certsuite’s testing framework. It captures runtime metadata, container details, and test‑specific flags to enable fine‑grained selection and validation of pods during security and compliance checks. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewPod(aPod *corev1.Pod) Pod` | Wraps a raw Kubernetes pod into the enriched `Pod` struct, populating Multus interfaces, PCI lists, and test flags. | +| `ConvertArrayPods(pods []*corev1.Pod) []*Pod` | Converts a slice of raw pods to the enriched form for batch processing. | +| `GetContainers(aPod *corev1.Pod, useIgnoreList bool) []*Container` | Builds container wrappers used by the `Containers` field. | +| `searchPodInSlice(name, namespace string, pods []*Pod) *Pod` | Helper to find an existing `Pod` in a collection by name and namespace. | + +The struct’s methods (e.g., `IsCPUIsolationCompliant`, `HasHugepages`, `GetRunAsNonRootFalseContainers`) operate on these fields to enforce compliance rules during tests. + +--- + +### PreflightResultsDB + +#### Fields + +| Field | Type | Description | +|--------|-----------------|-------------| +| Passed | `[]PreflightTest` | Tests that succeeded during the pre‑flight run. | +| Failed | `[]PreflightTest` | Tests that failed during the pre‑flight run. | +| Errors | `[]PreflightTest` | Tests that encountered runtime errors (e.g., container start failures). | + +#### Purpose + +`PreflightResultsDB` aggregates the outcome of a pre‑flight validation for a container image. 
Each slice holds `PreflightTest` entries describing individual checks, including their name, description, suggested remediation, and any error message when applicable. The struct is stored on a `Container` instance (`c.PreflightResults`) and cached per image to avoid redundant executions. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetPreflightResultsDB` | Converts raw `plibRuntime.Results` from the pre‑flight library into a `PreflightResultsDB`, separating passed, failed, and errored checks. | +| `Container.SetPreflightResults` | Executes the pre‑flight test for a container image, stores the resulting `PreflightResultsDB` in both the container struct and an image cache. | + +--- + +--- + +### PreflightTest + +Represents a single pre‑flight check performed by the certsuite provider. +It holds the test’s identity, description of what it verifies, a suggested remediation if the test fails, and any error encountered during execution. + +#### Fields + +| Field | Type | Description | +|-------------|--------|-------------| +| `Name` | string | Human‑readable identifier for the pre‑flight check. | +| `Description` | string | Brief explanation of what the test validates. | +| `Remediation` | string | Suggested action to resolve a failure. | +| `Error` | error | Holds any error produced while running the test; `nil` indicates success. | + +#### Purpose + +The `PreflightTest` struct is used by the provider package to encapsulate the results of individual pre‑flight validations. Each instance records whether the check passed or failed (via the `Error` field) and provides context for users through the descriptive fields. + +#### Related functions + +No public functions directly reference or manipulate this struct. + +--- + +### ScaleObject + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Scale` | `CrScale` | Represents the desired scale specification for a custom resource instance. 
The exact semantics depend on the implementation of `CrScale`. | +| `GroupResourceSchema` | `schema.GroupResource` | Identifies the group and resource (e.g., `"apps/v1", "deployments"`) that this scaling configuration applies to. | + +#### Purpose + +`ScaleObject` encapsulates all information required to perform a scaling operation on a custom resource. It bundles together the target resource’s schema (`GroupResourceSchema`) with the desired scale state (`Scale`). This struct is typically constructed from raw input (e.g., from an API request) and passed to functions that apply the scaling logic within the provider. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `updateCrUnderTest` | Converts a slice of `autodiscover.ScaleObject` into a slice of internal `ScaleObject`, copying over the scale specification and resource schema for each entry. | + +--- + +--- + +### StatefulSet + +A lightweight wrapper around Kubernetes’ native `*appsv1.StatefulSet`, providing convenience methods for status checks and string representation within the certsuite provider package. + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `*appsv1.StatefulSet` | *appsv1.StatefulSet | Embedded field that grants direct access to all fields of the Kubernetes StatefulSet API object (metadata, spec, status, etc.). | + +#### Purpose + +The `StatefulSet` struct is used by certsuite’s provider logic to interact with and inspect StatefulSets in a Kubernetes cluster. By embedding the official `appsv1.StatefulSet`, it inherits all standard properties while adding helper methods that simplify readiness checks (`IsStatefulSetReady`) and human‑readable output (`ToString`). It serves as the primary representation of a StatefulSet within this codebase. 
+ +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetUpdatedStatefulset` | Retrieves a `StatefulSet` by name and namespace, returning an instance of this wrapper around the underlying Kubernetes object. | +| `IsStatefulSetReady` (method) | Determines whether all replicas are ready, current, and updated according to the StatefulSet’s status. | +| `ToString` (method) | Produces a concise string containing the StatefulSet’s name and namespace for logging or display purposes. | + +--- + +--- + +### TestEnvironment + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `Namespaces` | []string | Namespaces targeted by the test. | +| `AbnormalEvents` | []*Event | Events that deviated from expected behavior during the run. | +| `Pods` | []*Pod | Pods discovered for testing, filtered by inclusion rules. | +| `ProbePods` | map[string]*corev1.Pod | Mapping of node name → probe pod used for introspection. | +| `AllPods` | []*Pod | Complete list of pods in the cluster. | +| `CSVToPodListMap` | map[string][]*Pod | Mapping from CSV names to their associated pods. | +| `PodStates` | autodiscover.PodStates | Cached state information for discovered pods. | +| `Deployments` | []*Deployment | Deployments selected for testing. | +| `StatefulSets` | []*StatefulSet | StatefulSets selected for testing. | +| `Containers` | []*Container | Containers filtered by a block‑list of disallowed names. | +| `Operators` | []*Operator | Operators discovered in the test namespace(s). | +| `AllOperators` | []*Operator | All operators present in the cluster. | +| `AllOperatorsSummary` | []string | One‑line summaries for each operator. | +| `AllCsvs` | []*olmv1Alpha.ClusterServiceVersion | All CSV objects in the cluster. | +| `PersistentVolumes` | []corev1.PersistentVolume | Persistent volumes in the cluster. | +| `PersistentVolumeClaims` | []corev1.PersistentVolumeClaim | PVCs in the cluster. 
| +| `ClusterRoleBindings` | []rbacv1.ClusterRoleBinding | Cluster‑wide role bindings. | +| `RoleBindings` | []rbacv1.RoleBinding | Role bindings scoped to namespaces. | +| `Roles` | []rbacv1.Role | RBAC roles defined in the cluster. | +| `Config` | configuration.TestConfiguration | Configuration data for this test run. | +| `params` | configuration.TestParameters | Runtime parameters controlling discovery and filtering. | +| `Crds` | []*apiextv1.CustomResourceDefinition | CRDs relevant to the test. | +| `AllCrds` | []*apiextv1.CustomResourceDefinition | All CRDs present in the cluster. | +| `HorizontalScaler` | []*scalingv1.HorizontalPodAutoscaler | HPAs discovered for testing. | +| `Services` | []*corev1.Service | Services selected for testing. | +| `AllServices` | []*corev1.Service | All services in the cluster. | +| `ServiceAccounts` | []*corev1.ServiceAccount | Service accounts relevant to the test. | +| `AllServiceAccounts` | []*corev1.ServiceAccount | All service accounts in the cluster. | +| `AllServiceAccountsMap` | map[string]*corev1.ServiceAccount | Map of service account name → object for quick lookup. | +| `Nodes` | map[string]Node | Node objects indexed by node name. | +| `K8sVersion` | string | Kubernetes version string. | +| `OpenshiftVersion` | string | OpenShift version string, if applicable. | +| `OCPStatus` | string | Status of the OpenShift cluster. | +| `HelmChartReleases` | []*release.Release | Helm releases discovered in the test namespaces. | +| `ResourceQuotas` | []corev1.ResourceQuota | Resource quotas defined in the cluster. | +| `PodDisruptionBudgets` | []policyv1.PodDisruptionBudget | PDBs present in the cluster. | +| `NetworkPolicies` | []networkingv1.NetworkPolicy | Network policies relevant to the test. | +| `AllInstallPlans` | []*olmv1Alpha.InstallPlan | All install plans in the cluster. | +| `AllSubscriptions` | []olmv1Alpha.Subscription | All operator subscriptions in the cluster. 
| +| `AllCatalogSources` | []*olmv1Alpha.CatalogSource | All catalog sources in the cluster. | +| `AllPackageManifests` | []*olmpkgv1.PackageManifest | All package manifests in the cluster. | +| `OperatorGroups` | []*olmv1.OperatorGroup | Operator groups discovered in the cluster. | +| `SriovNetworks` | []unstructured.Unstructured | SR‑IOV network resources. | +| `AllSriovNetworks` | []unstructured.Unstructured | All SR‑IOV networks. | +| `SriovNetworkNodePolicies` | []unstructured.Unstructured | SR‑IOV node policy objects. | +| `AllSriovNetworkNodePolicies` | []unstructured.Unstructured | All SR‑IOV node policies. | +| `NetworkAttachmentDefinitions` | []nadClient.NetworkAttachmentDefinition | Network attachment definitions. | +| `ClusterOperators` | []configv1.ClusterOperator | Cluster operator status objects. | +| `IstioServiceMeshFound` | bool | Indicates if an Istio service mesh is present. | +| `ValidProtocolNames` | []string | Supported protocol names for tests. | +| `DaemonsetFailedToSpawn` | bool | Flag set when a daemonset fails to start. | +| `ScaleCrUnderTest` | []ScaleObject | Scale objects being tested. | +| `StorageClassList` | []storagev1.StorageClass | Storage classes in the cluster. | +| `ExecutedBy` | string | Identifier of the entity executing the test. | +| `PartnerName` | string | Name of the partner or vendor for which the test is run. | +| `CollectorAppPassword` | string | Password used by the collector application. | +| `CollectorAppEndpoint` | string | Endpoint URL for the collector app. | +| `ConnectAPIKey` | string | API key for external connectivity. | +| `ConnectProjectID` | string | Project ID for external connectivity. | +| `ConnectAPIBaseURL` | string | Base URL of the external API. | +| `ConnectAPIProxyURL` | string | Proxy URL for external API traffic. | +| `ConnectAPIProxyPort` | string | Proxy port for external API traffic. | +| `SkipPreflight` | bool | If true, preflight checks are skipped. 
| + +#### Purpose + +`TestEnvironment` aggregates all data required to perform an end‑to‑end test of a Kubernetes/OpenShift cluster. It stores discovery results (pods, nodes, operators, CRDs, etc.), configuration parameters, and runtime state such as collected events or flags indicating special conditions (e.g., presence of Istio). Test logic queries this struct via helper methods (e.g., `GetPodsUsingSRIOV`, `IsSNO`) to filter resources for specific checks. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetTestEnvironment` | Returns the singleton instance, initializing it if not yet loaded. | +| `SetNeedsRefresh` | Marks the environment as stale so that subsequent calls rebuild it. | +| `GetAffinityRequiredPods` | Filters pods requiring affinity. | +| `GetBaremetalNodes` | Returns nodes with a bare‑metal provider ID. | +| `GetCPUPinningPodsWithDpdk` | Retrieves DPDK‑enabled CPU‑pinned pods. | +| `GetDockerConfigFile` | Supplies the path to Docker config for preflight checks. | +| `GetGuaranteedPodContainersWithExclusiveCPUs` | Lists containers with exclusive CPUs in guaranteed pods. | +| `GetGuaranteedPodsWithExclusiveCPUs` | Returns pods that are guaranteed and have exclusive CPU requests. | +| `GetHugepagesPods` | Finds pods requesting hugepages. | +| `GetMasterCount` | Counts control‑plane nodes. | +| `IsIntrusive` | Indicates if the test environment is running in intrusive mode. | +| `IsPreflightInsecureAllowed` | Determines whether insecure preflight connections are permitted. | +| `IsSNO` | Returns true when only one node exists (Single‑Node OpenShift). | +| `GetPodsUsingSRIOV` | Lists pods that use SR‑IOV network interfaces. | +| `GetWorkerCount` | Counts worker nodes. | + +These methods allow test code to query the environment without directly accessing struct fields, promoting encapsulation and reducing coupling. 
+ +--- + +## Exported Functions + +### AreCPUResourcesWholeUnits + +**AreCPUResourcesWholeUnits** - Determines whether every container in the given `*Pod` specifies CPU requests and limits that are multiples of 1000 milli‑CPUs (i.e., whole cores). If any value is missing or not a multiple of 1000, it logs a debug message and returns `false`. + +#### Signature (Go) + +```go +func AreCPUResourcesWholeUnits(p *Pod) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether every container in the given `*Pod` specifies CPU requests and limits that are multiples of 1000 milli‑CPUs (i.e., whole cores). If any value is missing or not a multiple of 1000, it logs a debug message and returns `false`. | +| **Parameters** | `p *Pod` – the pod whose containers’ CPU resources are examined. | +| **Return value** | `bool` – `true` if all containers meet the whole‑unit requirement; otherwise `false`. | +| **Key dependencies** | • `cut.Resources.Requests.Cpu().MilliValue()`
• `cut.Resources.Limits.Cpu().MilliValue()`
• `log.Debug` (from internal/log)
• `cut.String()` for logging context | +| **Side effects** | Emits debug logs when a container fails the check; no state mutation. | +| **How it fits the package** | Used by higher‑level pod validation logic to enforce CPU isolation rules required for certain workloads (e.g., guaranteed pods with exclusive CPUs). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over containers"} + B -->|"container"| C["Get cpuRequestsMillis"] + C --> D["Check if zero?"] + D -- yes --> E["Log debug, return false"] + D -- no --> F["Get cpuLimitsMillis"] + F --> G["Check if zero?"] + G -- yes --> E + G -- no --> H{"Is request integer?"} + H -- no --> I["Log debug, return false"] + H -- yes --> J{"Is limit integer?"} + J -- no --> I + J -- yes --> K["Continue loop"] + K --> B + B -->|"done"| L["Return true"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_AreCPUResourcesWholeUnits --> func_MilliValue + func_AreCPUResourcesWholeUnits --> func_Cpu + func_AreCPUResourcesWholeUnits --> Logger.Debug + func_AreCPUResourcesWholeUnits --> Pod.String +``` + +#### Functions calling `AreCPUResourcesWholeUnits` (Mermaid) + +```mermaid +graph TD + func_Pod.IsPodGuaranteedWithExclusiveCPUs --> AreCPUResourcesWholeUnits +``` + +#### Usage example (Go) + +```go +// Minimal example invoking AreCPUResourcesWholeUnits +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + pod := &provider.Pod{ + // ... populate pod with containers and resource specs ... + } + + if provider.AreCPUResourcesWholeUnits(pod) { + fmt.Println("All CPU requests/limits are whole units.") + } else { + fmt.Println("Pod contains non‑whole CPU resources.") + } +} +``` + +--- + +### AreResourcesIdentical + +**AreResourcesIdentical** - Ensures each container in the supplied `*Pod` has matching CPU and memory requests and limits. 
Returns `true` only if all containers meet this condition; otherwise, it logs a debug message and returns `false`. + +#### Signature (Go) + +```go +func AreResourcesIdentical(p *Pod) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each container in the supplied `*Pod` has matching CPU and memory requests and limits. Returns `true` only if all containers meet this condition; otherwise, it logs a debug message and returns `false`. | +| **Parameters** | `p *Pod` – The pod whose resources are to be validated. | +| **Return value** | `bool` – `true` when every container’s requests equal its limits; `false` if any mismatch or missing limit is found. | +| **Key dependencies** | • `len` (built‑in)
• `log.Debug` from the internal logging package
• `Pod.String()` for human‑readable identifiers
• Resource quantity methods: `Cpu()`, `Memory()`, `Equal(...)`, `AsApproximateFloat64()` | +| **Side effects** | Emits debug logs describing any resource mismatches or missing limits. No state mutation or I/O beyond logging. | +| **How it fits the package** | Central helper for pod isolation checks; used by higher‑level predicates such as `Pod.IsPodGuaranteed` and `Pod.IsPodGuaranteedWithExclusiveCPUs`. It encapsulates the logic that guarantees a pod’s CPU and memory configuration is “identical” (requests = limits). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over containers"} + B --> C{"Check if limits exist?"} + C -- No --> D["Log missing limits, return false"] + C -- Yes --> E["Gather CPU/Mem requests & limits"] + E --> F{"Requests == Limits?"} + F -- No --> G["Log mismatch, return false"] + F -- Yes --> H["Next container"] + H --> I{"All containers processed?"} + I -- Yes --> J["Return true"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_AreResourcesIdentical --> len + func_AreResourcesIdentical --> log.Debug + func_AreResourcesIdentical --> Pod.String + func_AreResourcesIdentical --> Cpu + func_AreResourcesIdentical --> Memory + func_AreResourcesIdentical --> Equal + func_AreResourcesIdentical --> AsApproximateFloat64 +``` + +#### Functions calling `AreResourcesIdentical` (Mermaid) + +```mermaid +graph TD + func_Pod.IsPodGuaranteed --> func_AreResourcesIdentical + func_Pod.IsPodGuaranteedWithExclusiveCPUs --> func_AreResourcesIdentical +``` + +#### Usage example (Go) + +```go +// Minimal example invoking AreResourcesIdentical +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + pod := &provider.Pod{ + Name: "example", + Namespace: "default", + Containers: []provider.Container{ + { + Name: "app", + Resources: provider.ResourceRequirements{ + Limits: map[string]resource.Quantity{"cpu": resource.MustParse("100m"), 
"memory": resource.MustParse("128Mi")}, + Requests: map[string]resource.Quantity{"cpu": resource.MustParse("100m"), "memory": resource.MustParse("128Mi")}, + }, + }, + }, + } + if provider.AreResourcesIdentical(pod) { + fmt.Println("Pod resources are identical.") + } else { + fmt.Println("Pod resources differ.") + } +} +``` + +--- + +### Container.GetUID + +**GetUID** - Extracts the UID of a running container from its status and logs diagnostic information. + +#### Signature (Go) + +```go +func (c *Container) GetUID() (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Extracts the UID of a running container from its status and logs diagnostic information. | +| **Parameters** | `c` – receiver; the container instance whose UID is requested. | +| **Return value** | `string` – the UID if found, otherwise an empty string.
`error` – non‑nil when the UID cannot be determined. | +| **Key dependencies** | • `strings.Split` – split the container ID on “://”.
• `len` – determine slice length.
• `log.Debug` – log debug messages.
• `errors.New` – construct error when UID is missing. | +| **Side effects** | No state mutation; only logs to the package logger. | +| **How it fits the package** | Provides a lightweight helper for callers that need the container’s identifier, e.g., for cleanup or auditing tasks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["c.Status.ContainerID"] --> B["Split on ://"] + B --> C{"len(split) > 0"} + C -->|"yes"| D["uid=split_last"] + C -->|"no"| E["uid remains"] + D --> F{"uid =="} + E --> F + F -->|"yes"| G["log.Debug could not find uid"] + F -->|"yes"| H["return , errors.New(cannot determine container UID)"] + F -->|"no"| I["log.Debug uid found"] + I --> J["return uid, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Container.GetUID --> func_strings.Split + func_Container.GetUID --> func_log.Logger.Debug + func_Container.GetUID --> func_errors.New +``` + +#### Functions calling `Container.GetUID` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Container.GetUID +c := &provider.Container{ + Status: provider.ContainerStatus{ContainerID: "docker://12345"}, + Namespace: "default", + Podname: "mypod", + Name: "mycontainer", +} +uid, err := c.GetUID() +if err != nil { + fmt.Printf("Error retrieving UID: %v\n", err) +} else { + fmt.Printf("Container UID is %s\n", uid) +} +``` + +--- + +### Container.HasExecProbes + +**HasExecProbes** - Returns `true` when the container defines at least one probe that uses an executable command (`Exec`) for liveness, readiness, or startup checks. + +#### Signature (Go) + +```go +func (c *Container) HasExecProbes() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` when the container defines at least one probe that uses an executable command (`Exec`) for liveness, readiness, or startup checks. 
| +| **Parameters** | None | +| **Return value** | `bool` – `true` if any exec‑probe is present; otherwise `false`. | +| **Key dependencies** | *None* – the method accesses only the container’s own fields. | +| **Side effects** | No state mutation or I/O; purely reads struct values. | +| **How it fits the package** | In the provider package, this helper assists higher‑level logic in deciding whether a container requires probe execution support during deployment validation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check LivenessProbe Exec"] --> B{"Exists?"} + B -- Yes --> C["Return true"] + B -- No --> D["Check ReadinessProbe Exec"] + D -- Yes --> C + D -- No --> E["Check StartupProbe Exec"] + E -- Yes --> C + E -- No --> F["Return false"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Container.HasExecProbes` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Container.HasExecProbes +c := &provider.Container{ + LivenessProbe: &probe.Probe{Exec: &probe.ExecAction{}}, +} +if c.HasExecProbes() { + fmt.Println("Container has exec probes") +} else { + fmt.Println("No exec probes defined") +} +``` + +--- + +### Container.HasIgnoredContainerName + +**HasIgnoredContainerName** - Determines if the current container is in an ignore list, including Istio proxy containers. + +#### Signature (Go) + +```go +func (c *Container) HasIgnoredContainerName() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the current container is in an ignore list, including Istio proxy containers. | +| **Parameters** | `c` – the container instance to evaluate. | +| **Return value** | `true` if the container name matches any ignored pattern or is an Istio proxy; otherwise `false`. 
| +| **Key dependencies** | • `Container.IsIstioProxy()`
• `strings.Contains` (from the standard library) | +| **Side effects** | None – purely read‑only evaluation. | +| **How it fits the package** | Used by container collection logic to filter out non‑relevant containers before performing policy checks. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"For each ignored name"} + B --> C["Check Istio proxy"] + C --> D{"If true"} --> E["Return true"] + D --> F{"Else check contains"} + F --> G{"If matches"} --> H["Return true"] + G --> I{"No match"} --> J["Continue loop"] + J --> K{"End loop?"} --> L["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Container.HasIgnoredContainerName --> func_Container.IsIstioProxy + func_Container.HasIgnoredContainerName --> strings.Contains +``` + +#### Functions calling `Container.HasIgnoredContainerName` + +```mermaid +graph TD + getPodContainers --> Container.HasIgnoredContainerName +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Container.HasIgnoredContainerName +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + c := provider.Container{Name: "istio-proxy"} + if c.HasIgnoredContainerName() { + fmt.Println("This container is ignored") + } else { + fmt.Println("This container will be processed") + } +} +``` + +--- + +### Container.IsContainerRunAsNonRoot + +**IsContainerRunAsNonRoot** - Returns whether the container is configured to run as a non‑root user and explains how that value was derived. + +#### 1) Signature (Go) + +```go +func (c *Container) IsContainerRunAsNonRoot(podRunAsNonRoot *bool) (isContainerRunAsNonRoot bool, reason string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns whether the container is configured to run as a non‑root user and explains how that value was derived. | +| **Parameters** | `podRunAsNonRoot *bool` – optional pod‑level override; may be nil. 
| +| **Return value** | `isContainerRunAsNonRoot bool` – true if the container (or inherited pod setting) specifies non‑root execution.
`reason string` – human‑readable explanation of how the decision was made. | +| **Key dependencies** | • `fmt.Sprintf` (from standard library)
• `stringhelper.PointerToString` (local helper for pointer display) | +| **Side effects** | None – purely deterministic, no state changes or I/O. | +| **How it fits the package** | Used by pod analysis functions to evaluate compliance with security best practices, specifically the RunAsNonRoot requirement. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Container.SecurityContext & RunAsNonRoot present?"} + B -- Yes --> C["Return container value + message"] + B -- No --> D{"podRunAsNonRoot nil?"} + D -- No --> E["Return pod value + message"] + D -- Yes --> F["Return false + default message"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Container.IsContainerRunAsNonRoot --> fmt.Sprintf + func_Container.IsContainerRunAsNonRoot --> stringhelper.PointerToString +``` + +#### 5) Functions calling `Container.IsContainerRunAsNonRoot` (Mermaid) + +```mermaid +graph TD + func_Pod.GetRunAsNonRootFalseContainers --> func_Container.IsContainerRunAsNonRoot +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Container.IsContainerRunAsNonRoot +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Example container with no SecurityContext + c := provider.Container{} // placeholder; in real code this would be populated from a PodSpec + + // podRunAsNonRoot is nil, meaning no pod‑level override + isNonRoot, reason := c.IsContainerRunAsNonRoot(nil) + + fmt.Printf("Is non‑root: %t\nReason: %s\n", isNonRoot, reason) +} +``` + +--- + +### Container.IsContainerRunAsNonRootUserID + +**IsContainerRunAsNonRootUserID** - Checks whether the container’s `RunAsUser` security context indicates it runs as a non‑root user. It also explains how pod‑level defaults are applied when the container level is unspecified. 
+ +#### Signature (Go) + +```go +func (c *Container) IsContainerRunAsNonRootUserID(podRunAsNonRootUserID *int64) (isContainerRunAsNonRootUserID bool, reason string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the container’s `RunAsUser` security context indicates it runs as a non‑root user. It also explains how pod‑level defaults are applied when the container level is unspecified. | +| **Parameters** | `podRunAsNonRootUserID *int64` – The `RunAsUser` value defined at the pod level (may be nil). | +| **Return value** | `isContainerRunAsNonRootUserID bool` – true if a non‑zero user ID is set;
`reason string` – human‑readable explanation of the decision. | +| **Key dependencies** | • `fmt.Sprintf` for formatting messages
• `stringhelper.PointerToString` to display pod value when nil | +| **Side effects** | None: purely functional, no state mutation or I/O. | +| **How it fits the package** | Used by `Pod.GetRunAsNonRootFalseContainers` to evaluate compliance of each container against the “run as non‑root” rule. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check container.SecurityContext"] -->|"RunAsUser set"| B{"Return true if !=0"} + A -->|"nil"| C["Check podRunAsNonRootUserID"] + C -->|"non‑nil"| D{"Return true if !=0"} + C -->|"nil"| E["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsContainerRunAsNonRootUserID --> fmt.Sprintf + func_IsContainerRunAsNonRootUserID --> stringhelper.PointerToString +``` + +#### Functions calling `Container.IsContainerRunAsNonRootUserID` (Mermaid) + +```mermaid +graph TD + Pod.GetRunAsNonRootFalseContainers --> Container.IsContainerRunAsNonRootUserID +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Container.IsContainerRunAsNonRootUserID +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + c := provider.Container{} // assume SecurityContext populated elsewhere + podUID := int64(1000) // example pod-level RunAsUser + + ok, reason := c.IsContainerRunAsNonRootUserID(&podUID) + fmt.Printf("Is non‑root? %v\nReason: %s\n", ok, reason) +} +``` + +--- + +--- + +### Container.IsIstioProxy + +**IsIstioProxy** - Checks whether the container represents the Istio side‑car proxy by comparing its name to a predefined constant. + +#### Signature (Go) + +```go +func (c *Container) IsIstioProxy() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the container represents the Istio side‑car proxy by comparing its name to a predefined constant. | +| **Parameters** | `c` – receiver of type `*Container`. 
| +| **Return value** | `bool`: `true` if the container’s name matches the Istio proxy container name; otherwise `false`. | +| **Key dependencies** | Uses the package‑level constant `IstioProxyContainerName`. | +| **Side effects** | None – purely functional. | +| **How it fits the package** | Provides a helper used by other logic (e.g., filtering containers) to identify Istio side‑car components within a pod. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Container.IsIstioProxy"] --> B{"Compare c.Name"} + B -->|"=="| C["IstioProxyContainerName"] + B -->|"≠"| D["Return false"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Container.IsIstioProxy` + +```mermaid +graph TD + func_Container.HasIgnoredContainerName --> func_Container.IsIstioProxy +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Container.IsIstioProxy +c := &provider.Container{Name: "istio-proxy"} +if c.IsIstioProxy() { + fmt.Println("This is the Istio side‑car proxy.") +} +``` + +--- + +### Container.IsReadOnlyRootFilesystem + +**IsReadOnlyRootFilesystem** - Checks whether the `SecurityContext.ReadOnlyRootFilesystem` flag of a container is set to `true`. + +#### Signature (Go) + +```go +func (c *Container) IsReadOnlyRootFilesystem(logger *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the `SecurityContext.ReadOnlyRootFilesystem` flag of a container is set to `true`. | +| **Parameters** | `logger *log.Logger` – logger used for debugging output. | +| **Return value** | `bool` – `true` if read‑only is enabled; otherwise `false`. | +| **Key dependencies** | Calls `logger.Info(...)` from the standard logging package. | +| **Side effects** | Emits an informational log message; no state changes or external I/O. 
| +| **How it fits the package** | Provides a helper to validate security configuration of containers within the `provider` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Is SecurityContext nil?"} + B -- Yes --> C["Return false"] + B -- No --> D{"Is ReadOnlyRootFilesystem nil?"} + D -- Yes --> E["Return false"] + D -- No --> F["Return value of *ReadOnlyRootFilesystem"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Container.IsReadOnlyRootFilesystem --> func_logger.Info +``` + +#### Functions calling `Container.IsReadOnlyRootFilesystem` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Container.IsReadOnlyRootFilesystem +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + logger := log.Default() + c := &provider.Container{ + SecurityContext: &provider.SecurityContext{ + ReadOnlyRootFilesystem: func(b bool) *bool { return &b }(true), + }, + } + + isRO := c.IsReadOnlyRootFilesystem(logger) + if isRO { + logger.Println("Container root filesystem is read‑only.") + } else { + logger.Println("Container root filesystem is writable.") + } +} +``` + +--- + +### Container.IsTagEmpty + +**IsTagEmpty** - Checks whether the `Tag` field of the container’s image identifier is an empty string, indicating that no specific tag was supplied. + +#### Signature (Go) + +```go +func (c *Container) IsTagEmpty() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the `Tag` field of the container’s image identifier is an empty string, indicating that no specific tag was supplied. | +| **Parameters** | `c *Container` – receiver pointing to the container instance being inspected. | +| **Return value** | `bool` – `true` if the tag is empty; otherwise `false`. 
| +| **Key dependencies** | None | +| **Side effects** | No state mutation or I/O; purely a read‑only check. | +| **How it fits the package** | Provides a helper for callers that need to decide whether to apply default tagging logic or validate image specifications. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + C["Container"] -->|"reads"| Tag["c.ContainerImageIdentifier.Tag"] + Tag -->|"compares to empty string"| Result["bool"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Container.IsTagEmpty` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Container.IsTagEmpty +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + c := &provider.Container{ + ContainerImageIdentifier: provider.ImageIdentifier{Tag: ""}, + } + if c.IsTagEmpty() { + println("No tag specified") + } else { + println("Tag present:", c.ContainerImageIdentifier.Tag) + } +} +``` + +--- + +### Container.SetPreflightResults + +**SetPreflightResults** - Runs the OpenShift‑preflight checks against a container image, caches the results per image, and stores them in the `Container` instance. + +#### Signature (Go) + +```go +func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs the OpenShift‑preflight checks against a container image, caches the results per image, and stores them in the `Container` instance. | +| **Parameters** | `preflightImageCache map[string]PreflightResultsDB` – cache keyed by image name; `
` `env *TestEnvironment` – environment configuration for Docker credentials and insecure‑connection flag. | +| **Return value** | `error` – non‑nil if the preflight run or result extraction fails. | +| **Key dependencies** | • `github.com/redhat-openshift-ecosystem/openshift-preflight/container`
• `github.com/redhat-openshift-ecosystem/openshift-preflight/artifacts`
• `github.com/go-logr/stdr`, `logr`
• `bytes`, `context`, `fmt`
• internal logging (`internal/log`) | +| **Side effects** | * Mutates the `Container.PreflightResults` field.
* Populates/updates the supplied cache map.
* Emits informational logs via the package logger. | +| **How it fits the package** | The `provider.Container` type represents a single image under test; this method performs the core validation logic that other parts of the suite consume when generating reports or deciding on remediation steps. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check cache for image"] -->|"Hit"| B["Use cached results"] + A -->|"Miss"| C["Prepare preflight options"] + C --> D["Create artifacts writer & context"] + D --> E["Set up logger in context"] + E --> F["Instantiate preflight check"] + F --> G["Run check"] + G --> H{"Runtime error?"} + H -->|"Yes"| I["Collect checks list, build errors"] + H -->|"No"| J["Convert results to DB format"] + J --> K["Store in Container and cache"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Container.SetPreflightResults --> plibContainer.WithDockerConfigJSONFromFile + func_Container.SetPreflightResults --> plibContainer.WithInsecureConnection + func_Container.SetPreflightResults --> artifacts.NewMapWriter + func_Container.SetPreflightResults --> artifacts.ContextWithWriter + func_Container.SetPreflightResults --> defaultLog.Default + func_Container.SetPreflightResults --> stdr.New + func_Container.SetPreflightResults --> logr.NewContext + func_Container.SetPreflightResults --> plibContainer.NewCheck + func_Container.SetPreflightResults --> plibContainer.Check.Run + func_Container.SetPreflightResults --> plibContainer.Check.List + func_Container.SetPreflightResults --> GetPreflightResultsDB +``` + +#### Functions calling `Container.SetPreflightResults` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking Container.SetPreflightResults +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Setup a container to test + cont := &provider.Container{ + Image: "quay.io/example/image:v1", + } + + // Environment providing Docker credentials and insecure flag + env := provider.NewTestEnvironment() + env.SetDockerConfigFile("/path/to/config.json") + env.AllowPreflightInsecure(true) + + // Cache to reuse results for identical images + cache := make(map[string]provider.PreflightResultsDB) + + if err := cont.SetPreflightResults(cache, env); err != nil { + panic(err) + } + + // Results are now available in cont.PreflightResults and cached +} +``` + +--- + +--- + +### Container.String + +**String** - Generates a concise description of a `Container` instance, including its name, pod name, and namespace. + +Returns a formatted string describing the container’s identity. + +--- + +#### Signature (Go) + +```go +func (c *Container) String() string +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a concise description of a `Container` instance, including its name, pod name, and namespace. | +| **Parameters** | *(receiver)* `c *Container` – the container to describe. | +| **Return value** | `string` – formatted as `"container: pod: ns: "`. | +| **Key dependencies** | • `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional, no state mutation or I/O. | +| **How it fits the package** | Provides a human‑readable representation used for logging, debugging, and displaying container information within the `provider` package. 
| + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Call Container.String"] --> B["fmt.Sprintf(\container: %s pod: %s ns: %s\, c.Name, c.Podname, c.Namespace)"] + B --> C["Return formatted string"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Container_String --> fmt_Sprintf +``` + +--- + +#### Functions calling `Container.String` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking Container.String +c := &provider.Container{ + Name: "nginx", + Podname: "pod-123", + Namespace: "default", +} +fmt.Println(c.String()) +// Output: container: nginx pod: pod-123 ns: default +``` + +--- + +### Container.StringLong + +**StringLong** - Generates a descriptive string containing key metadata about a Kubernetes container, including node, namespace, pod name, container name, UID, and runtime. + +#### Signature (Go) + +```go +func (c *Container) StringLong() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a descriptive string containing key metadata about a Kubernetes container, including node, namespace, pod name, container name, UID, and runtime. | +| **Parameters** | `c *Container` – receiver holding the container state. | +| **Return value** | A formatted string of the form:
``node: <node name> ns: <namespace> podName: <pod name> containerName: <container name> containerUID: <container UID> containerRuntime: <runtime>`` | +| **Key dependencies** | • `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional and read‑only. | +| **How it fits the package** | Provides a human‑readable representation used in logs, debugging, or UI output within the `provider` package that models Kubernetes resources. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Container"] --> B["fmt.Sprintf"] + B --> C["Return string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Container.StringLong --> fmt.Sprintf +``` + +#### Functions calling `Container.StringLong` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Container.StringLong +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + c := provider.Container{ + NodeName: "node-01", + Namespace: "default", + Podname: "nginx-pod", + Name: "nginx-container", + Status: provider.Status{ContainerID: "abc123"}, + Runtime: "containerd", + } + detail := c.StringLong() + fmt.Println(detail) +} +``` + +--- + +### ConvertArrayPods + +**ConvertArrayPods** - Transforms each `*corev1.Pod` from the Kubernetes API into a corresponding `*Pod` value defined by the provider package, preserving all relevant metadata and network information. + +#### Signature (Go) + +```go +func ConvertArrayPods(pods []*corev1.Pod) (out []*Pod) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms each `*corev1.Pod` from the Kubernetes API into a corresponding `*Pod` value defined by the provider package, preserving all relevant metadata and network information. 
| +| **Parameters** | `pods []*corev1.Pod – slice of pointers to corev1.Pod objects that need conversion.` | +| **Return value** | `out []*Pod – slice of pointers to provider‑defined Pod structs, one per input pod.` | +| **Key dependencies** | • Calls `NewPod(aPod *corev1.Pod) (out Pod)` to create the wrapped Pod.
• Uses Go’s built‑in `append` function. | +| **Side effects** | None – the function is pure; it only allocates new objects and returns them. | +| **How it fits the package** | This helper centralizes pod conversion logic, allowing other parts of the provider to work with a uniform Pod representation without repeatedly handling Kubernetes types directly. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + pods --> loop["For each pod"] + loop --> NewPod + NewPod --> outAppend["Append wrapped pod"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ConvertArrayPods --> func_NewPod + func_ConvertArrayPods --> func_append +``` + +#### Functions calling `ConvertArrayPods` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ConvertArrayPods +import ( + corev1 "k8s.io/api/core/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume we already have a slice of *corev1.Pod from the Kubernetes client. + var kubePods []*corev1.Pod + // ... populate kubePods ... + + providerPods := provider.ConvertArrayPods(kubePods) + // providerPods now contains []*provider.Pod ready for further processing. +} +``` + +--- + +### CrScale.IsScaleObjectReady + +**IsScaleObjectReady** - Determines if the scale object's `Status.Replicas` equals the desired `Spec.Replicas`, indicating readiness. + +Checks whether a scale object’s desired replica count matches its current status. + +#### Signature (Go) + +```go +func (crScale CrScale) IsScaleObjectReady() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the scale object's `Status.Replicas` equals the desired `Spec.Replicas`, indicating readiness. | +| **Parameters** | *None* – operates on the receiver `crScale`. 
| +| **Return value** | `bool`: `true` when the current replica count matches the desired count; otherwise `false`. | +| **Key dependencies** | • Calls `log.Info` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`. | +| **Side effects** | Logs a message with the desired and current replica counts; no state mutation. | +| **How it fits the package** | Part of the `provider` package’s scale object logic, enabling callers to verify that scaling operations have completed successfully. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get desired replicas"} + B --> C["Log current vs desired"] + C --> D{"Compare counts"} + D -- true --> E["Return true"] + D -- false --> F["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CrScale.IsScaleObjectReady --> func_Log.Info +``` + +#### Functions calling `CrScale.IsScaleObjectReady` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking CrScale.IsScaleObjectReady +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + scale := provider.CrScale{ + Spec: provider.ScaleSpec{Replicas: 3}, + Status: provider.ScaleStatus{Replicas: 3}, + } + if scale.IsScaleObjectReady() { + fmt.Println("Scale object is ready.") + } else { + fmt.Println("Scale object is not ready yet.") + } +} +``` + +--- + +### CrScale.ToString + +**ToString** - Returns a formatted string that identifies the custom resource scale, showing both its name and namespace. + +> Provides a human‑readable representation of a `CrScale` instance by concatenating its name and namespace fields. 
+ +#### Signature (Go) + +```go +func (crScale CrScale) ToString() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a formatted string that identifies the custom resource scale, showing both its name and namespace. | +| **Parameters** | *None* – uses the receiver’s fields (`Name`, `Namespace`). | +| **Return value** | `string` – e.g., `"cr: myscale ns: default"`. | +| **Key dependencies** | • `fmt.Sprintf` from package `fmt`. | +| **Side effects** | None. The function only reads receiver data and returns a string. | +| **How it fits the package** | Offers a convenient debugging/printing helper for the `CrScale` type within the provider package, aiding logging and test output. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + CrScale_ToString --> fmt.Sprintf +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CrScale.ToString --> func_fmt.Sprintf +``` + +#### Functions calling `CrScale.ToString` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking CrScale.ToString +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume CrScale has exported fields Name and Namespace. + scale := provider.CrScale{Name: "myscale", Namespace: "default"} + fmt.Println(scale.ToString()) // Output: cr: myscale ns: default +} +``` + +--- + +### CsvToString + +**CsvToString** - Creates a human‑readable representation of a CSV, showing its name and namespace. + +Formats a ClusterServiceVersion into a concise descriptive string. + +#### Signature (Go) + +```go +func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a human‑readable representation of a CSV, showing its name and namespace. 
| +| **Parameters** | `csv` – pointer to an `olmv1Alpha.ClusterServiceVersion`; the object to describe. | +| **Return value** | A formatted string: `"operator csv: ns: "`. | +| **Key dependencies** | - `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional, no mutation or I/O. | +| **How it fits the package** | Utility helper used by other provider functions to log CSV status and debugging information. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive csv"] --> B["Extract Name"] + B --> C["Extract Namespace"] + C --> D["Call fmt.Sprintf with name & namespace"] + D --> E["Return formatted string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CsvToString --> func_Sprintf["fmt.Sprintf"] +``` + +#### Functions calling `CsvToString` (Mermaid) + +```mermaid +graph TD + func_WaitOperatorReady --> func_CsvToString +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CsvToString +package main + +import ( + "fmt" + + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + csv := &olmv1Alpha.ClusterServiceVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-operator.v0.1.0", + Namespace: "operators", + }, + } + fmt.Println(CsvToString(csv)) +} +``` + +--- + +### Deployment.IsDeploymentReady + +**IsDeploymentReady** - Evaluates whether the deployment satisfies all readiness conditions: an `Available` condition is present, replica counts match specifications, and no replicas are marked unavailable. + +#### Signature (Go) + +```go +func (d *Deployment) IsDeploymentReady() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Evaluates whether the deployment satisfies all readiness conditions: an `Available` condition is present, replica counts match specifications, and no replicas are marked unavailable. | +| **Parameters** | *d* (`*Deployment`) – the deployment instance to inspect. 
| +| **Return value** | `bool` – `true` if the deployment is ready; otherwise `false`. | +| **Key dependencies** | • `appsv1.DeploymentAvailable` (condition type)
• Fields of `d.Status`: `Conditions`, `UnavailableReplicas`, `ReadyReplicas`, `AvailableReplicas`, `UpdatedReplicas`
• Field of `d.Spec`: `Replicas` | +| **Side effects** | None – purely reads the deployment state. | +| **How it fits the package** | Used by the provider to decide whether a deployment can be considered healthy and ready for further actions or tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Check Available Condition"} + B -- Yes --> C["Set notReady=false"] + B -- No --> D["Keep notReady=true"] + D --> E["Determine expected replicas"] + C --> E + E --> F{"Any readiness issue?"} + F -- Yes --> G["Return false"] + F -- No --> H["Return true"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Deployment.IsDeploymentReady` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Deployment.IsDeploymentReady + +import "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + +func main() { + // Assume deployment is obtained from a Kubernetes client + var d provider.Deployment + // ... populate d ... + + ready := d.IsDeploymentReady() + if ready { + fmt.Println("Deployment is fully ready.") + } else { + fmt.Println("Deployment is not yet ready.") + } +} +``` + +--- + +### Deployment.ToString + +**ToString** - Formats the deployment’s `Name` and `Namespace` into a single descriptive string. + +Provides a human‑readable string representation of a `Deployment`, combining its name and namespace. + +#### Signature (Go) + +```go +func (d *Deployment) ToString() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats the deployment’s `Name` and `Namespace` into a single descriptive string. | +| **Parameters** | `d *Deployment` – receiver containing `Name` and `Namespace`. | +| **Return value** | `string` – e.g., `"deployment: my-app ns: prod"`. 
| +| **Key dependencies** | Calls `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Offers a convenient, readable representation of deployments for logging or debugging within the `provider` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Deployment object"] --> B["fmt.Sprintf(\deployment: %s ns: %s\, d.Name, d.Namespace)"] + B --> C["String result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Deployment_ToString --> fmt_Sprintf +``` + +#### Functions calling `Deployment.ToString` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Deployment.ToString +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + d := &provider.Deployment{ + Name: "my-app", + Namespace: "prod", + } + fmt.Println(d.ToString()) // prints: deployment: my-app ns: prod +} +``` + +--- + +### Event.String + +**String** - Returns a formatted string summarizing the event’s timestamp, involved object, reason, and message. + +Provides a human‑readable representation of an event by formatting its key fields into a single string. + +#### Signature (Go) + +```go +func (e *Event) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a formatted string summarizing the event’s timestamp, involved object, reason, and message. | +| **Parameters** | `e *Event` – receiver containing event data. | +| **Return value** | `string` – human‑readable description of the event. | +| **Key dependencies** | - `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Enables debugging and logging by providing a concise textual snapshot of an event instance within the `provider` package. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Event.String"] --> B["fmt.Sprintf(\timestamp=%s involved object=%s reason=%s message=%s\, e.CreationTimestamp.Time, e.InvolvedObject, e.Reason, e.Message)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Event_String --> func_fmt_Sprintf +``` + +#### Functions calling `Event.String` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Event.String +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + ev := provider.Event{ + CreationTimestamp: /* populate with a timestamp */, + InvolvedObject: "pod/example", + Reason: "Scheduled", + Message: "Successfully scheduled pod.", + } + fmt.Println(ev.String()) +} +``` + +--- + +### GetAllOperatorGroups + +**GetAllOperatorGroups** - Queries the Kubernetes API for all `OperatorGroup` objects in the default namespace and returns a slice of pointers to them. Handles “not found” cases gracefully by returning `nil` without an error. + +#### Signature (Go) + +```go +func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the Kubernetes API for all `OperatorGroup` objects in the default namespace and returns a slice of pointers to them. Handles “not found” cases gracefully by returning `nil` without an error. | +| **Parameters** | None | +| **Return value** | `([]*olmv1.OperatorGroup, error)` – A slice containing pointers to each discovered OperatorGroup; `error` is non‑nil only if the API call fails for reasons other than “resource not found”. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains a cached Kubernetes client
• `client.OlmClient.OperatorsV1().OperatorGroups("").List(...)` – performs the actual list operation
• `k8serrors.IsNotFound(err)` – distinguishes “not found” errors from others
• `log.Warn` – logs warnings when no OperatorGroups are present | +| **Side effects** | No modification of cluster state; only reads data and logs warnings. | +| **How it fits the package** | Used during test‑environment construction (`buildTestEnvironment`) to populate the list of OperatorGroups that subsequent logic may need for validation or dependency analysis. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetAllOperatorGroups"] --> B["clientsholder.GetClientsHolder"] + B --> C["client.OlmClient.OperatorsV1().OperatorGroups().List"] + C --> D{"err"} + D -->|"error && !IsNotFound"| E["Return error"] + D -->|"IsNotFound"| F["log.Warn, Return nil,nil"] + D -->|"no error"| G["List.Items"] + G --> H{"len(list.Items)==0"} + H -->|"true"| I["log.Warn, Return nil,nil"] + H -->|"false"| J["Iterate list.Items → operatorGroups slice"] + J --> K["Return operatorGroups,nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetAllOperatorGroups --> func_GetClientsHolder + func_GetAllOperatorGroups --> func_List + func_GetAllOperatorGroups --> func_IsNotFound + func_GetAllOperatorGroups --> func_Warn +``` + +#### Functions calling `GetAllOperatorGroups` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_GetAllOperatorGroups +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetAllOperatorGroups +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + operatorGroups, err := provider.GetAllOperatorGroups() + if err != nil { + fmt.Printf("Failed to fetch OperatorGroups: %v\n", err) + return + } + if operatorGroups == nil { + fmt.Println("No OperatorGroups found.") + return + } + + for _, og := range operatorGroups { + fmt.Printf("Found OperatorGroup: %s/%s\n", og.Namespace, og.Name) + } +} +``` + +--- + +--- + +### GetCatalogSourceBundleCount + +**GetCatalogSourceBundleCount** - Counts the number of bundle images that 
belong to a given `CatalogSource`. It selects the counting strategy based on the OpenShift version: for ≤ 4.12 it queries a probe container; otherwise it tallies entries from package manifests. + +#### 1) Signature (Go) + +```go +func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Counts the number of bundle images that belong to a given `CatalogSource`. It selects the counting strategy based on the OpenShift version: for ≤ 4.12 it queries a probe container; otherwise it tallies entries from package manifests. | +| **Parameters** | `env *TestEnvironment` – test context containing cluster state and helpers.
`cs *olmv1Alpha.CatalogSource` – catalog source whose bundles are counted. | +| **Return value** | `int` – the total bundle count; returns `-1` if counting fails. | +| **Key dependencies** | • `log.Info`, `log.Error` (internal logging)
• `semver.NewVersion` for parsing OpenShift version
• `getCatalogSourceBundleCountFromProbeContainer`
• `getCatalogSourceBundleCountFromPackageManifests` | +| **Side effects** | Emits log messages; no mutation of input objects. | +| **How it fits the package** | Provides bundle counting logic used by compliance checks (e.g., `testOperatorCatalogSourceBundleCount`) to enforce limits on catalog sizes. | + +#### 3) Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"OCP version ≤ 4.12?"} + B -- Yes --> C["getCatalogSourceBundleCountFromProbeContainer"] + B -- No --> D["getCatalogSourceBundleCountFromPackageManifests"] + C --> E["Return bundle count"] + D --> E +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_GetCatalogSourceBundleCount --> func_getCatalogSourceBundleCountFromProbeContainer + func_GetCatalogSourceBundleCount --> func_getCatalogSourceBundleCountFromPackageManifests + func_GetCatalogSourceBundleCount --> Logger.Info + func_GetCatalogSourceBundleCount --> Logger.Error + func_GetCatalogSourceBundleCount --> semver.NewVersion +``` + +#### 5) Functions calling `GetCatalogSourceBundleCount` + +```mermaid +graph TD + provider_testOperatorCatalogSourceBundleCount --> func_GetCatalogSourceBundleCount +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetCatalogSourceBundleCount +env := &provider.TestEnvironment{ + OpenshiftVersion: "4.15", +} +cs := &olmv1Alpha.CatalogSource{ /* fields populated elsewhere */ } + +bundleCount := provider.GetCatalogSourceBundleCount(env, cs) +fmt.Printf("Catalog %s has %d bundles\n", cs.Name, bundleCount) +``` + +--- + +### GetPciPerPod + +**GetPciPerPod** - Parses the JSON network‑status annotation of a pod and extracts all PCI addresses associated with its network interfaces. + +```go +func GetPciPerPod(annotation string) (pciAddr []string, err error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses the JSON network‑status annotation of a pod and extracts all PCI addresses associated with its network interfaces. 
| +| **Parameters** | `annotation` – string containing the CNI status JSON (may be empty). | +| **Return value** | `pciAddr []string` – slice of PCI address strings; `err error` – non‑nil if unmarshalling fails. | +| **Key dependencies** | * `strings.TrimSpace` – check for empty annotation
* `encoding/json.Unmarshal` – decode JSON into `[]CniNetworkInterface`
* `fmt.Errorf` – wrap errors
* Built‑in `append` – accumulate addresses | +| **Side effects** | None (pure function). | +| **How it fits the package** | Provides a helper for `NewPod` to populate each pod’s PCI list from its annotations, enabling PCI‑aware connectivity tests. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Check if annotation is empty"] -->|"Yes"| B["Return empty slice"] + A -->|"No"| C["Unmarshal JSON into cniInfo"] + C --> D{"Iterate interfaces"} + D -->|"PCI address present"| E["Append to pciAddr"] + D -->|"None"| F["Continue loop"] + E --> G["End loop"] + G --> H["Return pciAddr, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetPciPerPod --> func_TrimSpace + func_GetPciPerPod --> func_Unmarshal + func_GetPciPerPod --> func_Errorf + func_GetPciPerPod --> append +``` + +#### Functions calling `GetPciPerPod` + +```mermaid +graph TD + func_NewPod --> func_GetPciPerPod +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetPciPerPod +annotation := `[ + {"deviceInfo":{"PCI":{"pciAddress":"0000:81:00.0"}}}, + {"deviceInfo":{"PCI":{}}} +]` +pci, err := GetPciPerPod(annotation) +if err != nil { + log.Fatalf("error: %v", err) +} +fmt.Printf("PCI addresses: %v\n", pci) // Output: PCI addresses: [0000:81:00.0] +``` + +--- + +### GetPodIPsPerNet + +**GetPodIPsPerNet** - Parses the `k8s.v1.cni.cncf.io/networks-status` annotation to extract all non‑default network interfaces and their IP addresses for a pod. + +#### 1) Signature (Go) + +```go +func GetPodIPsPerNet(annotation string) (ips map[string]CniNetworkInterface, err error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses the `k8s.v1.cni.cncf.io/networks-status` annotation to extract all non‑default network interfaces and their IP addresses for a pod. | +| **Parameters** | `annotation string –` raw JSON content of the CNI networks‑status annotation. 
| +| **Return value** | `ips map[string]CniNetworkInterface` indexed by network name; `err error` if unmarshalling fails. | +| **Key dependencies** | • `make` (map creation)
• `strings.TrimSpace`
• `encoding/json.Unmarshal`
• `fmt.Errorf` | +| **Side effects** | None – purely functional, no state mutation or I/O beyond error reporting. | +| **How it fits the package** | Provides lower‑level data extraction used by higher‑level pod constructors (`NewPod`) to populate network interface information for connectivity testing. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Trim annotation"} + B -- empty --> C["Return empty map"] + B -- non-empty --> D["Unmarshal JSON into CniNetworkInterface list"] + D -- error --> E["Return nil, fmt.Errorf"] + D -- success --> F{"Iterate interfaces"} + F --> G{"Default?"} + G -- yes --> H["Skip"] + G -- no --> I["Add to map by name"] + I --> J["Continue loop"] + J --> K["End loop"] + K --> L["Return map, nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetPodIPsPerNet --> func_make + func_GetPodIPsPerNet --> strings.TrimSpace + func_GetPodIPsPerNet --> json.Unmarshal + func_GetPodIPsPerNet --> fmt.Errorf +``` + +#### 5) Functions calling `GetPodIPsPerNet` (Mermaid) + +```mermaid +graph TD + func_NewPod --> func_GetPodIPsPerNet +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetPodIPsPerNet +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + annotation := `[{"name":"net1","ips":[{"address":"10.0.0.5/24"}],"default":false},{"name":"eth0","ips":[{"address":"192.168.1.10/24"}],"default":true}]` + ips, err := provider.GetPodIPsPerNet(annotation) + if err != nil { + fmt.Printf("error: %v\n", err) + return + } + for netName, iface := range ips { + fmt.Printf("Network %s has IPs: %+v\n", netName, iface.IPS) + } +} +``` + +--- + +### GetPreflightResultsDB + +**GetPreflightResultsDB** - Builds a `PreflightResultsDB` from the raw preflight runtime results, separating passed, failed and error tests into distinct slices. 
+ +#### Signature (Go) + +```go +func GetPreflightResultsDB(results *plibRuntime.Results) PreflightResultsDB +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `PreflightResultsDB` from the raw preflight runtime results, separating passed, failed and error tests into distinct slices. | +| **Parameters** | `results *plibRuntime.Results` – pointer to the object containing all test outcomes. | +| **Return value** | `PreflightResultsDB` – a container holding three lists of `PreflightTest`, each annotated with name, description, remediation, and optional error details. | +| **Key dependencies** | • `plibRuntime.Result.Name()`
• `plibRuntime.Result.Metadata().Description`
• `plibRuntime.Result.Help().Suggestion`
• `plibRuntime.Result.Error()` (for failed tests) | +| **Side effects** | None – purely functional transformation. | +| **How it fits the package** | Used by both container and operator preflight result handlers to persist test outcomes in a serialisable format for later reporting or caching. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> ParsePassed["Iterate over Passed"] + ParsePassed --> BuildTest1["Test object from Passed"] + BuildTest1 --> AppendPassed["Append to resultsDB.Passed"] + ParseFailed["Iterate over Failed"] --> BuildTest2["Test object from Failed"] + BuildTest2 --> AppendFailed["Append to resultsDB.Failed"] + ParseErrors["Iterate over Errors"] --> BuildTest3["Test object with Error"] + BuildTest3 --> AppendError["Append to resultsDB.Errors"] + End +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetPreflightResultsDB --> func_Name + func_GetPreflightResultsDB --> func_Metadata + func_GetPreflightResultsDB --> func_Help + func_GetPreflightResultsDB --> func_Error +``` + +#### Functions calling `GetPreflightResultsDB` + +```mermaid +graph TD + func_Container.SetPreflightResults --> func_GetPreflightResultsDB + func_Operator.SetPreflightResults --> func_GetPreflightResultsDB +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetPreflightResultsDB +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + plibRuntime "github.com/opencontainers/runc/libcontainer/runtime" // placeholder import path +) + +func main() { + // Assume results is obtained from a preflight check run + var runtimeResults *plibRuntime.Results + + // Convert to structured DB + db := provider.GetPreflightResultsDB(runtimeResults) + + // db.Passed, db.Failed, db.Errors now contain the test summaries +} +``` + +--- + +### GetRuntimeUID + +**GetRuntimeUID** - Parses the `ContainerID` field of a `ContainerStatus` to separate the runtime prefix and the unique identifier (UID). 
+ +#### Signature + +```go +func GetRuntimeUID(cs *corev1.ContainerStatus) (runtime, uid string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses the `ContainerID` field of a `ContainerStatus` to separate the runtime prefix and the unique identifier (UID). | +| **Parameters** | `cs *corev1.ContainerStatus` – status object containing the container’s ID. | +| **Return value** | `runtime string` – first part before “://”;
`uid string` – last part after “://”. | +| **Key dependencies** | • `strings.Split` (standard library)
• Length checks (`len`) on slices | +| **Side effects** | None. Pure function; no state mutation or I/O. | +| **How it fits the package** | Used by container‑listing logic to record runtime and UID for each pod container, aiding diagnostics and reporting. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Input ContainerStatus"] --> B{"Split ContainerID on ://"} + B --> C{"If parts exist"} + C --> D["Assign runtime = first part"] + C --> E["Assign uid = last part"] + D & E --> F["Return (runtime, uid)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetRuntimeUID --> strings.Split + func_GetRuntimeUID --> len +``` + +#### Functions calling `GetRuntimeUID` + +```mermaid +graph TD + getPodContainers --> func_GetRuntimeUID +``` + +#### Usage example + +```go +// Minimal example invoking GetRuntimeUID +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + corev1 "k8s.io/api/core/v1" +) + +func main() { + status := &corev1.ContainerStatus{ + ContainerID: "docker://abcdef123456", + } + runtime, uid := provider.GetRuntimeUID(status) + fmt.Printf("runtime=%q uid=%q\n", runtime, uid) // → runtime="docker" uid="abcdef123456" +} +``` + +--- + +### GetTestEnvironment + +**GetTestEnvironment** - Provides read‑only access to the singleton `env` that holds all runtime discovery data. It lazily builds the environment on first call and then returns the cached instance. + +#### Signature (Go) + +```go +func GetTestEnvironment() TestEnvironment +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides read‑only access to the singleton `env` that holds all runtime discovery data. It lazily builds the environment on first call and then returns the cached instance. | +| **Parameters** | None | +| **Return value** | `TestEnvironment` – the fully populated test environment structure. | +| **Key dependencies** | - Calls `buildTestEnvironment()` to initialise `env`.
- Relies on package‑level variables `loaded`, `env`. | +| **Side effects** | On first invocation, triggers a comprehensive discovery process that populates many fields (pods, nodes, services, etc.). Subsequent calls are side‑effect free. | +| **How it fits the package** | Central access point for all components that need cluster state; used by diagnostics, tests, and run logic to avoid repeated discovery work. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check `loaded`"] -->|"false"| B["Call `buildTestEnvironment()`"] + B --> C["Set `loaded = true`"] + C --> D["Return global `env`"] + A -->|"true"| D +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetTestEnvironment --> func_buildTestEnvironment +``` + +#### Functions calling `GetTestEnvironment` (Mermaid) + +```mermaid +graph TD + func_ExecCommandContainerNSEnter --> func_GetTestEnvironment + func_GetPidsFromPidNamespace --> func_GetTestEnvironment + func_Run --> func_GetTestEnvironment + func_MarshalConfigurations --> func_GetTestEnvironment + func_GetCniPlugins --> func_GetTestEnvironment + func_GetHwInfoAllNodes --> func_GetTestEnvironment + func_GetNodeJSON --> func_GetTestEnvironment + func_GetVersionK8s --> func_GetTestEnvironment + func_GetVersionOcp --> func_GetTestEnvironment + func_Log --> func_GetTestEnvironment + func_GetProcessCPUScheduling --> func_GetTestEnvironment + func_LoadChecks --> func_GetTestEnvironment + func_ShouldRun --> func_GetTestEnvironment +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetTestEnvironment +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + env := provider.GetTestEnvironment() + fmt.Printf("Cluster operator count: %d\n", len(env.ClusterOperators)) +} +``` + +--- + +--- + +### GetUpdatedCrObject + +**GetUpdatedCrObject** - Obtains a `scalingv1.Scale` object for the specified custom resource and encapsulates it in a 
`CrScale`. + +#### Signature (Go) + +```go +func GetUpdatedCrObject(sg scale.ScalesGetter, namespace string, name string, groupResourceSchema schema.GroupResource) (*CrScale, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Obtains a `scalingv1.Scale` object for the specified custom resource and encapsulates it in a `CrScale`. | +| **Parameters** | `sg scale.ScalesGetter` – client to query scales<br/>
`namespace string` – namespace of the resource<br/>
`name string` – name of the resource<br/>
`groupResourceSchema schema.GroupResource` – GVR identifying the CRD | +| **Return value** | `(*CrScale, error)` – wrapped scale object or an error if retrieval fails. | +| **Key dependencies** | • `autodiscover.FindCrObjectByNameByNamespace` – fetches the underlying scale.<br/>
• `scale.ScalesGetter` – Kubernetes client interface. | +| **Side effects** | No mutation of external state; only performs a read operation and constructs an in‑memory struct. | +| **How it fits the package** | Provides a thin wrapper that converts the generic scaling API result into the provider’s internal `CrScale` representation, enabling further processing or reporting. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Find scale"} + B --> C["Wrap in CrScale"] + C --> D["Return"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetUpdatedCrObject --> func_FindCrObjectByNameByNamespace +``` + +#### Functions calling `GetUpdatedCrObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetUpdatedCrObject +scalesGetter := /* obtain a scale.ScalesGetter instance */ +namespace := "default" +name := "my-crd-instance" +gvr := schema.GroupResource{Group: "example.com", Resource: "mycrds"} + +crScale, err := GetUpdatedCrObject(scalesGetter, namespace, name, gvr) +if err != nil { + // handle error +} +fmt.Printf("Scale of %s/%s: %+v\n", namespace, name, crScale) +``` + +--- + +### GetUpdatedDeployment + +**GetUpdatedDeployment** - Fetches a Kubernetes Deployment by its namespace and name, wraps the result in the package‑specific `Deployment` type, and returns it. + +#### Signature (Go) + +```go +func GetUpdatedDeployment(ac appv1client.AppsV1Interface, namespace, name string) (*Deployment, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches a Kubernetes Deployment by its namespace and name, wraps the result in the package‑specific `Deployment` type, and returns it. | +| **Parameters** | *`ac appv1client.AppsV1Interface`* – client for interacting with Apps V1 resources.
*`namespace string`* – target namespace.
*`name string`* – deployment name. | +| **Return value** | `(*Deployment, error)` – the wrapped deployment or an error if retrieval fails. | +| **Key dependencies** | • `autodiscover.FindDeploymentByNameByNamespace` – performs the actual API call.
• Kubernetes client-go interfaces for Apps V1. | +| **Side effects** | No state mutation; only reads from the cluster and constructs a new struct. | +| **How it fits the package** | Provides a convenient, typed wrapper around raw `appsv1.Deployment` objects so other provider functions can work with the local `Deployment` abstraction. | + +#### Internal workflow + +```mermaid +flowchart TD + A["GetUpdatedDeployment"] --> B["autodiscover.FindDeploymentByNameByNamespace"] + B --> C["Return Deployment struct or error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetUpdatedDeployment --> func_FindDeploymentByNameByNamespace +``` + +#### Functions calling `GetUpdatedDeployment` + +```mermaid +graph TD + func_isDeploymentReady --> func_GetUpdatedDeployment +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetUpdatedDeployment +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + appv1client "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +func example(ac appv1client.AppsV1Interface, ns, name string) { + dep, err := provider.GetUpdatedDeployment(ac, ns, name) + if err != nil { + // handle error + return + } + // use the returned *provider.Deployment + _ = dep +} +``` + +--- + +### GetUpdatedStatefulset + +**GetUpdatedStatefulset** - Fetches the latest Kubernetes StatefulSet identified by *namespace* and *name*, wrapping it in the package‑specific `StatefulSet` type for downstream logic. + +#### Signature (Go) + +```go +func GetUpdatedStatefulset(ac appv1client.AppsV1Interface, namespace, name string) (*StatefulSet, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches the latest Kubernetes StatefulSet identified by *namespace* and *name*, wrapping it in the package‑specific `StatefulSet` type for downstream logic. | +| **Parameters** | `ac appv1client.AppsV1Interface` – client interface to Apps V1 API
`namespace string` – target namespace
`name string` – StatefulSet name | +| **Return value** | `(*StatefulSet, error)` – the wrapped object or an error if retrieval fails | +| **Key dependencies** | • `autodiscover.FindStatefulsetByNameByNamespace`
• Kubernetes Apps V1 client | +| **Side effects** | No mutation of global state; only performs a read operation against the API server. | +| **How it fits the package** | Provides the core lookup routine used by higher‑level utilities (e.g., readiness checks) to obtain an up‑to‑date StatefulSet instance. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Call FindStatefulsetByNameByNamespace"} + B -- Success --> C["Wrap result in &StatefulSet"] + B -- Failure --> D["Return error"] + C --> E["End"] + D --> E +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetUpdatedStatefulset --> func_FindStatefulsetByNameByNamespace +``` + +#### Functions calling `GetUpdatedStatefulset` + +```mermaid +graph TD + func_WaitForStatefulSetReady --> func_GetUpdatedStatefulset + func_isStatefulSetReady --> func_GetUpdatedStatefulset +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetUpdatedStatefulset +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + appv1client "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +func example(ac appv1client.AppsV1Interface, ns, name string) { + ss, err := provider.GetUpdatedStatefulset(ac, ns, name) + if err != nil { + // handle error + } + // use *provider.StatefulSet as needed +} +``` + +--- + +--- + +### IsOCPCluster + +**IsOCPCluster** - Checks whether the test environment represents an OpenShift cluster by comparing the stored version string to a sentinel value for non‑OpenShift clusters. + +```go +func IsOCPCluster() bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the test environment represents an OpenShift cluster by comparing the stored version string to a sentinel value for non‑OpenShift clusters. | +| **Parameters** | None | +| **Return value** | `bool` – `true` if `env.OpenshiftVersion` differs from `autodiscover.NonOpenshiftClusterVersion`; otherwise `false`. 
| +| **Key dependencies** | *`env.OpenshiftVersion` – global test environment variable.
* `autodiscover.NonOpenshiftClusterVersion` – sentinel constant indicating a non‑OpenShift cluster. | +| **Side effects** | None; purely read‑only access to package globals. | +| **How it fits the package** | Provides a lightweight, reusable guard used throughout the provider and diagnostics code to enable or disable OpenShift‑specific logic. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Call IsOCPCluster"] --> B{"Compare env.OpenshiftVersion to NonOpenshiftClusterVersion"} + B -->|"equal"| C["Return false"] + B -->|"not equal"| D["Return true"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `IsOCPCluster` + +```mermaid +graph TD + func_GetVersionOcp --> func_IsOCPCluster + func_createNodes --> func_IsOCPCluster + func_GetNonOCPClusterSkipFn --> func_IsOCPCluster + func_testAllOperatorCertified --> func_IsOCPCluster + func_testAPICompatibilityWithNextOCPRelease --> func_IsOCPCluster + func_LoadChecks --> func_IsOCPCluster +``` + +#### Usage example + +```go +// Minimal example invoking IsOCPCluster +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + if provider.IsOCPCluster() { + fmt.Println("Running OpenShift‑specific tests") + } else { + fmt.Println("Skipping OpenShift‑only logic") + } +} +``` + +--- + +### LoadBalancingDisabled + +**LoadBalancingDisabled** - Determines if both the `cpu-load-balancing.crio.io` and `irq-load-balancing.crio.io` annotations on a pod are set to `"disable"`. If either annotation is missing or has an invalid value, it logs a debug message and returns `false`. + +```go +func LoadBalancingDisabled(p *Pod) bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if both the `cpu-load-balancing.crio.io` and `irq-load-balancing.crio.io` annotations on a pod are set to `"disable"`. 
If either annotation is missing or has an invalid value, it logs a debug message and returns `false`. | +| **Parameters** | `p *Pod –` the pod whose annotations should be inspected. | +| **Return value** | `bool –` `true` if both load‑balancing annotations equal `"disable"`, otherwise `false`. | +| **Key dependencies** | • `internal/log.Logger.Debug` (four invocations for missing or invalid annotations). | +| **Side effects** | Emits debug logs; does not modify the pod or any global state. | +| **How it fits the package** | Used by the CPU‑isolation compliance check (`Pod.IsCPUIsolationCompliant`) to verify that the pod is correctly annotated to disable load balancing before declaring it compliant. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Check cpu-load-balancing annotation"} + B -- Missing --> C["Log missing"] + B -- Invalid --> D["Log invalid"] + B -- "disable" --> E{"Check irq-load-balancing annotation"} + E -- Missing --> F["Log missing"] + E -- Invalid --> G["Log invalid"] + E -- "disable" --> H["Return true"] + C & D & F & G --> I["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_LoadBalancingDisabled --> func_Logger.Debug +``` + +#### Functions calling `LoadBalancingDisabled` + +```mermaid +graph TD + func_Pod.IsCPUIsolationCompliant --> func_LoadBalancingDisabled +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LoadBalancingDisabled +pod := &provider.Pod{ + Annotations: map[string]string{ + "cpu-load-balancing.crio.io": "disable", + "irq-load-balancing.crio.io": "disable", + }, +} +isDisabled := provider.LoadBalancingDisabled(pod) +// isDisabled == true +``` + +--- + +### NewContainer + +**NewContainer** - Initializes a fresh `Container` struct, embedding an empty `corev1.Container` to serve as the foundation for further configuration. + +Creates and returns a new instance of the `Container` type with an initialized underlying Kubernetes container object. 
+ +#### Signature (Go) + +```go +func NewContainer() *Container +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Initializes a fresh `Container` struct, embedding an empty `corev1.Container` to serve as the foundation for further configuration. | +| **Parameters** | None | +| **Return value** | A pointer to the newly created `Container`. | +| **Key dependencies** | - `corev1.Container` (from Kubernetes API) | +| **Side effects** | No observable state changes outside of returning a new object; no I/O or concurrency. | +| **How it fits the package** | Provides a constructor for the provider's container abstraction, enabling callers to start with a clean configuration before applying custom settings. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> InitializeContainerObject + InitializeContainerObject --> ReturnNewContainer +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `NewContainer` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewContainer +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + c := provider.NewContainer() + // c can now be configured or inspected as needed + _ = c // placeholder to avoid unused variable error +} +``` + +--- + +### NewEvent + +**NewEvent** - Creates an `Event` wrapper around a Kubernetes core event, preserving the original object for further use. + +#### Signature (Go) + +```go +func NewEvent(aEvent *corev1.Event) (out Event) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates an `Event` wrapper around a Kubernetes core event, preserving the original object for further use. 
| +| **Parameters** | `aEvent *corev1.Event` – pointer to a Kubernetes event that will be encapsulated. | +| **Return value** | `out Event` – the wrapped event containing the original `*corev1.Event`. | +| **Key dependencies** | • `corev1.Event` from the Kubernetes API.
• Assignment of the underlying event to the wrapper field. | +| **Side effects** | None. The function merely constructs and returns a new struct; no global state or I/O is modified. | +| **How it fits the package** | Provides a lightweight conversion layer so that events can be treated uniformly within the provider logic, enabling consistent handling across different parts of the test environment setup. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive *corev1.Event"] --> B["Create Event wrapper"] + B --> C["Set wrapper.Event = aEvent"] + C --> D["Return wrapper"] +``` + +#### Function dependencies + +None – this function does not call any other functions within the package. + +#### Functions calling `NewEvent` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_NewEvent +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewEvent +package main + +import ( + "k8s.io/api/core/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Example Kubernetes event + kubeEvent := &v1.Event{ + Message: "Sample event", + } + + // Wrap it using NewEvent + wrapped := provider.NewEvent(kubeEvent) + + // Use the wrapped event (here we just print its underlying message) + println(wrapped.Event.Message) // Output: Sample event +} +``` + +--- + +### NewPod + +**NewPod** - Wraps a raw Kubernetes `Pod` into the library’s `Pod` type, enriching it with network interface data and container metadata. + +#### Signature (Go) + +```go +func NewPod(aPod *corev1.Pod) (out Pod) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Wraps a raw Kubernetes `Pod` into the library’s `Pod` type, enriching it with network interface data and container metadata. | +| **Parameters** | `aPod *corev1.Pod` – the original pod object to wrap. 
| +| **Return value** | `out Pod` – a fully populated wrapper containing the original pod pointer, parsed Multus interfaces, PCI addresses, labels that affect test behavior, and the list of containers. | +| **Key dependencies** | • `strings.TrimSpace` (standard library)
• `GetPodIPsPerNet` (internal parsing of CNI status annotation)
• `GetPciPerPod` (PCI extraction from the same annotation)
• `log.Info`, `log.Error` (structured logging)
• `getPodContainers` (builds container list with runtime info) | +| **Side effects** | • Logs informational or error messages when expected annotations are missing or malformed.
• Does not modify the passed pod; all data is read‑only. | +| **How it fits the package** | Central constructor used by discovery and test‑environment setup to convert raw Kubernetes objects into the internal representation required for policy checks and test orchestration. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Has CNI annotation?"} + B -- No --> C["Log missing/empty annotation"] + B -- Yes --> D["Parse IPs with GetPodIPsPerNet"] + D --> E{"Parsing succeeded?"} + E -- No --> F["Log error"] + E -- Yes --> G["Store MultusNetworkInterfaces"] + D --> H["Parse PCI addresses with GetPciPerPod"] + H --> I{"Parsing succeeded?"} + I -- No --> J["Log error"] + I -- Yes --> K["Store MultusPCIs"] + C & G & K --> L{"Check skip labels"} + L --> M["Set SkipNetTests / SkipMultusNetTests flags"] + M --> N["Build Containers list via getPodContainers"] + N --> O["Return Pod wrapper"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewPod --> strings.TrimSpace + func_NewPod --> GetPodIPsPerNet + func_NewPod --> GetPciPerPod + func_NewPod --> log.Info + func_NewPod --> log.Error + func_NewPod --> getPodContainers +``` + +#### Functions calling `NewPod` + +```mermaid +graph TD + ConvertArrayPods --> NewPod + buildTestEnvironment --> NewPod +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewPod +import ( + corev1 "k8s.io/api/core/v1" + provider "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +// Assume pod is obtained from a client or fixture +var pod *corev1.Pod = /* ... */ + +wrapped := provider.NewPod(pod) +// wrapped now contains parsed network interfaces and containers ready for tests +``` + +--- + +### Node.GetCSCOSVersion + +**GetCSCOSVersion** - Extracts and returns the CoreOS (CentOS Stream CoreOS) version string from a node’s OS image field. It validates that the node is running a supported CoreOS distribution before parsing. 
+ +#### Signature (Go) + +```go +func (node *Node) GetCSCOSVersion() (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Extracts and returns the CoreOS (CentOS Stream CoreOS) version string from a node’s OS image field. It validates that the node is running a supported CoreOS distribution before parsing. | +| **Parameters** | `node *Node` – receiver; represents the Kubernetes node whose OS information will be examined. | +| **Return value** | `(string, error)` – on success returns the version component (e.g., `"413.92.202303061740-0"`); on failure returns an empty string and an error explaining why extraction failed. | +| **Key dependencies** | • `Node.IsCSCOS()` – checks OS type
• `fmt.Errorf` – constructs error messages
• `strings.Split`, `strings.TrimSpace` – parse the OS image string | +| **Side effects** | None; purely reads node data and returns computed values. | +| **How it fits the package** | In the *provider* package, this method supports higher‑level logic that needs to know a node’s CoreOS release for compliance checks or version‑specific operations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Is node CS-COS?"} + B -- Yes --> C["Split OSImage by cscosName"] + C --> D["Trim and split on space"] + D --> E["Return first token as version"] + B -- No --> F["Return error invalid OS type"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Node.GetCSCOSVersion --> func_Node.IsCSCOS + func_Node.GetCSCOSVersion --> fmt.Errorf + func_Node.GetCSCOSVersion --> strings.Split + func_Node.GetCSCOSVersion --> strings.TrimSpace +``` + +#### Functions calling `Node.GetCSCOSVersion` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Node.GetCSCOSVersion +node := provider.Node{ /* node initialization */ } + +version, err := node.GetCSCOSVersion() +if err != nil { + fmt.Printf("Failed to get CoreOS version: %v\n", err) + return +} +fmt.Printf("Node is running CoreOS version %s\n", version) +``` + +--- + +### Node.GetRHCOSVersion + +**GetRHCOSVersion** - Extracts the concise Red Hat Enterprise Linux CoreOS (RHCOS) version from a node’s OS image string. It validates that the node is running RHCOS and converts the full “long” version to its short form. + +#### Signature (Go) + +```go +func (node *Node) GetRHCOSVersion() (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Extracts the concise Red Hat Enterprise Linux CoreOS (RHCOS) version from a node’s OS image string. It validates that the node is running RHCOS and converts the full “long” version to its short form. 
| +| **Parameters** | `node *Node` – receiver; contains node status data including `Data.Status.NodeInfo.OSImage`. | +| **Return value** | `string` – the short RHCOS version (e.g., `"410.84"`) or an empty string on error.
`error` – non‑nil if the OS is not RHCOS, parsing fails, or mapping lookup fails. | +| **Key dependencies** | • `Node.IsRHCOS()` – checks OS type.
• `fmt.Errorf` – formats errors.
• `strings.Split`, `strings.TrimSpace` – string manipulation.
• `operatingsystem.GetShortVersionFromLong` – maps long to short version. | +| **Side effects** | None; purely functional, no state mutation or I/O beyond returning data. | +| **How it fits the package** | Provides a convenient accessor for client code that needs the RHCOS version of a node without exposing parsing logic. It relies on `IsRHCOS` and the external mapping helper. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check IsRHCOS"] -->|"false"| B["Return error invalid OS type"] + A -->|"true"| C["Split OSImage by rhcosName"] + C --> D["Trim whitespace from remainder"] + D --> E["Split on space to isolate long version"] + E --> F["Call GetShortVersionFromLong(long)"] + F -->|"error"| G["Return error"] + F -->|"success"| H["Return short version"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Node.GetRHCOSVersion --> func_Node.IsRHCOS + func_Node.GetRHCOSVersion --> fmt.Errorf + func_Node.GetRHCOSVersion --> strings.Split + func_Node.GetRHCOSVersion --> strings.TrimSpace + func_Node.GetRHCOSVersion --> operatingsystem.GetShortVersionFromLong +``` + +#### Functions calling `Node.GetRHCOSVersion` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Node.GetRHCOSVersion +node := &provider.Node{ /* assume node.Data.Status.NodeInfo.OSImage is populated */ } +ver, err := node.GetRHCOSVersion() +if err != nil { + fmt.Println("error:", err) +} else { + fmt.Println("RHCOS short version:", ver) +} +``` + +--- + +### Node.GetRHELVersion + +**GetRHELVersion** - Returns the Red Hat Enterprise Linux (RHEL) release number extracted from the node’s OS image string. 
+ +#### Signature (Go) + +```go +func (node *Node) GetRHELVersion() (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the Red Hat Enterprise Linux (RHEL) release number extracted from the node’s OS image string. | +| **Parameters** | `node *Node` – receiver; the node whose OS information is examined. | +| **Return value** | `(string, error)` – the version string (e.g., `"8.5"`) or an error if the node is not running RHEL. | +| **Key dependencies** | • `node.IsRHEL()` – checks OS type.
• `fmt.Errorf` – constructs error messages.
• `strings.Split`, `strings.TrimSpace` – parse the OS image string. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Part of the provider’s node utilities, enabling callers to programmatically determine RHEL releases for compliance checks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Node.GetRHELVersion"] --> B{"Is node RHEL?"} + B -- No --> C["Return error"] + B -- Yes --> D["Split OS image by rhel"] + D --> E["Trim space and split on first space"] + E --> F["Return version string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Node.GetRHELVersion --> func_Node.IsRHEL + func_Node.GetRHELVersion --> fmt.Errorf + func_Node.GetRHELVersion --> strings.Split + func_Node.GetRHELVersion --> strings.TrimSpace +``` + +#### Functions calling `Node.GetRHELVersion` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Node.GetRHELVersion +node := &provider.Node{ /* populate node.Data with OS image */ } +version, err := node.GetRHELVersion() +if err != nil { + log.Fatalf("Failed to get RHEL version: %v", err) +} +fmt.Printf("Node is running RHEL %s\n", version) +``` + +--- + +### Node.HasWorkloadDeployed + +**HasWorkloadDeployed** - Returns `true` if at least one Pod in `podsUnderTest` has its `Spec.NodeName` equal to the Node’s name, indicating that a workload is running on this node. Otherwise returns `false`. + +Checks whether any of the given Pods are scheduled on the receiver Node. + +--- + +#### Signature (Go) + +```go +func (node *Node) HasWorkloadDeployed(podsUnderTest []*Pod) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if at least one Pod in `podsUnderTest` has its `Spec.NodeName` equal to the Node’s name, indicating that a workload is running on this node. Otherwise returns `false`. 
| +| **Parameters** | `podsUnderTest []*Pod – slice of pointers to Pod objects to inspect` | +| **Return value** | `bool – true if a matching Pod exists; false otherwise` | +| **Key dependencies** | *None* – the function performs only local field comparisons. | +| **Side effects** | None – purely functional, no state mutation or I/O. | +| **How it fits the package** | Used by higher‑level provider logic to determine node utilization and workload placement decisions. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over pods"} + B --> C{"Match node name?"} + C -- Yes --> D["Return true"] + C -- No --> E["Continue loop"] + E --> B + B --> F["End of slice"] --> G["Return false"] +``` + +--- + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Functions calling `Node.HasWorkloadDeployed` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking Node.HasWorkloadDeployed + +node := &provider.Node{Data: provider.NodeData{Name: "worker-1"}} +pods := []*provider.Pod{ + {Spec: provider.PodSpec{NodeName: "worker-1"}}, + {Spec: provider.PodSpec{NodeName: "worker-2"}}, +} + +deployed := node.HasWorkloadDeployed(pods) +fmt.Println("Workload deployed on", node.Data.Name, ":", deployed) // true +``` + +--- + +### Node.IsCSCOS + +**IsCSCOS** - Returns `true` when the node’s operating system image indicates it is running CoreOS. The check trims surrounding whitespace and looks for a predefined identifier (`cscosName`). + +#### Signature (Go) + +```go +func (node *Node) IsCSCOS() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` when the node’s operating system image indicates it is running CoreOS. The check trims surrounding whitespace and looks for a predefined identifier (`cscosName`). 
| +| **Parameters** | *receiver* `node *Node` – the node instance whose OS image is inspected. | +| **Return value** | `bool` – `true` if the OS image contains the CoreOS identifier, otherwise `false`. | +| **Key dependencies** | • `strings.Contains` – searches for the CoreOS marker.
• `strings.TrimSpace` – normalizes the OS image string before searching. | +| **Side effects** | None; purely read‑only inspection of `node.Data.Status.NodeInfo.OSImage`. | +| **How it fits the package** | Used by higher‑level helpers (e.g., `Node.GetCSCOSVersion`) to guard operations that are specific to CoreOS nodes. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Trim node.Data.Status.NodeInfo.OSImage"] --> B["strings.Contains(trimmed, cscosName)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Node.IsCSCOS --> func_strings.TrimSpace + func_Node.IsCSCOS --> func_strings.Contains +``` + +#### Functions calling `Node.IsCSCOS` + +```mermaid +graph TD + func_Node.GetCSCOSVersion --> func_Node.IsCSCOS +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Node.IsCSCOS +node := &provider.Node{ /* populate node.Data with OSImage */ } +if node.IsCSCOS() { + fmt.Println("This node runs CoreOS.") +} else { + fmt.Println("Non‑CoreOS node detected.") +} +``` + +--- + +### Node.IsControlPlaneNode + +**IsControlPlaneNode** - Returns `true` if the node has at least one label that matches any of the predefined control‑plane labels. + +#### Signature (Go) + +```go +func (node *Node) IsControlPlaneNode() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if the node has at least one label that matches any of the predefined control‑plane labels. | +| **Parameters** | none – operates on the receiver `node`. | +| **Return value** | `bool`: `true` when a control‑plane label is present, otherwise `false`. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper.StringInSlice`
• `MasterLabels` (slice of strings) – list of known control‑plane labels. | +| **Side effects** | None – purely read‑only logic. | +| **How it fits the package** | Used by test environments and other components to count or filter master nodes. | + +#### Internal workflow + +```mermaid +flowchart TD + node --> LabelsIteration + LabelsIteration --> CheckLabel + CheckLabel -->|"matches MasterLabels"| ReturnTrue + CheckLabel -->|"no match"| ContinueIteration + ContinueIteration --> EndLoop + EndLoop --> ReturnFalse +``` + +#### Function dependencies + +```mermaid +graph TD + func_Node.IsControlPlaneNode --> func_stringhelper.StringInSlice +``` + +#### Functions calling `Node.IsControlPlaneNode` + +```mermaid +graph TD + func_TestEnvironment.GetMasterCount --> func_Node.IsControlPlaneNode +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Node.IsControlPlaneNode +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + node := provider.Node{ + Data: provider.NodeData{ + Labels: map[string]string{ + "node-role.kubernetes.io/master": "", + }, + }, + } + + if node.IsControlPlaneNode() { + fmt.Println("This node is a control plane (master) node.") + } else { + fmt.Println("Regular worker node.") + } +} +``` + +--- + +### Node.IsHyperThreadNode + +**IsHyperThreadNode** - Checks whether the node identified by `node` has more than one thread per core by inspecting its probe pod. + +#### Signature (Go) + +```go +func (node *Node) IsHyperThreadNode(env *TestEnvironment) (bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the node identified by `node` has more than one thread per core by inspecting its probe pod. | +| **Parameters** | `env *TestEnvironment` – test environment containing probe pod metadata. | +| **Return value** | `(bool, error)` – `true` if hyper‑threading is present; otherwise `false`. 
Returns an error if command execution or parsing fails. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `clientsholder.NewContext(...)`
• `o.ExecCommandContainer(ctx, isHyperThreadCommand)`
• `fmt.Errorf`
• `regexp.MustCompile`
• `strconv.Atoi` | +| **Side effects** | None; performs read‑only queries on the cluster. | +| **How it fits the package** | Part of the `provider` package, enabling tests to adapt based on node hardware capabilities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get ClientsHolder"] --> B["Create Context from probe pod"] + B --> C["ExecCommandContainer with isHyperThreadCommand"] + C --> D["Check error / stderr"] + D -- ok --> E["Parse output for threads per core"] + E --> F{"Threads > 1"} + F -->|"yes"| G["Return true, nil"] + F -->|"no"| H["Return false, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Node.IsHyperThreadNode --> func_GetClientsHolder + func_Node.IsHyperThreadNode --> func_NewContext + func_Node.IsHyperThreadNode --> func_ExecCommandContainer + func_Node.IsHyperThreadNode --> fmt.Errorf + func_Node.IsHyperThreadNode --> regexp.MustCompile + func_Node.IsHyperThreadNode --> strconv.Atoi +``` + +#### Functions calling `Node.IsHyperThreadNode` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Node.IsHyperThreadNode +node := &provider.Node{Data: provider.NodeData{Name: "worker-1"}} +env := &provider.TestEnvironment{ + ProbePods: map[string]*v1.Pod{ + "worker-1": probePod, // assume probePod is defined elsewhere + }, +} + +hasHT, err := node.IsHyperThreadNode(env) +if err != nil { + log.Fatalf("Failed to determine hyper‑threading status: %v", err) +} +fmt.Printf("Node %s has hyper‑threading: %t\n", node.Data.Name, hasHT) +``` + +--- + +### Node.IsRHCOS + +**IsRHCOS** - Determines whether the operating system image of a Kubernetes node corresponds to Red Hat Enterprise Linux CoreOS (RHCOS). 
+ +#### Signature (Go) + +```go +func (node *Node) IsRHCOS() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the operating system image of a Kubernetes node corresponds to Red Hat Enterprise Linux CoreOS (RHCOS). | +| **Parameters** | `node *Node` – the receiver, containing node status information. | +| **Return value** | `bool` – `true` if the OS image contains the RHCOS identifier string; otherwise `false`. | +| **Key dependencies** | • `strings.Contains` (checks for substring)
• `strings.TrimSpace` (removes surrounding whitespace from the OS image name) | +| **Side effects** | None – purely functional, no state mutation or I/O. | +| **How it fits the package** | Provides a quick boolean flag used by other provider functions (e.g., to validate RHCOS‑specific logic in `Node.GetRHCOSVersion`). | + +#### Internal workflow + +```mermaid +flowchart TD + A["TrimSpace(node.Data.Status.NodeInfo.OSImage)"] --> B["Contains(…, rhcosName)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Node.IsRHCOS --> strings.Contains + func_Node.IsRHCOS --> strings.TrimSpace +``` + +#### Functions calling `Node.IsRHCOS` + +```mermaid +graph TD + func_Node.GetRHCOSVersion --> func_Node.IsRHCOS +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Node.IsRHCOS +node := &provider.Node{ /* assume node is populated */ } +if node.IsRHCOS() { + fmt.Println("Node runs RHCOS") +} else { + fmt.Println("Node does not run RHCOS") +} +``` + +--- + +### Node.IsRHEL + +**IsRHEL** - Determines whether the node’s operating system image is a Red Hat Enterprise Linux (RHEL) release. + +#### Signature (Go) + +```go +func (node *Node) IsRHEL() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the node’s operating system image is a Red Hat Enterprise Linux (RHEL) release. | +| **Parameters** | `node *Node` – receiver holding node metadata. | +| **Return value** | `bool` – `true` if the OS image contains the RHEL identifier, otherwise `false`. | +| **Key dependencies** | • `strings.TrimSpace` – removes surrounding whitespace from the OS image string.
• `strings.Contains` – checks for the presence of the RHEL name substring. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a quick guard used by other provider utilities (e.g., `GetRHELVersion`) to ensure they operate only on supported OS images. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Trim node.Data.Status.NodeInfo.OSImage"] --> B["Check if contains rhelName"] + B --> C{"Result"} + C -->|"true"| D["Return true"] + C -->|"false"| E["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Node.IsRHEL --> func_strings.TrimSpace + func_Node.IsRHEL --> func_strings.Contains +``` + +#### Functions calling `Node.IsRHEL` + +```mermaid +graph TD + func_Node.GetRHELVersion --> func_Node.IsRHEL +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Node.IsRHEL +node := &provider.Node{ /* populate node data */ } +if node.IsRHEL() { + fmt.Println("Node is running RHEL") +} else { + fmt.Println("Node is not running RHEL") +} +``` + +--- + +### Node.IsRTKernel + +**IsRTKernel** - Checks whether the node’s kernel version string contains the substring “rt”, indicating a real‑time kernel. + +#### Signature (Go) + +```go +func (node *Node) IsRTKernel() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the node’s kernel version string contains the substring “rt”, indicating a real‑time kernel. | +| **Parameters** | `node *Node` – receiver; the node whose status is examined. | +| **Return value** | `bool` – `true` if the kernel string includes “rt”; otherwise `false`. | +| **Key dependencies** | - `strings.Contains` from the standard library.
- `strings.TrimSpace` from the standard library. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a quick flag for other components to decide if real‑time kernel specific logic should be applied. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Trim node.Data.Status.NodeInfo.KernelVersion"] --> B{"Contains rt"} + B -- Yes --> C["Return true"] + B -- No --> D["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Node.IsRTKernel --> func_strings.TrimSpace + func_Node.IsRTKernel --> func_strings.Contains +``` + +#### Functions calling `Node.IsRTKernel` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Node.IsRTKernel +node := &provider.Node{ + Data: provider.NodeData{ + Status: provider.Status{ + NodeInfo: provider.NodeInfo{ + KernelVersion: "5.11.0-rt-amd64", + }, + }, + }, +} + +isRT := node.IsRTKernel() +fmt.Printf("Is real‑time kernel? %t\n", isRT) +``` + +--- + +### Node.IsWorkerNode + +**IsWorkerNode** - Returns `true` if the node’s labels contain any key that matches one of the predefined worker‑label identifiers. + +Determines whether a Kubernetes node is classified as a worker node by inspecting its labels. + +#### Signature (Go) + +```go +func (node *Node) IsWorkerNode() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if the node’s labels contain any key that matches one of the predefined worker‑label identifiers. | +| **Parameters** | `node *Node` – receiver; the node whose status is evaluated. | +| **Return value** | `bool` – `true` when the node is a worker, otherwise `false`. | +| **Key dependencies** | • Calls `stringhelper.StringInSlice(WorkerLabels, nodeLabel, true)` to check label keys.
• Relies on the package‑level variable `WorkerLabels` (a slice of strings). | +| **Side effects** | None. The function is pure; it only reads from the node’s data and returns a value. | +| **How it fits the package** | Part of the *provider* domain model, enabling higher‑level utilities such as `TestEnvironment.GetWorkerCount` to filter nodes by role. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over node.Data.Labels"} + B -->|"label matches WorkerLabels"| C["Return true"] + B --> D["Continue loop"] + D --> B + C --> E["End"] + B -->|"no match after all labels"| F["Return false"] + F --> E +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Node.IsWorkerNode --> func_stringhelper.StringInSlice +``` + +#### Functions calling `Node.IsWorkerNode` (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetWorkerCount --> func_Node.IsWorkerNode +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Node.IsWorkerNode +node := &provider.Node{Data: provider.NodeData{ + Labels: map[string]string{"role": "worker", "zone": "us-west-1"}, +}} +isWorker := node.IsWorkerNode() +fmt.Printf("Is worker node? %t\n", isWorker) +``` + +--- + +### Node.MarshalJSON + +**MarshalJSON** - Provides JSON encoding of the `Node.Data` field, enabling a `Node` value to be marshaled directly by `encoding/json`. + +#### 1) Signature (Go) + +```go +func (node Node) MarshalJSON() ([]byte, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides JSON encoding of the `Node.Data` field, enabling a `Node` value to be marshaled directly by `encoding/json`. | +| **Parameters** | *None* – receives the receiver `node Node` implicitly. | +| **Return value** | A byte slice containing the JSON representation of `node.Data`, or an error if marshalling fails. | +| **Key dependencies** | Calls `json.Marshal` from the standard library to serialize `node.Data`. 
| +| **Side effects** | None; it only reads the receiver’s state and returns a result without modifying any global or external data. | +| **How it fits the package** | The `provider` package defines a `Node` type that encapsulates certificate provider information. Implementing `MarshalJSON` allows instances of `Node` to be embedded in larger JSON structures (e.g., responses from API endpoints) while ensuring only the relevant payload (`Data`) is exposed. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + node["Receive Node instance"] --> encode["Call json.Marshal(&node.Data)"] + encode --> result{"Return []byte, error"} +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Node.MarshalJSON --> func_json.Marshal +``` + +#### 5) Functions calling `Node.MarshalJSON` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Node.MarshalJSON +package main + +import ( + "encoding/json" + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + node := provider.Node{Data: map[string]string{"role": "worker"}} + b, err := node.MarshalJSON() + if err != nil { + panic(err) + } + fmt.Println(string(b)) // {"role":"worker"} +} + +// Alternatively, json.Marshal will automatically use the custom method: +b2, _ := json.Marshal(node) // same output +``` + +--- + +### Operator.SetPreflightResults + +**SetPreflightResults** - Executes a Preflight container check against the operator’s bundle and index images, collects the results, logs output, and assigns them to `op.PreflightResults`. Skips execution if no install plans are present. 
+ +#### 1) Signature (Go) + +```go +func (op *Operator) SetPreflightResults(env *TestEnvironment) error +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes a Preflight container check against the operator’s bundle and index images, collects the results, logs output, and assigns them to `op.PreflightResults`. Skips execution if no install plans are present. | +| **Parameters** | `env *TestEnvironment` – Provides Docker configuration, insecure‑connection flag, and other runtime settings needed for Preflight. | +| **Return value** | `error` – Non‑nil if any step (writer creation, check run, list retrieval, artifact cleanup) fails; otherwise nil. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains Kubernetes clients.
• `artifacts.NewMapWriter()`, `artifacts.ContextWithWriter()` – create in‑memory artifact store.
• `plibOperator.WithDockerConfigJSONFromFile`, `plibOperator.WithInsecureConnection` – configure Preflight options.
• `plibOperator.NewCheck().Run()` – executes the container tests.
• `GetPreflightResultsDB` – converts raw results to the package’s DB format. | +| **Side effects** | • Writes logs to a temporary buffer and prints them via `log.Info`.
• Deletes any existing `artifacts/` directory with `os.RemoveAll`.
• Mutates `op.PreflightResults` field. | +| **How it fits the package** | In the `provider` package, an `Operator` represents a bundle under test. This method gathers quality‑control data for that operator so tests can later assert on pass/fail counts and remediation suggestions. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check if InstallPlans empty"] -->|"Yes"| B["Log warning & exit"] + A -->|"No"| C["Retrieve bundleImage & indexImage"] + C --> D["Get Kubernetes clients"] + D --> E["Create artifacts writer"] + E --> F["Build context with writer"] + F --> G["Configure Preflight options (docker, insecure)"] + G --> H["Setup logger output to buffer"] + H --> I["Instantiate Preflight check"] + I --> J["Run Preflight tests"] + J --> K{"Runtime error?"} + K -->|"Yes"| L["Retrieve test list & record errors"] + K -->|"No"| M["Proceed"] + L --> N["Log results"] + M --> N + N --> O["Remove artifacts directory"] + O --> P["Store results in op.PreflightResults"] + P --> Q["Return nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Operator.SetPreflightResults --> clientsholder.GetClientsHolder + func_Operator.SetPreflightResults --> artifacts.NewMapWriter + func_Operator.SetPreflightResults --> artifacts.ContextWithWriter + func_Operator.SetPreflightResults --> plibOperator.WithDockerConfigJSONFromFile + func_Operator.SetPreflightResults --> plibOperator.WithInsecureConnection + func_Operator.SetPreflightResults --> stdr.New + func_Operator.SetPreflightResults --> logr.NewContext + func_Operator.SetPreflightResults --> plibOperator.NewCheck + func_Operator.SetPreflightResults --> plibOperator.Check.Run + func_Operator.SetPreflightResults --> plibOperator.Check.List + func_Operator.SetPreflightResults --> GetPreflightResultsDB +``` + +#### 5) Functions calling `Operator.SetPreflightResults` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### 6) Usage example (Go) + +```go +// Minimal example invoking Operator.SetPreflightResults +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + op := &provider.Operator{ + InstallPlans: []provider.InstallPlan{ /* ... */ }, + } + env := provider.NewTestEnvironment(/* configuration args */) + if err := op.SetPreflightResults(env); err != nil { + panic(err) + } +} +``` + +--- + +### Operator.String + +**String** - Formats the fields of an `Operator` instance into a single string for logging or debugging. + +Returns a human‑readable description of an operator configuration. + +```go +func (op *Operator) String() string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats the fields of an `Operator` instance into a single string for logging or debugging. | +| **Parameters** | None – operates on the receiver `op`. | +| **Return value** | A formatted string containing the operator’s name, namespace, subscription name and target namespaces. | +| **Key dependencies** | * `fmt.Sprintf` (standard library) | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a convenient way to inspect an `Operator` value within the provider package, aiding diagnostics and output readability. | + +```mermaid +flowchart TD + A["Start"] --> B{"Build format string"} + B --> C["fmt.Sprintf"] + C --> D["Return result"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Operator.String --> fmt.Sprintf +``` + +#### Functions calling `Operator.String` + +None – this function is currently not referenced elsewhere in the package. 
+ +```mermaid +graph TD + A["None – this function is currently not referenced elsewhere in the package."] +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Operator.String +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + op := provider.Operator{ + Name: "my-operator", + Namespace: "operators", + SubscriptionName: "sub-1", + TargetNamespaces: []string{"ns-a", "ns-b"}, + } + fmt.Println(op.String()) +} +``` + +--- + +### Pod.AffinityRequired + +**AffinityRequired** - Checks the pod’s labels for the key `AffinityRequiredKey`. If present, parses its string value as a boolean and returns that result. Defaults to `false` if the label is absent or invalid. + +#### Signature (Go) + +```go +func (p *Pod) AffinityRequired() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks the pod’s labels for the key `AffinityRequiredKey`. If present, parses its string value as a boolean and returns that result. Defaults to `false` if the label is absent or invalid. | +| **Parameters** | `p *Pod` – receiver; the pod instance whose affinity requirement is queried. | +| **Return value** | `bool` – `true` when the label indicates affinity is required, otherwise `false`. | +| **Key dependencies** | • `strconv.ParseBool` – converts the string label to a boolean.
• `log.Warn` (from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`) – logs parsing failures. | +| **Side effects** | None beyond logging on parse error; no state mutation occurs. | +| **How it fits the package** | Provides a helper for filtering pods based on their affinity requirement, used by `TestEnvironment.GetAffinityRequiredPods` and `GetPodsWithoutAffinityRequiredLabel`. | + +#### Internal workflow + +```mermaid +flowchart TD + p.Labels["AffinityRequiredKey"] --> checkExistence + checkExistence -- exists --> parseBool(val) + parseBool(val) -->|"success"| returnResult(result) + parseBool(val) -->|"error"| logWarn(errMsg) + logWarn(errMsg) --> returnFalse + checkExistence -- not found --> returnFalse +``` + +#### Function dependencies + +```mermaid +graph TD + func_Pod.AffinityRequired --> strconv.ParseBool + func_Pod.AffinityRequired --> log.Warn +``` + +#### Functions calling `Pod.AffinityRequired` + +```mermaid +graph TD + TestEnvironment.GetAffinityRequiredPods --> func_Pod.AffinityRequired + TestEnvironment.GetPodsWithoutAffinityRequiredLabel --> func_Pod.AffinityRequired +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.AffinityRequired +p := &Pod{ + Labels: map[string]string{ + "affinity-required": "true", + }, +} +if p.AffinityRequired() { + fmt.Println("This pod requires affinity.") +} else { + fmt.Println("Affinity not required for this pod.") +} +``` + +--- + +### Pod.CheckResourceHugePagesSize + +**CheckResourceHugePagesSize** - Ensures every `hugepages-*` resource request and limit in the pod’s containers matches the supplied `size`. If any huge‑page resource differs, it returns `false`; otherwise `true`. + +--- + +#### Signature (Go) + +```go +func (p *Pod) CheckResourceHugePagesSize(size string) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures every `hugepages-*` resource request and limit in the pod’s containers matches the supplied `size`. 
If any huge‑page resource differs, it returns `false`; otherwise `true`. | +| **Parameters** | `size string` – expected huge‑page size (e.g., `"1Mi"`). | +| **Return value** | `bool` – `true` if all huge‑page resources match `size`, `false` otherwise. | +| **Key dependencies** | • `len` to check map emptiness
• `strings.Contains` to detect huge‑page resource names
• `Pod.String` for debugging output (used in other parts of the package) | +| **Side effects** | No state mutation or I/O; purely read‑only logic. | +| **How it fits the package** | Part of the provider’s pod validation helpers, ensuring pods adhere to required huge‑page size constraints before deployment. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate containers"} + B --> C{"Check request/limit maps"} + C -->|"empty"| D["Skip container"] + C -->|"non‑empty"| E["Inspect each resource name"] + E --> F{"Is hugepage?"} + F -->|"yes and size mismatch"| G["Return false"] + F -->|"no or matches"| H["Continue"] + H --> I{"Next container/resource"} + I -->|"done"| J["All passed"] --> K["Return true"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod.CheckResourceHugePagesSize --> len + func_Pod.CheckResourceHugePagesSize --> strings.Contains + func_Pod.CheckResourceHugePagesSize --> func_Pod.String +``` + +--- + +#### Functions calling `Pod.CheckResourceHugePagesSize` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.CheckResourceHugePagesSize + +p := &provider.Pod{ + Containers: []provider.Container{ + { + Resources: provider.Resources{ + Requests: map[resource.Quantity]struct{}{ + resource.MustParse("hugepages-1Mi"): {}, + }, + Limits: map[resource.Quantity]struct{}{ + resource.MustParse("hugepages-1Mi"): {}, + }, + }, + }, + }, +} + +if p.CheckResourceHugePagesSize("1Mi") { + fmt.Println("All huge‑page resources match the required size.") +} else { + fmt.Println("Mismatch in huge‑page resource sizes detected.") +} +``` + +--- + +--- + +### Pod.ContainsIstioProxy + +**ContainsIstioProxy** - Determines whether any container in the pod has the name specified by `IstioProxyContainerName`. 
+ +#### Signature (Go) + +```go +func (p *Pod) ContainsIstioProxy() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether any container in the pod has the name specified by `IstioProxyContainerName`. | +| **Parameters** | *receiver* `p *Pod` – the pod instance to inspect. | +| **Return value** | `bool`: `true` if an Istio proxy container is present, otherwise `false`. | +| **Key dependencies** | • Uses the constant `IstioProxyContainerName`.
• Iterates over `p.Containers`. | +| **Side effects** | None – purely read‑only inspection. | +| **How it fits the package** | Provides a utility for higher‑level logic that needs to know whether a pod is part of an Istio service mesh. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> CheckContainers + CheckContainers -->|"container.Name == IstioProxyContainerName"| ReturnTrue + CheckContainers -->|"end loop"| ReturnFalse +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `Pod.ContainsIstioProxy` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.ContainsIstioProxy +pod := provider.Pod{ + Containers: []provider.Container{ + {Name: "app"}, + {Name: "istio-proxy"}, // assume IstioProxyContainerName equals "istio-proxy" + }, +} +hasProxy := pod.ContainsIstioProxy() +fmt.Println("Contains Istio proxy:", hasProxy) // Output: Contains Istio proxy: true +``` + +--- + +### Pod.CreatedByDeploymentConfig + +**CreatedByDeploymentConfig** - Determines whether the pod originates from an OpenShift `DeploymentConfig` by traversing owner references through a `ReplicationController`. + +#### Signature (Go) + +```go +func (p *Pod) CreatedByDeploymentConfig() (bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the pod originates from an OpenShift `DeploymentConfig` by traversing owner references through a `ReplicationController`. | +| **Parameters** | `p *Pod` – receiver; no explicit parameters. | +| **Return value** | `bool` – true if the pod is linked to a DeploymentConfig, otherwise false.
`error` – any error encountered during client calls. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains Kubernetes client set.
• `p.GetOwnerReferences()` – retrieves owner refs of the pod.
• `oc.K8sClient.CoreV1().ReplicationControllers(...).Get(...)` – fetches ReplicationController object. | +| **Side effects** | No state mutations; only performs read‑only API calls. | +| **How it fits the package** | Used during test environment setup to flag pods that were deployed via legacy OpenShift `DeploymentConfig`, encouraging use of modern controllers (`Deployment`, `StatefulSet`). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Pod.CreatedByDeploymentConfig"] --> B["Get ClientsHolder"] + B --> C["Iterate over Pod owner references"] + C --> D{"Owner.Kind == ReplicationController"} + D -- Yes --> E["Fetch ReplicationController via API"] + E --> F["Check RCs owner refs for DeploymentConfig"] + F --> G{"Found DeploymentConfig?"} + G -- Yes --> H["Return true, nil"] + G -- No --> I["Continue loop"] + D -- No --> J["End loop"] + J --> K["Return false, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod.CreatedByDeploymentConfig --> func_GetClientsHolder + func_Pod.CreatedByDeploymentConfig --> func_GetOwnerReferences + func_Pod.CreatedByDeploymentConfig --> func_K8sClient_CoreV1_ReplicationControllers_Get +``` + +#### Functions calling `Pod.CreatedByDeploymentConfig` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_Pod.CreatedByDeploymentConfig +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.CreatedByDeploymentConfig +pod := NewPod(someK8sPodObject) +isDC, err := pod.CreatedByDeploymentConfig() +if err != nil { + log.Fatalf("Failed to determine DeploymentConfig origin: %v", err) +} +fmt.Printf("Created by DeploymentConfig? %t\n", isDC) +``` + +--- + +--- + +### Pod.GetRunAsNonRootFalseContainers + +**GetRunAsNonRootFalseContainers** - Returns containers that either have `securityContext.runAsNonRoot` set to false or `securityContext.runAsUser` set to 0 (both indicating a root user). Pod‑level defaults are respected if container values are missing. 
+ +#### Signature (Go) + +```go +func (p *Pod) GetRunAsNonRootFalseContainers(knownContainersToSkip map[string]bool) ([]*Container, []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns containers that either have `securityContext.runAsNonRoot` set to false or `securityContext.runAsUser` set to 0 (both indicating a root user). Pod‑level defaults are respected if container values are missing. | +| **Parameters** | `knownContainersToSkip map[string]bool` – names of containers that should be excluded from the check. | +| **Return value** | Two slices: `[]*Container` (the non‑compliant containers) and `[]string` (the corresponding reasons). Each reason string explains why the container failed. | +| **Key dependencies** | • `Container.IsContainerRunAsNonRoot` – evaluates container or pod run‑as‑nonroot.
• `Container.IsContainerRunAsNonRootUserID` – checks run‑as‑user against 0.
• Standard slice `append`. | +| **Side effects** | None; the function only reads the pod and container data. | +| **How it fits the package** | Part of the provider’s security analysis for Kubernetes pods, enabling compliance checks against the “Run as non‑root” policy. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> PodSecurityContext["Check pod.Spec.SecurityContext"] + PodSecurityContext --> SetPodValues{"podRunAsNonRoot / podRunAsUserID"} + SetPodValues --> IterateContainers["For each container"] + IterateContainers --> SkipKnown{"Is in knownContainersToSkip?"} + SkipKnown -- Yes --> NextContainer + SkipKnown -- No --> EvalNonRoot["Check RunAsNonRoot"] + EvalNonRoot --> EvalUserID["Check RunAsUserID"] + EvalUserID --> Pass{"Pass if either true"} + Pass -- True --> NextContainer + Pass -- False --> Append["Add to nonCompliantContainers & reasons"] + Append --> NextContainer + NextContainer --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod.GetRunAsNonRootFalseContainers --> func_Container.IsContainerRunAsNonRoot + func_Pod.GetRunAsNonRootFalseContainers --> func_Container.IsContainerRunAsNonRootUserID + func_Pod.GetRunAsNonRootFalseContainers --> append +``` + +#### Functions calling `Pod.GetRunAsNonRootFalseContainers` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking Pod.GetRunAsNonRootFalseContainers + +p := &provider.Pod{ + Spec: provider.PodSpec{ + SecurityContext: &provider.PodSecurityContext{ + RunAsNonRoot: boolPtr(false), + RunAsUser: int64Ptr(0), + }, + }, + Containers: []*provider.Container{ + { + Name: "app", + SecurityContext: &provider.SecurityContext{ + RunAsNonRoot: boolPtr(false), // violates rule + }, + }, + { + Name: "sidecar", + // inherits pod defaults, also violates + }, + }, +} + +skip := map[string]bool{"helper": true} +nonCompliant, reasons := p.GetRunAsNonRootFalseContainers(skip) + +fmt.Println("Violating containers:", nonCompliant) +fmt.Println("Reasons:", reasons) +``` + +--- + +### Pod.GetTopOwner + +**GetTopOwner** - Returns a mapping of top‑level owner identifiers to `podhelper.TopOwner` structs for the Pod instance. + +#### Signature (Go) + +```go +func (p *Pod) GetTopOwner() (topOwners map[string]podhelper.TopOwner, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a mapping of top‑level owner identifiers to `podhelper.TopOwner` structs for the Pod instance. | +| **Parameters** | *receiver* `p *Pod` – the Pod whose owners are queried. | +| **Return value** | `topOwners map[string]podhelper.TopOwner` – key is an identifier (e.g., UID), value contains owner metadata; `err error` if resolution fails. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/podhelper.GetPodTopOwner`
• uses the Pod’s `Namespace` and `OwnerReferences`. | +| **Side effects** | None; purely read‑only computation. | +| **How it fits the package** | Provides a convenient method on the `Pod` type to expose owner information, used by higher‑level analytics or reporting components within the provider package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Pod instance"] --> B["Retrieve Namespace & OwnerReferences"] + B --> C["podhelper.GetPodTopOwner(namespace, refs)"] + C --> D["Return topOwners map and error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Pod_GetTopOwner --> func_podhelper_GetPodTopOwner +``` + +#### Functions calling `Pod.GetTopOwner` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.GetTopOwner +pod := &provider.Pod{ + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{/*…*/}, +} + +owners, err := pod.GetTopOwner() +if err != nil { + log.Fatalf("Failed to get top owners: %v", err) +} +for id, owner := range owners { + fmt.Printf("Owner ID: %s, Kind: %s\n", id, owner.Kind) +} +``` + +--- + +### Pod.HasHugepages + +**HasHugepages** - Determines whether any container within the pod requests or limits a hugepage resource. + +#### 1) Signature (Go) + +```go +func (p *Pod) HasHugepages() bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether any container within the pod requests or limits a hugepage resource. | +| **Parameters** | `p *Pod` – the receiver; represents a Kubernetes pod. | +| **Return value** | `bool` – `true` if at least one resource name contains the substring `"hugepage"`, otherwise `false`. | +| **Key dependencies** | • `strings.Contains` (from the standard library)
• `Pod.String()` for debugging/logging (not directly used in logic) | +| **Side effects** | None; purely reads pod data. | +| **How it fits the package** | Provides a quick check used by higher‑level functions (e.g., environment filtering) to identify pods that allocate hugepages. | + +#### 3) Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate containers"} + B -->|"for each container"| C{"Check requests"} + C -->|"has hugepage?"| D["Return true"] + C --> E{"Check limits"} + E -->|"has hugepage?"| D + D --> F["End with true"] + E --> G["Continue loop"] + G --> B + B --> H["All containers checked"] --> I["Return false"] +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_Pod.HasHugepages --> strings.Contains + func_Pod.HasHugepages --> func_Pod.String +``` + +#### 5) Functions calling `Pod.HasHugepages` + +```mermaid +graph TD + func_TestEnvironment.GetHugepagesPods --> func_Pod.HasHugepages +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Pod.HasHugepages +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + pod := provider.Pod{ + Name: "example-pod", + Namespace: "default", + Containers: []provider.Container{ + { + Image: "nginx:latest", + Resources: provider.Resources{ + Requests: map[provider.ResourceName]resource.Quantity{ + provider.HugePagesResourceName: resource.MustParse("1Gi"), + }, + }, + }, + }, + } + + if pod.HasHugepages() { + fmt.Println("Pod uses hugepages") + } else { + fmt.Println("No hugepage usage detected") + } +} +``` + +--- + +### Pod.HasNodeSelector + +**HasNodeSelector** - Determines if the pod’s specification includes at least one key/value pair in `Spec.NodeSelector`, indicating that it targets specific nodes. + +Checks whether a pod definition contains any node selector constraints. 
+ +```go +func (p *Pod) HasNodeSelector() bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the pod’s specification includes at least one key/value pair in `Spec.NodeSelector`, indicating that it targets specific nodes. | +| **Parameters** | `p *Pod` – receiver containing the pod’s spec. | +| **Return value** | `bool` – `true` when `len(p.Spec.NodeSelector) != 0`; otherwise `false`. | +| **Key dependencies** | - Calls Go built‑in function `len` on a map. | +| **Side effects** | None; purely read‑only. | +| **How it fits the package** | Part of the provider’s pod abstraction, enabling callers to decide if scheduling constraints exist before further processing or validation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> CheckLen{"Is len(p.Spec.NodeSelector) ≠ 0?"} + CheckLen -- Yes --> ReturnTrue["Return true"] + CheckLen -- No --> ReturnFalse["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod.HasNodeSelector --> builtin_len +``` + +#### Functions calling `Pod.HasNodeSelector` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.HasNodeSelector +p := &provider.Pod{ + Spec: provider.PodSpec{ + NodeSelector: map[string]string{"disktype": "ssd"}, + }, +} +if p.HasNodeSelector() { + fmt.Println("Pod targets specific nodes") +} else { + fmt.Println("No node selector defined") +} +``` + +--- + +### Pod.IsAffinityCompliant + +**IsAffinityCompliant** - Validates that a pod has appropriate affinity rules when an `AffinityRequired` flag is set; returns `true` if compliant, otherwise `false` with descriptive error. + +Checks whether a pod satisfies the required affinity constraints defined in its spec. 
+ +#### Signature (Go) + +```go +func (p *Pod) IsAffinityCompliant() (bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that a pod has appropriate affinity rules when an `AffinityRequired` flag is set; returns `true` if compliant, otherwise `false` with descriptive error. | +| **Parameters** | `p *Pod` – the receiver containing the pod specification to inspect. | +| **Return value** | `bool` – compliance status; `error` – detailed reason when not compliant (or `nil` on success). | +| **Key dependencies** | • `fmt.Errorf` for error creation
• `Pod.String()` for human‑readable pod identification | +| **Side effects** | None. The function only reads the pod struct and returns status/error. | +| **How it fits the package** | Provides a reusable check used by higher‑level validation routines to enforce affinity policies on Kubernetes pods within the provider package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"p.Spec.Affinity == nil"} + B -- Yes --> C["Return false + error “missing affinity rules”"] + B -- No --> D{"p.Spec.Affinity.PodAntiAffinity != nil"} + D -- Yes --> E["Return false + error “has anti‑affinity rules”"] + D -- No --> F{"p.Spec.Affinity.PodAffinity == nil AND p.Spec.Affinity.NodeAffinity == nil"} + F -- Yes --> G["Return false + error “missing pod/node affinity rules”"] + F -- No --> H["Return true, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod.IsAffinityCompliant --> fmt.Errorf + func_Pod.IsAffinityCompliant --> func_Pod.String +``` + +#### Functions calling `Pod.IsAffinityCompliant` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsAffinityCompliant +pod := &provider.Pod{ + Spec: provider.PodSpec{ + Affinity: &provider.Affinity{ /* populate as needed */ }, + }, +} +compliant, err := pod.IsAffinityCompliant() +if err != nil { + fmt.Printf("Pod %s is not compliant: %v\n", pod.Name, err) +} else { + fmt.Printf("Pod %s meets affinity requirements.\n", pod.Name) +} +``` + +--- + +### Pod.IsAutomountServiceAccountSetOnSA + +**IsAutomountServiceAccountSetOnSA** - Determines whether the `AutomountServiceAccountToken` field is set for the service account associated with the pod. 
+ +#### Signature (Go) + +```go +func (p *Pod) IsAutomountServiceAccountSetOnSA() (*bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the `AutomountServiceAccountToken` field is set for the service account associated with the pod. | +| **Parameters** | *none* – operates on the receiver `p *Pod`. | +| **Return value** | `(*bool, error)` – a pointer to the boolean flag if found, otherwise an error. | +| **Key dependencies** | • `fmt.Errorf` (used twice for error construction) | +| **Side effects** | None; only reads pod data and returns results. | +| **How it fits the package** | Provides a helper for other components to validate service‑account token automount settings during compliance checks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD +    p.AllServiceAccountsMap --> checkInitialized +    checkInitialized -->|"nil"| returnError1["return isSet, fmt.Errorf(\AllServiceAccountsMap is not initialized …\)"] +    checkInitialized -->|"ok"| lookupSA +    lookupSA -->|"not found"| returnError2["return isSet, fmt.Errorf(\could not find a service account …\)"] +    lookupSA -->|"found"| returnValue["return (*p.AllServiceAccountsMap)[key].AutomountServiceAccountToken, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD +    func_Pod.IsAutomountServiceAccountSetOnSA --> fmt.Errorf +``` + +#### Functions calling `Pod.IsAutomountServiceAccountSetOnSA` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsAutomountServiceAccountSetOnSA +pod := &provider.Pod{ + Namespace: "default", + Name: "example-pod", + Spec: provider.PodSpec{ + ServiceAccountName: "my-sa", + }, + AllServiceAccountsMap: map[string]*provider.ServiceAccount{ + "defaultmy-sa": {AutomountServiceAccountToken: boolPtr(true)}, + }, +} + +isSet, err := pod.IsAutomountServiceAccountSetOnSA() +if err != nil { + log.Fatalf("failed to check automount setting: %v", err) +} +fmt.Printf("Automount set? %v\n", *isSet) + +func boolPtr(b bool) *bool { return &b } +``` + +--- + +### Pod.IsCPUIsolationCompliant + +**IsCPUIsolationCompliant** - Checks that the pod has correct annotations for disabling CPU and IRQ load balancing and specifies a runtime class name, indicating compliance with CPU isolation requirements. + +#### 1) Signature (Go) + +```go +func (p *Pod) IsCPUIsolationCompliant() bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks that the pod has correct annotations for disabling CPU and IRQ load balancing and specifies a runtime class name, indicating compliance with CPU isolation requirements. | +| **Parameters** | `p *Pod` – the pod instance to evaluate. | +| **Return value** | `bool` – `true` if all conditions are met; otherwise `false`. | +| **Key dependencies** | • `LoadBalancingDisabled(p)`
• `p.IsRuntimeClassNameSpecified()`
• `log.Debug(msg, args...)` | +| **Side effects** | Emits debug log messages when conditions fail. No state mutation occurs. | +| **How it fits the package** | Used by the test environment to filter pods that are guaranteed with exclusive CPUs and meet isolation requirements. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"LoadBalancingDisabled(p)"} + B -- Yes --> C["Set isCPUIsolated = true"] + B -- No --> D["log.Debug(annotations missing) ; Set isCPUIsolated = false"] + D --> E{"p.IsRuntimeClassNameSpecified()"} + E -- Yes --> F["Return isCPUIsolated"] + E -- No --> G["log.Debug(runtimeClassName missing); Set isCPUIsolated = false"] + G --> F +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod.IsCPUIsolationCompliant --> func_LoadBalancingDisabled + func_Pod.IsCPUIsolationCompliant --> func_Pod.IsRuntimeClassNameSpecified + func_Pod.IsCPUIsolationCompliant --> func_log.Debug +``` + +#### 5) Functions calling `Pod.IsCPUIsolationCompliant` (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs --> func_Pod.IsCPUIsolationCompliant +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Pod.IsCPUIsolationCompliant +p := &Pod{ /* populate fields as needed */ } +if p.IsCPUIsolationCompliant() { + fmt.Println("Pod is compliant with CPU isolation.") +} else { + fmt.Println("Pod fails CPU isolation checks.") +} +``` + +--- + +### Pod.IsPodGuaranteed + +**IsPodGuaranteed** - Checks whether all containers in the pod have identical CPU and memory requests and limits, thereby qualifying the pod for the *Guaranteed* QoS class. + +#### Signature (Go) + +```go +func (p *Pod) IsPodGuaranteed() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether all containers in the pod have identical CPU and memory requests and limits, thereby qualifying the pod for the *Guaranteed* QoS class. 
| +| **Parameters** | `p *Pod` – the pod instance on which the method is invoked. | +| **Return value** | `bool` – `true` if the pod meets the guaranteed criteria; otherwise `false`. | +| **Key dependencies** | Calls `AreResourcesIdentical(p)` from the same package to perform the actual resource comparison. | +| **Side effects** | None; purely functional, no state mutation or I/O. | +| **How it fits the package** | Provides a concise API for other components (e.g., test environments) to filter pods based on QoS guarantees without exposing internal logic. | + +#### Internal workflow + +```mermaid +flowchart TD + Pod.IsPodGuaranteed --> AreResourcesIdentical +``` + +#### Function dependencies + +```mermaid +graph TD + func_Pod.IsPodGuaranteed --> func_AreResourcesIdentical +``` + +#### Functions calling `Pod.IsPodGuaranteed` + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPods --> func_Pod.IsPodGuaranteed + func_TestEnvironment.GetNonGuaranteedPods --> func_Pod.IsPodGuaranteed +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsPodGuaranteed +p := &provider.Pod{ /* populate pod fields */ } +if p.IsPodGuaranteed() { + fmt.Println("This pod is guaranteed QoS.") +} else { + fmt.Println("This pod is not guaranteed QoS.") +} +``` + +--- + +### Pod.IsPodGuaranteedWithExclusiveCPUs + +**IsPodGuaranteedWithExclusiveCPUs** - Checks that all containers in a pod request and limit the same integer number of CPUs (no fractional milli‑CPU values). + +Determines whether a pod’s CPU resources are fully specified, integer‑valued and match requests with limits for every container, implying guaranteed exclusive CPU usage. + +```go +func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks that all containers in a pod request and limit the same integer number of CPUs (no fractional milli‑CPU values). | +| **Parameters** | `p` – pointer to the Pod instance invoking the method. 
| +| **Return value** | `bool`: `true` if every container satisfies whole‑unit CPU requests/limits and those values are identical; otherwise `false`. | +| **Key dependencies** | *Calls `AreCPUResourcesWholeUnits(p)` to verify integer CPU specifications.
* Calls `AreResourcesIdentical(p)` to ensure request equals limit. | +| **Side effects** | None; purely read‑only evaluation. | +| **How it fits the package** | Provides a quick check for pods that are guaranteed exclusive CPUs, used by higher‑level filtering functions in the provider package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Pod.IsPodGuaranteedWithExclusiveCPUs"] --> B{"AreCPUResourcesWholeUnits(p)"} + B -- true --> C{"AreResourcesIdentical(p)"} + C -- true --> D["Return true"] + B -- false --> E["Return false"] + C -- false --> E +``` + +#### Function dependencies + +```mermaid +graph TD + func_Pod.IsPodGuaranteedWithExclusiveCPUs --> func_AreCPUResourcesWholeUnits + func_Pod.IsPodGuaranteedWithExclusiveCPUs --> func_AreResourcesIdentical +``` + +#### Functions calling `Pod.IsPodGuaranteedWithExclusiveCPUs` + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs --> func_Pod.IsPodGuaranteedWithExclusiveCPUs + func_TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs --> func_Pod.IsPodGuaranteedWithExclusiveCPUs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsPodGuaranteedWithExclusiveCPUs +p := &provider.Pod{ /* populate pod fields */ } +if p.IsPodGuaranteedWithExclusiveCPUs() { + fmt.Println("This pod is guaranteed exclusive CPUs.") +} +``` + +--- + +### Pod.IsRunAsUserID + +**IsRunAsUserID** - Determines whether the `Pod`’s security context is configured to run as a specific user ID. + +#### 1) Signature (Go) + +```go +func (p *Pod) IsRunAsUserID(uid int64) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the `Pod`’s security context is configured to run as a specific user ID. | +| **Parameters** | `uid int64` – The user ID to compare against the pod’s `RunAsUser`. | +| **Return value** | `bool` – `true` if the pod’s `RunAsUser` equals `uid`; otherwise `false`. 
| +| **Key dependencies** | *None* – uses only standard library types (`int64`, `bool`). | +| **Side effects** | None. The function performs a read‑only check on the pod object. | +| **How it fits the package** | Part of the `provider` package’s pod utilities, enabling callers to validate security context settings. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive *Pod instance"] --> B{"Check Spec.SecurityContext"} + B -- nil or missing RunAsUser --> C["Return false"] + B -- present RunAsUser --> D["Compare with uid"] + D --> E["Return comparison result"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 5) Functions calling `Pod.IsRunAsUserID` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Pod.IsRunAsUserID +p := &provider.Pod{ + Spec: provider.PodSpec{ + SecurityContext: &provider.SecurityContext{ + RunAsUser: int64Ptr(1000), + }, + }, +} + +if p.IsRunAsUserID(1000) { + fmt.Println("Pod runs as user 1000") +} else { + fmt.Println("Pod does not run as user 1000") +} +``` + +*(Assumes a helper `int64Ptr` that returns a pointer to an int64.)* + +--- + +### Pod.IsRuntimeClassNameSpecified + +**IsRuntimeClassNameSpecified** - Determines if the pod’s spec contains a non‑nil `RuntimeClassName`, indicating that a runtime class has been specified. + +Checks whether a Kubernetes Pod has the `runtimeClassName` field set in its specification. + +```go +func (p *Pod) IsRuntimeClassNameSpecified() bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the pod’s spec contains a non‑nil `RuntimeClassName`, indicating that a runtime class has been specified. | +| **Parameters** | `p *Pod` – the Pod instance on which the method is invoked. 
| +| **Return value** | `bool` – `true` when `p.Spec.RuntimeClassName != nil`; otherwise `false`. | +| **Key dependencies** | • None (direct field access). | +| **Side effects** | None; purely read‑only operation. | +| **How it fits the package** | Provides a lightweight check used by higher‑level compliance functions, e.g., CPU isolation verification. | + +```mermaid +flowchart TD + Pod --> IsRuntimeClassNameSpecified +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + Note["No external calls"] +``` + +#### Functions calling `Pod.IsRuntimeClassNameSpecified` + +```mermaid +graph TD + Pod.IsCPUIsolationCompliant --> Pod.IsRuntimeClassNameSpecified +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsRuntimeClassNameSpecified +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + pod := provider.Pod{ /* assume Spec is populated elsewhere */ } + if pod.IsRuntimeClassNameSpecified() { + fmt.Println("runtimeClassName is set") + } else { + fmt.Println("runtimeClassName is not specified") + } +} +``` + +--- + +### Pod.IsShareProcessNamespace + +**IsShareProcessNamespace** - Determines if a pod is configured to share its process namespace with other pods in the same pod. This is used for selecting pods that have `shareProcessNamespace: true` set in their spec. + +Checks whether the pod’s specification enables sharing of the process namespace. + +--- + +#### Signature (Go) + +```go +func (p *Pod) IsShareProcessNamespace() bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a pod is configured to share its process namespace with other pods in the same pod. This is used for selecting pods that have `shareProcessNamespace: true` set in their spec. | +| **Parameters** | *None* – operates on the receiver `p *Pod`. 
| +| **Return value** | `bool` – `true` if `ShareProcessNamespace` is non‑nil and dereferenced to `true`; otherwise `false`. | +| **Key dependencies** | - Accesses `p.Spec.ShareProcessNamespace`, a pointer to a boolean. | +| **Side effects** | None; purely reads state without modifying it or performing I/O. | +| **How it fits the package** | Provides a convenient helper for other components (e.g., test environment filtering) to quickly query this pod feature. | + +--- + +#### Internal workflow + +```mermaid +flowchart TD + A["Pod.IsShareProcessNamespace"] --> B{"p.Spec.ShareProcessNamespace != nil"} + B -- true --> C["*p.Spec.ShareProcessNamespace == true?"] + C -- true --> D["Return true"] + C -- false --> E["Return false"] + B -- false --> F["Return false"] +``` + +--- + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Functions calling `Pod.IsShareProcessNamespace` + +```mermaid +graph TD + func_TestEnvironment_GetShareProcessNamespacePods --> func_Pod_IsShareProcessNamespace +``` + +*`TestEnvironment.GetShareProcessNamespacePods` iterates over all pods and selects those where this function returns `true`.* + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsShareProcessNamespace +pod := &provider.Pod{ + Spec: provider.PodSpec{ + ShareProcessNamespace: ptr.Bool(true), + }, +} + +if pod.IsShareProcessNamespace() { + fmt.Println("This pod shares its process namespace.") +} +``` + +*In the example, `ptr.Bool` is a helper that returns a pointer to the given boolean value.* + +--- + +### Pod.IsUsingClusterRoleBinding + +**IsUsingClusterRoleBinding** - Checks whether the pod’s service account is referenced as a subject in any provided `ClusterRoleBinding`. Returns a flag, the name of the bound role, and an error if logging fails. 
+ +#### Signature (Go) + +```go +func (p *Pod) IsUsingClusterRoleBinding(clusterRoleBindings []rbacv1.ClusterRoleBinding, logger *log.Logger) (bool, string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the pod’s service account is referenced as a subject in any provided `ClusterRoleBinding`. Returns a flag, the name of the bound role, and an error if logging fails. | +| **Parameters** | `clusterRoleBindings []rbacv1.ClusterRoleBinding` – list of cluster‑role bindings to examine.
`logger *log.Logger` – logger for informational and error output. | +| **Return value** | `bool` – true if a binding is found; otherwise false.
`string` – name of the bound role (empty if none).
`error` – any error encountered during logging (rare, but captured). | +| **Key dependencies** | • `logger.Info` – logs pod identification.
• `logger.Error` – reports a match. | +| **Side effects** | Emits log messages; no state mutation on the pod or bindings. | +| **How it fits the package** | Part of the `provider` package’s pod inspection utilities, enabling security checks that ensure pods are not granted cluster‑wide permissions via their service accounts. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log pod info"] + B --> C{"Iterate over ClusterRoleBindings"} + C -->|"found subject"| D["Check if subject matches SA & namespace"] + D -->|"match"| E["Log error, return true + role name"] + D -->|"no match"| F["Continue loop"] + F --> C + C --> G["Return false, empty string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod.IsUsingClusterRoleBinding --> func_Info + func_Pod.IsUsingClusterRoleBinding --> func_Error +``` + +#### Functions calling `Pod.IsUsingClusterRoleBinding` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsUsingClusterRoleBinding +import ( + "log" + rbacv1 "k8s.io/api/rbac/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +// Assume p is an initialized *provider.Pod and crbs is a slice of ClusterRoleBindings. +logger := log.New(os.Stdout, "", log.LstdFlags) +usingCRB, roleName, err := p.IsUsingClusterRoleBinding(crbs, logger) +if err != nil { + // handle logging error +} +fmt.Printf("Uses cluster‑role binding: %v (role: %s)\n", usingCRB, roleName) +``` + +--- + +### Pod.IsUsingSRIOV + +**IsUsingSRIOV** - Returns `true` when at least one of the pod’s attached networks is configured as SR‑I/O‑V. It inspects the pod’s CNCF CNI annotation and checks each referenced NetworkAttachmentDefinition (NAD). 
+ +#### 1) Signature (Go) + +```go +func (p *Pod) IsUsingSRIOV() (bool, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` when at least one of the pod’s attached networks is configured as SR‑I/O‑V. It inspects the pod’s CNCF CNI annotation and checks each referenced NetworkAttachmentDefinition (NAD). | +| **Parameters** | *receiver* `p *Pod` – the pod to inspect. | +| **Return value** | `(bool, error)` – `true` if an SR‑I/O‑V network is found; otherwise `false`. An error is returned if any API call or JSON parsing fails. | +| **Key dependencies** | • `getCNCFNetworksNamesFromPodAnnotation`
• `clientsholder.GetClientsHolder()`
• `oc.CNCFNetworkingClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(...).Get(...)`
• `isNetworkAttachmentDefinitionConfigTypeSRIOV` | +| **Side effects** | None. The function only reads pod annotations, queries the Kubernetes API and performs in‑memory checks; no state is mutated. | +| **How it fits the package** | Within the `provider` package, this method lets higher‑level logic (e.g., test environments) filter pods that use SR‑I/O‑V networking for targeted validation or reporting. | + +#### 3) Internal workflow + +```mermaid +flowchart TD + A["Check pod annotation k8s.v1.cni.cncf.io/networks"] -->|"exists?"| B{"Annotation present"} + B -- No --> C["Return false"] + B -- Yes --> D["Parse network names with getCNCFNetworksNamesFromPodAnnotation"] + D --> E["Acquire Kubernetes clients via clientsholder.GetClientsHolder()"] + E --> F{"Iterate over each network name"} + F --> G["Retrieve NAD using K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespace).Get(...)"] + G --> H["Determine if NAD config type is sriov via isNetworkAttachmentDefinitionConfigTypeSRIOV"] + H -- true --> I["Return true"] + H -- false --> F + I --> J["End"] +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_Pod.IsUsingSRIOV --> func_getCNCFNetworksNamesFromPodAnnotation + func_Pod.IsUsingSRIOV --> func_clientsholder.GetClientsHolder + func_Pod.IsUsingSRIOV --> func_K8sCniCncfIoV1_NetworkAttachmentDefinitions_Get + func_Pod.IsUsingSRIOV --> func_isNetworkAttachmentDefinitionConfigTypeSRIOV +``` + +#### 5) Functions calling `Pod.IsUsingSRIOV` + +```mermaid +graph TD + func_TestEnvironment.GetPodsUsingSRIOV --> func_Pod.IsUsingSRIOV +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Pod.IsUsingSRIOV +p := &provider.Pod{ + Namespace: "default", + Annotations: map[string]string{"k8s.v1.cni.cncf.io/networks": `["net-eth0","net-sriov"]`}, +} + +usesSRIOV, err := p.IsUsingSRIOV() +if err != nil { + log.Fatalf("error checking SR‑I/O‑V usage: %v", err) +} +fmt.Printf("Pod uses SR‑I/O‑V: %t\n", usesSRIOV) +``` + +--- + +### Pod.IsUsingSRIOVWithMTU 
+ +**IsUsingSRIOVWithMTU** - Returns `true` when any network attachment of the pod is an SR‑IOV type and its MTU has been set via the corresponding `SriovNetworkNodePolicy`. + +#### Signature (Go) + +```go +func (p *Pod) IsUsingSRIOVWithMTU() (bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` when any network attachment of the pod is an SR‑IOV type and its MTU has been set via the corresponding `SriovNetworkNodePolicy`. | +| **Parameters** | `p *Pod` – receiver containing annotations, namespace, etc. | +| **Return value** | `(bool, error)` – boolean flag and potential lookup errors. | +| **Key dependencies** | • `getCNCFNetworksNamesFromPodAnnotation`
• `clientsholder.GetClientsHolder()`
• K8s CNI client (`K8sCniCncfIoV1().NetworkAttachmentDefinitions`)
• `env.AllSriovNetworks`, `env.AllSriovNetworkNodePolicies`
• `sriovNetworkUsesMTU` | +| **Side effects** | Reads pod annotations, queries Kubernetes API for NetworkAttachmentDefinitions, logs debug messages; no state mutation. | +| **How it fits the package** | Part of the provider’s pod analysis utilities; used to assess network configuration compliance and detect SR‑IOV MTU usage in pods. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"cncfNetworks annotation exists?"} + B -- No --> C["Return false, nil"] + B -- Yes --> D["Parse CNCF network names"] + D --> E["Get ClientsHolder"] + E --> F{"For each networkName"} + F --> G["Retrieve NetworkAttachmentDefinition"] + G --> H{"sriovNetworkUsesMTU?"} + H -- True --> I["Return true, nil"] + H -- False --> J["Continue loop"] + J --> F + F --> K["End loop"] --> L["Return false, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Pod.IsUsingSRIOVWithMTU --> getCNCFNetworksNamesFromPodAnnotation + func_Pod.IsUsingSRIOVWithMTU --> clientsholder.GetClientsHolder + func_Pod.IsUsingSRIOVWithMTU --> K8sCniCncfIoV1.NetworkAttachmentDefinitions + func_Pod.IsUsingSRIOVWithMTU --> sriovNetworkUsesMTU +``` + +#### Functions calling `Pod.IsUsingSRIOVWithMTU` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.IsUsingSRIOVWithMTU +pod := &provider.Pod{ + Namespace: "default", + Annotations: map[string]string{"k8s.v1.cni.cncf.io/networks": `["nvidia-network"]`}, +} + +usingMTU, err := pod.IsUsingSRIOVWithMTU() +if err != nil { + log.Fatalf("error checking SR‑IOV MTU usage: %v", err) +} +fmt.Printf("Pod uses SR‑IOV with MTU set? %t\n", usingMTU) +``` + +--- + +### Pod.String + +**String** - Provide a concise string representation of a `Pod`, useful for logging and debugging. + +The method returns a human‑readable description of the pod, including its name and namespace. 
+ +#### Signature (Go) + +```go +func (p *Pod) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provide a concise string representation of a `Pod`, useful for logging and debugging. | +| **Parameters** | None | +| **Return value** | A formatted string: `"pod: <name> ns: <namespace>"`. | +| **Key dependencies** | - `fmt.Sprintf` from the standard library. | +| **Side effects** | None – purely functional; no state changes or I/O. | +| **How it fits the package** | Used throughout the provider package wherever a pod needs to be referenced in logs, error messages, or debug output. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Pod.String()"] --> B["fmt.Sprintf(\pod: %s ns: %s\, p.Name, p.Namespace)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Pod_String --> func_fmt_Sprintf +``` + +#### Functions calling `Pod.String` (Mermaid) + +```mermaid +graph TD + func_AreCPUResourcesWholeUnits --> func_Pod_String + func_AreResourcesIdentical --> func_Pod_String + func_IsAffinityCompliant --> func_Pod_String +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Pod.String +p := &Pod{ + Name: "my-pod", + Namespace: "default", +} +fmt.Println(p.String()) // Output: pod: my-pod ns: default +``` + +--- + +### StatefulSet.IsStatefulSetReady + +**IsStatefulSetReady** - Determines if the StatefulSet’s current status matches the specified number of replicas. It ensures that all replicas are ready, running, and updated. + +Checks whether a StatefulSet instance has reached its desired replica state and is fully operational. + +```go +func (ss *StatefulSet) IsStatefulSetReady() bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the StatefulSet’s current status matches the specified number of replicas. It ensures that all replicas are ready, running, and updated. 
| +| **Parameters** | None (receiver `ss *StatefulSet`) – operates on the instance itself. | +| **Return value** | `bool` – `true` when all replica counts match; otherwise `false`. | +| **Key dependencies** | Uses fields from `ss.Spec.Replicas`, `ss.Status.ReadyReplicas`, `ss.Status.CurrentReplicas`, and `ss.Status.UpdatedReplicas`. | +| **Side effects** | None – purely read‑only status inspection. | +| **How it fits the package** | Provides a quick health check for StatefulSets within the provider’s lifecycle management. | + +```mermaid +flowchart TD + subgraph "Determine target replicas" + A["Check ss.Spec.Replicas"] --> B{"Is nil?"} + B -- Yes --> C["replicas = 1"] + B -- No --> D["replicas = *ss.Spec.Replicas"] + end + + subgraph "Validate status against target" + E["Compare ss.Status.ReadyReplicas with replicas"] --> F{"Matches?"} + G["Compare ss.Status.CurrentReplicas with replicas"] --> H{"Matches?"} + I["Compare ss.Status.UpdatedReplicas with replicas"] --> J{"Matches?"} + F -- No --> K["Return false"] + H -- No --> K + J -- No --> K + F -- Yes --> L + G -- Yes --> M + I -- Yes --> N + L --> O{"All true?"} + O -- Yes --> P["Return true"] + end +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_StatefulSet_IsStatefulSetReady +``` + +#### Functions calling `StatefulSet.IsStatefulSetReady` + +None – this function is currently not referenced elsewhere in the package. 
+ +```mermaid +graph TD + func_StatefulSet_IsStatefulSetReady +``` + +```go +// Minimal example invoking StatefulSet.IsStatefulSetReady +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + ss := &provider.StatefulSet{ + Spec: provider.StatefulSetSpec{Replicas: nil}, + Status: provider.StatefulSetStatus{ + ReadyReplicas: 1, + CurrentReplicas: 1, + UpdatedReplicas: 1, + }, + } + fmt.Println("Is ready:", ss.IsStatefulSetReady()) +} +``` + +--- + +### StatefulSet.ToString + +**ToString** - Produces a human‑readable representation of a StatefulSet, including its name and namespace. + +#### Signature (Go) + +```go +func (ss *StatefulSet) ToString() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a human‑readable representation of a StatefulSet, including its name and namespace. | +| **Parameters** | `ss` – pointer to the `StatefulSet` instance whose data is formatted. | +| **Return value** | A string in the form `"statefulset: <name> ns: <namespace>"`. | +| **Key dependencies** | • `fmt.Sprintf` from the standard library. | +| **Side effects** | None; the function only reads state and returns a new string. | +| **How it fits the package** | Provides a convenient, readable description for logging or debugging within the `provider` package’s StatefulSet handling logic. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Retrieve ss.Name"] + B --> C["Retrieve ss.Namespace"] + C --> D["Call fmt.Sprintf(\statefulset: %s ns: %s\, ss.Name, ss.Namespace)"] + D --> E["Return formatted string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_StatefulSet.ToString --> func_fmt.Sprintf +``` + +#### Functions calling `StatefulSet.ToString` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking StatefulSet.ToString +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + ss := &provider.StatefulSet{ + Name: "my-statefulset", + Namespace: "default", + } + fmt.Println(ss.ToString()) // prints: statefulset: my-statefulset ns: default +} +``` + +--- + +### TestEnvironment.GetAffinityRequiredPods + +**GetAffinityRequiredPods** - Returns a slice of `*Pod` objects from the test environment that have an affinity requirement. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetAffinityRequiredPods() []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice of `*Pod` objects from the test environment that have an affinity requirement. | +| **Parameters** | `env *TestEnvironment` – receiver containing the collection of pods to filter. | +| **Return value** | `[]*Pod` – the filtered list of pods requiring affinity. | +| **Key dependencies** | • Calls `p.AffinityRequired()` on each pod.
• Uses the built‑in `append` function to build the result slice. | +| **Side effects** | None; purely functional, no mutation or I/O beyond reading existing state. | +| **How it fits the package** | Provides a helper for tests and filters to quickly access pods that must be scheduled with affinity constraints. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + start(Start) --> loop["For each pod in env.Pods"] + loop --> check{"p.AffinityRequired?"} + check -- Yes --> add["append to filteredPods"] + check -- No --> skip["skip"] + add --> loop + skip --> loop + loop --> endNode["Return filteredPods"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetAffinityRequiredPods --> func_Pod.AffinityRequired + func_TestEnvironment.GetAffinityRequiredPods --> append +``` + +#### Functions calling `TestEnvironment.GetAffinityRequiredPods` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetAffinityRequiredPods +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ + {Labels: map[string]string{"affinity-required": "true"}}, + {Labels: map[string]string{}}, + }, +} + +affinityPods := env.GetAffinityRequiredPods() +fmt.Printf("Found %d pods requiring affinity\n", len(affinityPods)) +``` + +--- + +### TestEnvironment.GetBaremetalNodes + +**GetBaremetalNodes** - Filters the `TestEnvironment.Nodes` slice and returns only those whose provider ID indicates a bare‑metal host (`"baremetalhost://"` prefix). + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetBaremetalNodes() []Node +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters the `TestEnvironment.Nodes` slice and returns only those whose provider ID indicates a bare‑metal host (`"baremetalhost://"` prefix). 
| +| **Parameters** | None (receiver `env *TestEnvironment`) | +| **Return value** | `[]Node`: slice containing all nodes that match the bare‑metal predicate. | +| **Key dependencies** | • `strings.HasPrefix` – checks the provider ID prefix.
• Built‑in `append` – builds the result slice. | +| **Side effects** | None; purely functional and read‑only on `env`. | +| **How it fits the package** | Provides a convenient accessor for tests that need to operate only on bare‑metal nodes, abstracting away the prefix logic from callers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Init["Create empty slice"] + Init --> Loop["Iterate over env.Nodes"] + Loop --> Check{"HasPrefix(node.Data.Spec.ProviderID, baremetalhost://)"} + Check -- Yes --> Append["Append node to baremetalNodes"] + Check -- No --> Skip["Do nothing"] + Loop --> End["Return baremetalNodes"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetBaremetalNodes --> strings.HasPrefix + func_TestEnvironment.GetBaremetalNodes --> append +``` + +#### Functions calling `TestEnvironment.GetBaremetalNodes` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetBaremetalNodes + +env := &provider.TestEnvironment{ + Nodes: []provider.Node{ /* populate with test nodes */ }, +} + +baremetalNodes := env.GetBaremetalNodes() +fmt.Printf("Found %d bare‑metal node(s)\n", len(baremetalNodes)) +``` + +--- + +--- + +### TestEnvironment.GetCPUPinningPodsWithDpdk + +**GetCPUPinningPodsWithDpdk** - Retrieves all pods in the test environment that have CPU pinning enabled and are using DPDK for networking. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetCPUPinningPodsWithDpdk() []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Retrieves all pods in the test environment that have CPU pinning enabled and are using DPDK for networking. | +| **Parameters** | *none* – operates on the receiver `env`. | +| **Return value** | `[]*Pod` – a slice of pointers to `Pod` structs that satisfy both CPU‑pinning and DPDK conditions. 
| +| **Key dependencies** | • Calls `filterDPDKRunningPods(pods []*Pod) []*Pod`
• Uses `env.GetGuaranteedPodsWithExclusiveCPUs()` internally via the called helper | +| **Side effects** | No state mutation or external I/O; purely computes a filtered slice. | +| **How it fits the package** | Provides a high‑level filter for tests that need to target pods with exclusive CPU resources and DPDK networking, building on lower‑level filtering helpers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["TestEnvironment"] --> B["GetGuaranteedPodsWithExclusiveCPUs"] + B --> C["filterDPDKRunningPods"] + C --> D["Return filtered pods"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetCPUPinningPodsWithDpdk --> func_filterDPDKRunningPods +``` + +#### Functions calling `TestEnvironment.GetCPUPinningPodsWithDpdk` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetCPUPinningPodsWithDpdk +env := &provider.TestEnvironment{ /* populate env.Pods as needed */ } +dpdkPinnedPods := env.GetCPUPinningPodsWithDpdk() +fmt.Printf("Found %d DPDK‑enabled, CPU‑pinned pods\n", len(dpdkPinnedPods)) +``` + +--- + +### TestEnvironment.GetDockerConfigFile + +**GetDockerConfigFile** - Returns the file path to the Docker configuration JSON used for preflight checks. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetDockerConfigFile() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the file path to the Docker configuration JSON used for preflight checks. | +| **Parameters** | `env` – receiver of type `*TestEnvironment`. | +| **Return value** | `string` – the Docker config file path stored in the environment parameters (`PlfDockerconfig`). | +| **Key dependencies** | * None (directly accesses a struct field). | +| **Side effects** | * None. The function is pure. 
| +| **How it fits the package** | Used by container and operator preflight helpers to supply authentication credentials when interacting with Docker registries. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetDockerConfigFile"] --> B["Return env.params.PfltDockerconfig"] +``` + +#### Function dependencies (Mermaid) + +None – this function calls no other functions; it only reads a stored parameter field. + +#### Functions calling `TestEnvironment.GetDockerConfigFile` (Mermaid) + +```mermaid +graph TD + Container.SetPreflightResults --> TestEnvironment.GetDockerConfigFile + Operator.SetPreflightResults --> TestEnvironment.GetDockerConfigFile +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetDockerConfigFile +env := &TestEnvironment{ + params: struct{ PfltDockerconfig string }{PfltDockerconfig: "/path/to/docker/config.json"}, +} +dockerCfgPath := env.GetDockerConfigFile() +fmt.Println("Docker config file:", dockerCfgPath) +``` + +--- + +### TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs + +**GetGuaranteedPodContainersWithExclusiveCPUs** - Returns a slice of `*Container` objects that belong to pods having exclusive CPU guarantees within the test environment. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUs() []*Container +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice of `*Container` objects that belong to pods having exclusive CPU guarantees within the test environment. | +| **Parameters** | `env *TestEnvironment` – receiver; no explicit arguments. | +| **Return value** | `[]*Container` – list of containers from guaranteed‑CPU pods. | +| **Key dependencies** | • Calls helper `getContainers(pods []*Pod) []*Container`.
• Calls method `GetGuaranteedPodsWithExclusiveCPUs()` on the same environment. | +| **Side effects** | None; purely functional and read‑only. | +| **How it fits the package** | Provides a convenient API for filtering containers that are part of pods with exclusive CPU guarantees, used by provider filters and tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["TestEnvironment"] --> B["GetGuaranteedPodsWithExclusiveCPUs()"] + B --> C["getContainers"] + C --> D["*Container slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + A["TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs()"] --> B["getContainers"] + A --> C["GetGuaranteedPodsWithExclusiveCPUs()"] +``` + +#### Functions calling `TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* pods initialized here */ }, +} + +containers := env.GetGuaranteedPodContainersWithExclusiveCPUs() +fmt.Printf("Found %d containers with exclusive CPUs\n", len(containers)) +``` + +--- + +### TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID + +**GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID** - Returns a slice of `*Container` objects belonging to pods that are guaranteed to use exclusive CPUs and have the host PID feature disabled. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() []*Container +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice of `*Container` objects belonging to pods that are guaranteed to use exclusive CPUs and have the host PID feature disabled. 
| +| **Parameters** | None (receiver: `env *TestEnvironment`) | +| **Return value** | `[]*Container`: list of containers meeting the above criteria. | +| **Key dependencies** | • `TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs()` – obtains pods that are guaranteed with exclusive CPUs.
• `filterPodsWithoutHostPID(pods []*Pod)` – filters out pods where `Spec.HostPID` is true.
• `getContainers(pods []*Pod)` – flattens a list of pods into their constituent containers. | +| **Side effects** | None. The function performs read‑only operations on the test environment data structures. | +| **How it fits the package** | Part of the `provider` package’s filtering utilities; it supplies container sets for tests that require exclusive CPU guarantees without host PID interference. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["TestEnvironment"] --> B["GetGuaranteedPodsWithExclusiveCPUs"] + B --> C["filterPodsWithoutHostPID"] + C --> D["getContainers"] + D --> E["*Container Slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID --> func_GetGuaranteedPodsWithExclusiveCPUs + func_TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID --> func_filterPodsWithoutHostPID + func_TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID --> func_getContainers +``` + +#### Functions calling `TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* pre‑populated pods */ }, +} + +containers := env.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() +fmt.Printf("Found %d containers that are guaranteed with exclusive CPUs and have HostPID disabled.\n", len(containers)) +``` + +--- + +### TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID + +**GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID** - Returns a slice of `*Container` objects belonging to pods that are guaranteed, use isolated CPUs, and do not set the HostPID flag. 
+ +#### 1) Title + +**GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID** – Retrieve containers from guaranteed pods that isolate CPUs and have no HostPID flag. + +#### 2) Signature (Go) + +```go +func (env *TestEnvironment) GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() []*Container +``` + +#### 3) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice of `*Container` objects belonging to pods that are guaranteed, use isolated CPUs, and do not set the HostPID flag. | +| **Parameters** | *None* (receiver `env *TestEnvironment`) | +| **Return value** | `[]*Container`: containers meeting the above criteria | +| **Key dependencies** | • Calls `getContainers` to flatten pod container lists.
• Calls `filterPodsWithoutHostPID` to exclude pods with HostPID enabled.
• Calls `TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs` to obtain the initial set of guaranteed, CPU‑isolated pods. | +| **Side effects** | None – purely functional; no mutation or I/O. | +| **How it fits the package** | Part of the `provider` filter utilities that provide high‑level queries over a test environment’s pod collection. | + +#### 4) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["TestEnvironment"] --> B["GetGuaranteedPodsWithIsolatedCPUs"] + B --> C["filterPodsWithoutHostPID"] + C --> D["getContainers"] + D --> E["*Container slice returned"] +``` + +#### 5) Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID --> func_getContainers + func_TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID --> func_filterPodsWithoutHostPID + func_TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID --> func_TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs +``` + +#### 6) Functions calling `TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 7) Usage example (Go) + +```go +// Minimal example invoking GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* pods populated here */ }, +} + +containers := env.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() +fmt.Printf("Found %d qualifying containers\n", len(containers)) +``` + +--- + +### TestEnvironment.GetGuaranteedPods + +**GetGuaranteedPods** - Filters and returns all pods in the test environment that satisfy the “guaranteed” criteria defined by `Pod.IsPodGuaranteed`. 
+ +#### Signature (Go) + +```go +func (env *TestEnvironment) GetGuaranteedPods() []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters and returns all pods in the test environment that satisfy the “guaranteed” criteria defined by `Pod.IsPodGuaranteed`. | +| **Parameters** | None. | +| **Return value** | A slice of pointers to `Pod` (`[]*Pod`) containing only the guaranteed pods. | +| **Key dependencies** | • `Pod.IsPodGuaranteed()` – checks if a pod meets guarantee conditions.
• `append` – collects qualifying pods into a result slice. | +| **Side effects** | None; purely functional, no mutation of the environment or I/O. | +| **How it fits the package** | Provides a convenient accessor for other components (e.g., filters, tests) to work with only guaranteed pods within the `provider` package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B -->|"p.IsPodGuaranteed() == true"| C["Append p to filteredPods"] + B -->|"false"| D["Skip"] + C & D --> E["Continue loop"] + E --> F{"End of list?"} + F -- No --> B + F -- Yes --> G["Return filteredPods"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPods --> func_Pod.IsPodGuaranteed + func_TestEnvironment.GetGuaranteedPods --> func_append +``` + +#### Functions calling `TestEnvironment.GetGuaranteedPods` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetGuaranteedPods +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* pods initialized here */ }, +} + +guaranteedPods := env.GetGuaranteedPods() +fmt.Printf("Found %d guaranteed pods\n", len(guaranteedPods)) +``` + +--- + +### TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs + +**GetGuaranteedPodsWithExclusiveCPUs** - Filters the `TestEnvironment`’s pod list to include only those pods that are guaranteed to have exclusive CPUs. + +#### 1) Signature (Go) + +```go +func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters the `TestEnvironment`’s pod list to include only those pods that are guaranteed to have exclusive CPUs. | +| **Parameters** | None | +| **Return value** | `[]*Pod` – a slice of pointers to `Pod` objects satisfying the exclusivity criteria. 
| +| **Key dependencies** | • `Pod.IsPodGuaranteedWithExclusiveCPUs()`
• Built‑in `append` function | +| **Side effects** | None; purely functional and read‑only on the environment’s pod list. | +| **How it fits the package** | Supplies a foundational set of pods used by other query functions (e.g., CPU pinning, container extraction). | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + env.Pods --> forEachPod["For each pod `p` in `env.Pods`"] + forEachPod --> check["p.IsPodGuaranteedWithExclusiveCPUs()"] + check -- Yes --> append["filteredPods = append(filteredPods, p)"] + check -- No --> skip["continue"] + filteredPods --> return["Return `filteredPods`"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs --> func_Pod.IsPodGuaranteedWithExclusiveCPUs + func_TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs --> append +``` + +#### 5) Functions calling `TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs` (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetCPUPinningPodsWithDpdk --> func_TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs + func_TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs --> func_TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs + func_TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID --> func_TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs +env := &TestEnvironment{ /* populate env.Pods as needed */ } +guaranteedPods := env.GetGuaranteedPodsWithExclusiveCPUs() +fmt.Printf("Found %d guaranteed pods with exclusive CPUs\n", len(guaranteedPods)) +``` + +--- + +### TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs + +**GetGuaranteedPodsWithIsolatedCPUs** - Filters the environment’s pod list to return only those that are guaranteed to run on exclusive CPU units and comply with CPU‑isolation requirements. 
+ +#### Signature (Go) + +```go +func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters the environment’s pod list to return only those that are guaranteed to run on exclusive CPU units and comply with CPU‑isolation requirements. | +| **Parameters** | `env *TestEnvironment` – receiver containing the full set of pods (`env.Pods`). | +| **Return value** | `[]*Pod` – slice of pointers to pods satisfying both guarantees and isolation checks. | +| **Key dependencies** | • `Pod.IsPodGuaranteedWithExclusiveCPUs()` – verifies CPU resources are whole units and identical.
• `Pod.IsCPUIsolationCompliant()` – ensures annotations, runtime class name, and load‑balancing settings support isolation.
• Go’s built‑in `append` to build the result slice. | +| **Side effects** | None; purely functional – no state mutation or I/O. | +| **How it fits the package** | Provides a core filtering utility used by higher‑level selectors (e.g., `GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID`) to isolate pods that meet strict CPU isolation policies for testing and validation purposes. | + +#### Internal workflow + +```mermaid +flowchart TD + env.Pods -->|"iterate"| podLoop + podLoop -->|"check IsPodGuaranteedWithExclusiveCPUs & IsCPUIsolationCompliant"| conditionCheck + conditionCheck -- true --> addToResult + addToResult --> appendToSlice + conditionCheck -- false --> skip + skip --> nextIteration + nextIteration -->|"end?"| endProcess +``` + +#### Function dependencies + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs --> func_Pod.IsPodGuaranteedWithExclusiveCPUs + func_TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs --> func_Pod.IsCPUIsolationCompliant + func_TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs --> append +``` + +#### Functions calling `TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs` + +```mermaid +graph TD + func_TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID --> func_TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* ... populate pod list ... */ }, +} +guaranteedPods := env.GetGuaranteedPodsWithIsolatedCPUs() +for _, p := range guaranteedPods { + fmt.Printf("Pod %s meets CPU isolation guarantees.\n", p.Name) +} +``` + +--- + +### TestEnvironment.GetHugepagesPods + +**GetHugepagesPods** - Filters the pods stored in a `TestEnvironment` to return only those that declare huge‑page memory requests or limits. 
+ +#### Signature (Go) + +```go +func (env *TestEnvironment) GetHugepagesPods() []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters the pods stored in a `TestEnvironment` to return only those that declare huge‑page memory requests or limits. | +| **Parameters** | None | +| **Return value** | A slice of pointers to `Pod` objects (`[]*Pod`) that satisfy the huge‑pages condition. | +| **Key dependencies** | • `(*Pod).HasHugepages()` – checks a pod’s containers for huge‑page resources.
• Built‑in `append` function to build the result slice. | +| **Side effects** | No mutation of the environment; purely read‑only traversal and construction of a new slice. | +| **How it fits the package** | In the `provider` package, this helper enables callers (e.g., filters or tests) to quickly obtain only the pods that will affect memory‑related metrics or validations involving huge pages. | + +#### Internal workflow + +```mermaid +flowchart TD + start --> iterate{"Iterate over env.Pods"} + iterate --> check{"p.HasHugepages()"} + check -- Yes --> add["Append to filteredPods"] + add --> iterate + check -- No --> iterate + iterate --> ret["Return filteredPods"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_TestEnvironment.GetHugepagesPods --> func_Pod.HasHugepages + func_TestEnvironment.GetHugepagesPods --> append +``` + +#### Functions calling `TestEnvironment.GetHugepagesPods` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetHugepagesPods +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* … populate pod list … */ }, +} + +hugePodList := env.GetHugepagesPods() +fmt.Printf("Found %d pods with huge pages\n", len(hugePodList)) +``` + +--- + +### TestEnvironment.GetMasterCount + +**GetMasterCount** - Determines how many nodes in the test environment are designated as master (control‑plane) nodes. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetMasterCount() int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines how many nodes in the test environment are designated as master (control‑plane) nodes. | +| **Parameters** | `env` – pointer to the current `TestEnvironment`. | +| **Return value** | An integer representing the number of control‑plane nodes found. | +| **Key dependencies** | Calls `Node.IsControlPlaneNode()` for each node in `env.Nodes`. 
| +| **Side effects** | None; performs read‑only traversal and counting. | +| **How it fits the package** | Provides a quick metric used by tests to verify cluster topology (e.g., expecting a specific number of masters). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Nodes"} + B --> C["Check e.IsControlPlaneNode()"] + C -- Yes --> D["Increment masterCount"] + C -- No --> E["Continue loop"] + D --> F["Next node or finish"] + E --> F + F --> G["Return masterCount"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetMasterCount --> func_Node.IsControlPlaneNode +``` + +#### Functions calling `TestEnvironment.GetMasterCount` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetMasterCount +env := &provider.TestEnvironment{ + Nodes: []*provider.Node{ + {Data: provider.NodeData{Labels: map[string]string{"node-role.kubernetes.io/master": ""}}}, + {Data: provider.NodeData{Labels: map[string]string{"worker": ""}}}, + }, +} +masterCount := env.GetMasterCount() +fmt.Printf("Number of master nodes: %d\n", masterCount) +``` + +--- + +### TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID + +**GetNonGuaranteedPodContainersWithoutHostPID** - Returns a slice of `*Container` objects belonging to pods that are not guaranteed and have the `HostPID` feature disabled. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetNonGuaranteedPodContainersWithoutHostPID() []*Container +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice of `*Container` objects belonging to pods that are not guaranteed and have the `HostPID` feature disabled. | +| **Parameters** | *env* – the receiver, a pointer to a `TestEnvironment`. | +| **Return value** | `[]*Container` – all containers from qualifying pods. 
| +| **Key dependencies** | • Calls `getContainers(pods []*Pod) []*Container`
• Calls `filterPodsWithoutHostPID(pods []*Pod) []*Pod`
• Calls `TestEnvironment.GetNonGuaranteedPods()` | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Part of the provider filtering utilities, enabling tests to focus on specific pod/container subsets. | +
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["TestEnvironment"] --> B["GetNonGuaranteedPods"]
+    B --> C["filterPodsWithoutHostPID"]
+    C --> D["getContainers"]
+    D --> E["ReturnContainerSlice"]
+```
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID --> func_getContainers
+    func_TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID --> func_filterPodsWithoutHostPID
+    func_TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID --> func_TestEnvironment.GetNonGuaranteedPods
+```
+
+#### Functions calling `TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID` (Mermaid)
+
+None – this function is currently not referenced elsewhere in the package.
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID
+env := &provider.TestEnvironment{
+    Pods: []*provider.Pod{ /* ... populate with test pods ... */ },
+}
+
+containers := env.GetNonGuaranteedPodContainersWithoutHostPID()
+for _, c := range containers {
+    fmt.Println("Container name:", c.Name)
+}
+```
+
+---
+
+### TestEnvironment.GetNonGuaranteedPods
+
+**GetNonGuaranteedPods** - Returns all pods in the environment that are not guaranteed, i.e., whose resource requests do not match limits.
+
+#### Signature (Go)
+
+```go
+func (env *TestEnvironment) GetNonGuaranteedPods() []*Pod
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Returns all pods in the environment that are not guaranteed, i.e., whose resource requests do not match limits. |
+| **Parameters** | `env *TestEnvironment` – receiver containing a slice of all pods (`env.Pods`). 
| +| **Return value** | `[]*Pod` – slice of pointers to non‑guaranteed pods. | +| **Key dependencies** | • `(*Pod).IsPodGuaranteed()`
• Go built‑in `append` function | +| **Side effects** | None; performs only read operations and constructs a new slice. | +| **How it fits the package** | Provides filtering functionality used by higher‑level query helpers (e.g., container selectors) within the *provider* package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B -->|"Pod is guaranteed"| C["Skip"] + B -->|"Pod not guaranteed"| D["Append to filteredPods"] + D --> B + B --> E["Return filteredPods"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_TestEnvironment.GetNonGuaranteedPods --> func_Pod.IsPodGuaranteed + func_TestEnvironment.GetNonGuaranteedPods --> func_append +``` + +#### Functions calling `TestEnvironment.GetNonGuaranteedPods` + +```mermaid +graph TD + func_TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID --> func_TestEnvironment.GetNonGuaranteedPods +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetNonGuaranteedPods +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* …populate pods… */ }, +} + +nonGuaranteedPods := env.GetNonGuaranteedPods() +for _, pod := range nonGuaranteedPods { + fmt.Printf("Pod %s is not guaranteed\n", pod.Name) +} +``` + +--- + +### TestEnvironment.GetOfflineDBPath + +**GetOfflineDBPath** - Returns the filesystem location of the offline database used by the test environment. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetOfflineDBPath() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the filesystem location of the offline database used by the test environment. | +| **Parameters** | `env *TestEnvironment` – the receiver instance containing configuration parameters. | +| **Return value** | `string` – the path stored in `env.params.OfflineDB`. | +| **Key dependencies** | • Reads `env.params.OfflineDB` field. | +| **Side effects** | None; purely accessor. 
| +| **How it fits the package** | Provides a simple API for other components to locate the offline DB without exposing internal struct fields. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["TestEnvironment"] --> B["Return env.params.OfflineDB"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `TestEnvironment.GetOfflineDBPath` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetOfflineDBPath +env := &provider.TestEnvironment{ + params: provider.EnvironmentParams{OfflineDB: "/var/lib/offlinedb"}, +} +offlinePath := env.GetOfflineDBPath() +fmt.Println("Offline DB path:", offlinePath) // Output: /var/lib/offlinedb +``` + +--- + +### TestEnvironment.GetPodsUsingSRIOV + +**GetPodsUsingSRIOV** - Returns all `Pod` instances in the test environment that are configured to use SR‑IOV networking. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetPodsUsingSRIOV() ([]*Pod, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns all `Pod` instances in the test environment that are configured to use SR‑IOV networking. | +| **Parameters** | *None* – operates on the receiver’s internal state (`env.Pods`). | +| **Return value** | `([]*Pod, error)` – a slice of matching pods and an error if any pod’s SR‑IOV check fails. | +| **Key dependencies** | • `Pod.IsUsingSRIOV()` – determines SR‑IOV usage per pod.
• `fmt.Errorf` – formats errors. | +| **Side effects** | None; purely reads state and returns data. | +| **How it fits the package** | Provides a utility for test scenarios that need to isolate pods with SR‑IOV attachments, enabling focused validation or cleanup actions. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + env_Pods["env.Pods"] --> p_IsUsingSRIOV["Pod.IsUsingSRIOV()"] + p_IsUsingSRIOV -- true --> filteredPods["append(p)"] + p_IsUsingSRIOV -- error --> returnError["return nil, fmt.Errorf"] + filteredPods --> returnResult["return(filteredPods, nil)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetPodsUsingSRIOV --> func_Pod.IsUsingSRIOV +``` + +#### Functions calling `TestEnvironment.GetPodsUsingSRIOV` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetPodsUsingSRIOV +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* populate with Pod instances */ }, +} + +sriovPods, err := env.GetPodsUsingSRIOV() +if err != nil { + log.Fatalf("Failed to retrieve SR‑IOV pods: %v", err) +} +fmt.Printf("Found %d pod(s) using SR‑IOV\n", len(sriovPods)) +``` + +--- + +### TestEnvironment.GetPodsWithoutAffinityRequiredLabel + +**GetPodsWithoutAffinityRequiredLabel** - GetPodsWithoutAffinityRequiredLabel returns a slice of Pod objects that do not have the affinity required label. +It iterates over the Pods in the TestEnvironment and filters out the ones that do not have the affinity required label. +The filtered Pods are returned as a slice. + + +**Signature**: `func()([]*Pod)` + +**Purpose**: GetPodsWithoutAffinityRequiredLabel returns a slice of Pod objects that do not have the affinity required label. +It iterates over the Pods in the TestEnvironment and filters out the ones that do not have the affinity required label. +The filtered Pods are returned as a slice. 
+ +**Receiver**: `TestEnvironment` + +--- + +### TestEnvironment.GetShareProcessNamespacePods + +**GetShareProcessNamespacePods** - Returns all `Pod` instances within the environment whose `Spec.ShareProcessNamespace` flag is set to true. + +#### Signature (Go) + +```go +func (env *TestEnvironment) GetShareProcessNamespacePods() []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns all `Pod` instances within the environment whose `Spec.ShareProcessNamespace` flag is set to true. | +| **Parameters** | None (receiver `env *TestEnvironment`). | +| **Return value** | A slice of pointers to `Pod` objects (`[]*Pod`) that satisfy the share‑process‑namespace condition. | +| **Key dependencies** | • `(*Pod).IsShareProcessNamespace()` – checks the flag.
• Built‑in `append` – builds the result slice. | +| **Side effects** | No mutation of state; purely reads from `env.Pods`. | +| **How it fits the package** | Provides a filtered view of pods for tests that require shared process namespaces, facilitating targeted assertions or operations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + env --> PodsArray + PodsArray -->|"for each pod p"| IsShareProcessNamespace(p) + IsShareProcessNamespace(p) -->|"true"| filteredPods + filteredPods --> Return +``` + +#### Function dependencies + +```mermaid +graph TD + func_TestEnvironment.GetShareProcessNamespacePods --> func_Pod.IsShareProcessNamespace + func_TestEnvironment.GetShareProcessNamespacePods --> append +``` + +#### Functions calling `TestEnvironment.GetShareProcessNamespacePods` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetShareProcessNamespacePods +env := &provider.TestEnvironment{ + Pods: []*provider.Pod{ /* … populate pods … */ }, +} + +sharedNSPods := env.GetShareProcessNamespacePods() +fmt.Printf("Found %d pod(s) with shared process namespace\n", len(sharedNSPods)) +``` + +--- + +### TestEnvironment.GetWorkerCount + +**GetWorkerCount** - Counts how many nodes within the `TestEnvironment` are designated as worker nodes. + +```go +func (env *TestEnvironment) GetWorkerCount() int +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Counts how many nodes within the `TestEnvironment` are designated as worker nodes. | +| **Parameters** | `env *TestEnvironment` – receiver holding a slice of `Node`s (`env.Nodes`). | +| **Return value** | `int` – total count of nodes for which `IsWorkerNode()` returns true. | +| **Key dependencies** | Calls the method `node.IsWorkerNode()` on each element in `env.Nodes`. | +| **Side effects** | None; purely functional and read‑only. 
| +| **How it fits the package** | Provides a quick metric used by tests or orchestration logic to verify that the environment contains the expected number of worker nodes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph "Count worker nodes" + A["Initialize counter"] --> B{"Iterate over env.Nodes"} + B --> C{"node.IsWorkerNode() == true?"} + C -- Yes --> D["Increment counter"] + C -- No --> E["Skip"] + D --> B + E --> B + end + B --> F["Return workerCount"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestEnvironment.GetWorkerCount --> func_Node.IsWorkerNode +``` + +#### Functions calling `TestEnvironment.GetWorkerCount` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.GetWorkerCount +env := &provider.TestEnvironment{ + Nodes: []*provider.Node{ /* populate with Node instances */ }, +} +workerCount := env.GetWorkerCount() +fmt.Printf("Number of worker nodes: %d\n", workerCount) +``` + +--- + +### TestEnvironment.IsIntrusive + +**IsIntrusive** - Exposes whether the current `TestEnvironment` is configured to run intrusive tests. + +Retrieves the *intrusive* flag from a test environment’s parameters. + +--- + +#### Signature (Go) + +```go +func (env *TestEnvironment) IsIntrusive() bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Exposes whether the current `TestEnvironment` is configured to run intrusive tests. | +| **Parameters** | `env *TestEnvironment` – receiver containing test parameters. | +| **Return value** | `bool` – the value of `env.params.Intrusive`. | +| **Key dependencies** | • Accesses `env.params.Intrusive` field.
• No external packages or functions are called. | +| **Side effects** | None. The function is pure; it only reads state. | +| **How it fits the package** | Provides a public accessor for the internal `Intrusive` flag, enabling callers to conditionally execute intrusive test logic. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Read env.params.Intrusive"} + B --> C["Return value"] +``` + +--- + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Functions calling `TestEnvironment.IsIntrusive` + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.IsIntrusive +env := &provider.TestEnvironment{ + params: provider.EnvironmentParams{Intrusive: true}, +} +if env.IsIntrusive() { + fmt.Println("Running intrusive tests") +} else { + fmt.Println("Skipping intrusive tests") +} +``` + +--- + +### TestEnvironment.IsPreflightInsecureAllowed + +**IsPreflightInsecureAllowed** - Returns whether the test environment allows insecure network connections when running Preflight checks. + +#### Signature (Go) + +```go +func (env *TestEnvironment) IsPreflightInsecureAllowed() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns whether the test environment allows insecure network connections when running Preflight checks. | +| **Parameters** | `env` – receiver of type `*TestEnvironment`. No additional parameters. | +| **Return value** | `bool` – `true` if insecure connections are allowed, otherwise `false`. | +| **Key dependencies** | • Reads the `AllowPreflightInsecure` field from `env.params`. | +| **Side effects** | None. Pure accessor. | +| **How it fits the package** | Used by container and operator preflight runners to decide whether to add an insecure connection option when configuring Preflight checks. 
| +
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["TestEnvironment"] --> B["Return env.params.AllowPreflightInsecure"]
+```
+
+#### Function dependencies (Mermaid)
+
+None – this function calls no other functions; it only reads `env.params.AllowPreflightInsecure`.
+
+#### Functions calling `TestEnvironment.IsPreflightInsecureAllowed` (Mermaid)
+
+```mermaid
+graph TD
+    func_Container.SetPreflightResults --> func_TestEnvironment.IsPreflightInsecureAllowed
+    func_Operator.SetPreflightResults --> func_TestEnvironment.IsPreflightInsecureAllowed
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking TestEnvironment.IsPreflightInsecureAllowed
+env := &provider.TestEnvironment{
+    params: provider.EnvironmentParams{AllowPreflightInsecure: true},
+}
+allowed := env.IsPreflightInsecureAllowed()
+fmt.Println("Insecure Preflight allowed:", allowed)
+```
+
+---
+
+### TestEnvironment.IsSNO
+
+**IsSNO** - Checks whether the current `TestEnvironment` consists of exactly one node, indicating a Single‑Node OpenShift (SNO) configuration.
+
+```go
+func (env *TestEnvironment) IsSNO() bool
+```
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Checks whether the current `TestEnvironment` consists of exactly one node, indicating a Single‑Node OpenShift (SNO) configuration. |
+| **Parameters** | *None* – operates on the receiver’s internal state. |
+| **Return value** | `bool`: `true` if there is precisely one node; otherwise `false`. |
+| **Key dependencies** | Calls the built‑in `len` function to evaluate the slice length of `env.Nodes`. |
+| **Side effects** | None – purely reads state without modifying it or performing I/O. |
+| **How it fits the package** | Provides a quick boolean flag used by other components to tailor behavior for SNO clusters. 
| +
+#### Internal workflow
+
+```mermaid
+flowchart TD
+    Start --> GetNodesLength["Get length of env.Nodes"]
+    GetNodesLength --> Compare["Compare length to 1"]
+    Compare --> Return["Return result (bool)"]
+```
+
+#### Function dependencies
+
+```mermaid
+graph TD
+    func_TestEnvironment.IsSNO --> len
+```
+
+#### Functions calling `TestEnvironment.IsSNO`
+
+None – this function is currently not referenced elsewhere in the package.
+
+#### Usage example
+
+```go
+// Minimal example invoking TestEnvironment.IsSNO
+env := &provider.TestEnvironment{
+    Nodes: []*provider.Node{{}}, // exactly one node
+}
+isSno := env.IsSNO()
+fmt.Printf("Is single-node OpenShift? %v\n", isSno)
+```
+
+---
+
+### TestEnvironment.SetNeedsRefresh
+
+**SetNeedsRefresh** - Flags that the current test environment has become stale and must be refreshed before subsequent use.
+
+#### Signature (Go)
+
+```go
+func (env *TestEnvironment) SetNeedsRefresh()
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Flags that the current test environment has become stale and must be refreshed before subsequent use. |
+| **Parameters** | None – operates on the receiver `env`. |
+| **Return value** | None. |
+| **Key dependencies** | *None* – this method performs a single state mutation. |
+| **Side effects** | Sets the package‑level variable `loaded` to `false`, indicating that cached environment data is invalidated. |
+| **How it fits the package** | Provides a lightweight way for other components to request a refresh of the test environment, ensuring fresh state on the next operation. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    TestEnvironment_SetNeedsRefresh --> LoadedFalse
+```
+
+#### Function dependencies (Mermaid)
+
+None – this function calls no other functions; it only sets the package‑level `loaded` flag.
+
+#### Functions calling `TestEnvironment.SetNeedsRefresh` (Mermaid)
+
+None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking TestEnvironment.SetNeedsRefresh +env := &provider.TestEnvironment{} +env.SetNeedsRefresh() // marks environment as needing refresh +``` + +--- + +## Local Functions + +### addOperandPodsToTestPods + +**addOperandPodsToTestPods** - Incorporates operand pods into the environment’s pod collection, ensuring duplicates are avoided and proper flags are set. + +#### Signature (Go) + +```go +func addOperandPodsToTestPods(operandPods []*Pod, env *TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Incorporates operand pods into the environment’s pod collection, ensuring duplicates are avoided and proper flags are set. | +| **Parameters** | `operandPods []*Pod` – slice of Pods identified as operands.
`env *TestEnvironment` – mutable test environment to update. | +| **Return value** | None (void). Side‑effects modify `env.Pods`. | +| **Key dependencies** | - `searchPodInSlice(name, namespace string, pods []*Pod) *Pod`
- `log.Info(msg string, args ...any)`
- builtin `append` | +| **Side effects** | Mutates the `Pods` slice within `env`; logs informational messages. | +| **How it fits the package** | Used during test environment construction to merge operand pods with normal test pods, ensuring each pod is represented once and correctly flagged. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over operandPods"} + B -->|"For each operandPod"| C["Check if already in env.Pods via searchPodInSlice"] + C -->|"Found"| D["Log duplicate, set IsOperand=true"] + C -->|"Not found"| E["Log addition, append to env.Pods"] + E --> F["Continue loop"] + D --> F + F --> G["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_addOperandPodsToTestPods --> func_searchPodInSlice + func_addOperandPodsToTestPods --> log.Info + func_addOperandPodsToTestPods --> append +``` + +#### Functions calling `addOperandPodsToTestPods` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_addOperandPodsToTestPods +``` + +#### Usage example (Go) + +```go +// Minimal example invoking addOperandPodsToTestPods + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + env := &provider.TestEnvironment{} + // Suppose we have discovered some operand pods elsewhere + operandPods := []*provider.Pod{ + provider.NewPod(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name:"op-pod-1", Namespace:"ns"}}), + provider.NewPod(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name:"op-pod-2", Namespace:"ns"}}), + } + provider.addOperandPodsToTestPods(operandPods, env) + + // env.Pods now contains the operand pods with IsOperand set to true +} +``` + +--- + +### addOperatorPodsToTestPods + +**addOperatorPodsToTestPods** - Ensures that all operator pods are represented in the environment’s pod collection. If a pod is already present it is marked as an operator; otherwise it is appended to the list. 
+ +#### Signature (Go) + +```go +func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment) {} +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that all operator pods are represented in the environment’s pod collection. If a pod is already present it is marked as an operator; otherwise it is appended to the list. | +| **Parameters** | `operatorPods []*Pod` – slice of operator‑specific pod objects.
`env *TestEnvironment` – mutable reference to the test environment containing discovered pods. | +| **Return value** | None (void). The function mutates `env.Pods`. | +| **Key dependencies** | - `searchPodInSlice(name, namespace string, pods []*Pod) *Pod`
- `log.Info(msg string, args ...any)`
- `append` built‑in for slice extension | +| **Side effects** | Mutates the `env.Pods` slice and updates the `IsOperator` flag on existing pods. Emits informational logs. No external I/O or concurrency. | +| **How it fits the package** | Called during test environment construction (`buildTestEnvironment`) to merge operator pods with other discovered pods, ensuring that subsequent tests have access to all relevant pod information. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over operatorPods"} + B --> C["Search for existing pod in env.Pods"] + C -- found --> D["Mark as operator"] + C -- not found --> E["Append to env.Pods"] + D --> F["Log already discovered"] + E --> G["Log added"] + F --> H["End iteration"] + G --> H +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_addOperatorPodsToTestPods --> func_searchPodInSlice + func_addOperatorPodsToTestPods --> Logger.Info + func_addOperatorPodsToTestPods --> append +``` + +#### Functions calling `addOperatorPodsToTestPods` (Mermaid) + +```mermaid +graph TD + buildTestEnvironment --> addOperatorPodsToTestPods +``` + +#### Usage example (Go) + +```go +// Minimal example invoking addOperatorPodsToTestPods +env := &TestEnvironment{Pods: []*Pod{}} +operatorPods := []*Pod{ + NewPod(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "op1", Namespace: "ns"}}), + NewPod(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "op2", Namespace: "ns"}}), +} + +addOperatorPodsToTestPods(operatorPods, env) + +// After the call, env.Pods contains both operator pods and their IsOperator flag is true. +``` + +--- + +### buildContainerImageSource + +**buildContainerImageSource** - Parses the container image URL and the runtime‑reported image ID to populate a `ContainerImageIdentifier` struct with registry, repository, tag, and digest information. 
+ +#### 1) Signature (Go) + +```go +func buildContainerImageSource(urlImage, urlImageID string) (source ContainerImageIdentifier) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses the container image URL and the runtime‑reported image ID to populate a `ContainerImageIdentifier` struct with registry, repository, tag, and digest information. | +| **Parameters** | `urlImage string –` image reference used by the pod (`/:` or `:`).
`urlImageID string –` runtime‑reported image ID (typically a digest such as `sha256:`). | +| **Return value** | `ContainerImageIdentifier` – a struct containing the parsed registry, repository, tag, and digest. | +| **Key dependencies** | • `regexp.MustCompile`, `FindStringSubmatch` for regex parsing.
• `log.Debug` from the internal logging package. | +| **Side effects** | Logs the parsed image components at debug level; no mutation of external state or I/O. | +| **How it fits the package** | Used by `getPodContainers` to enrich container objects with a structured representation of their images, enabling other parts of the provider to reference image details consistently. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Parse urlImage"} + B -->|"Match found"| C["Set Registry, Repository, Tag"] + B -->|"No match"| D["Skip registry assignment"] + C --> E{"Parse urlImageID"} + D --> E + E -->|"Match found"| F["Set Digest"] + E -->|"No match"| G["Digest remains empty"] + F --> H["Log parsed data"] + G --> H + H --> I["Return ContainerImageIdentifier"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_buildContainerImageSource --> func_MustCompile + func_buildContainerImageSource --> func_FindStringSubmatch + func_buildContainerImageSource --> func_Debug +``` + +#### 5) Functions calling `buildContainerImageSource` (Mermaid) + +```mermaid +graph TD + func_getPodContainers --> func_buildContainerImageSource +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking buildContainerImageSource +imageURL := "quay.io/example/repo:latest" +imageID := "sha256:abcdef1234567890" + +parsed := buildContainerImageSource(imageURL, imageID) +fmt.Printf("Registry: %s\nRepository: %s\nTag: %s\nDigest: %s\n", + parsed.Registry, parsed.Repository, parsed.Tag, parsed.Digest) +``` + +--- + +--- + +### buildTestEnvironment + +**buildTestEnvironment** - Initializes global test environment state, loads configuration, deploys probe daemonset (if possible), performs autodiscovery of cluster resources, and populates the `env` variable with all discovered data. 
+ +#### 1) Signature + +```go +func buildTestEnvironment() +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Initializes global test environment state, loads configuration, deploys probe daemonset (if possible), performs autodiscovery of cluster resources, and populates the `env` variable with all discovered data. | +| **Parameters** | None | +| **Return value** | None – updates package‑level variables (`env`, `loaded`). | +| **Key dependencies** |
  • time.Now / time.Since
  • configuration.GetTestParameters
  • configuration.LoadConfiguration
  • deployDaemonSet (internal helper)
  • autodiscover.DoAutoDiscover
  • GetAllOperatorGroups
  • createOperators
  • NewPod, getPodContainers, addOperatorPodsToTestPods, addOperandPodsToTestPods
  • log.Fatal / log.Error / log.Debug / log.Info
| +| **Side effects** | *Modifies global `env` and related maps/arrays.
* May exit process via `log.Fatal`.
* Creates daemonset if configured. | +| **How it fits the package** | Core routine called by `GetTestEnvironment()` to lazily build the test environment on first request; all other functions rely on data populated here. | + +#### 3) Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Record start time"] + B --> C{"Load config"} + C -->|"OK"| D["Deploy probe daemonset"] + D --> E["Do autodiscovery"] + E --> F["Populate env fields (config, params, nodes, pods, etc.)"] + F --> G["Process operator/operand pods"] + G --> H["Build containers list"] + H --> I["Log completion time"] + I --> J["End"] +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_buildTestEnvironment --> func_time.Now + func_buildTestEnvironment --> func_configuration.GetTestParameters + func_buildTestEnvironment --> func_configuration.LoadConfiguration + func_buildTestEnvironment --> func_deployDaemonSet + func_buildTestEnvironment --> func_autodiscover.DoAutoDiscover + func_buildTestEnvironment --> func_GetAllOperatorGroups + func_buildTestEnvironment --> func_createOperators + func_buildTestEnvironment --> func_NewPod + func_buildTestEnvironment --> func_getPodContainers + func_buildTestEnvironment --> func_addOperatorPodsToTestPods + func_buildTestEnvironment --> func_addOperandPodsToTestPods + func_buildTestEnvironment --> func_log.Fatal + func_buildTestEnvironment --> func_log.Error + func_buildTestEnvironment --> func_log.Debug + func_buildTestEnvironment --> func_log.Info +``` + +#### 5) Functions calling `buildTestEnvironment` + +```mermaid +graph TD + func_GetTestEnvironment --> func_buildTestEnvironment +``` + +#### 6) Usage example (Go) + +```go +// Obtain the fully built test environment +env := provider.GetTestEnvironment() + +// env now contains configuration, pods, operators, etc. 
+fmt.Printf("Probe set up: %v\n", env.DaemonsetFailedToSpawn) +``` + +--- + +### createNodes + +**createNodes** - Converts a slice of Kubernetes `Node` objects into a map keyed by node name, optionally enriching each entry with its MachineConfig when running on an OpenShift cluster. + +#### Signature (Go) + +```go +func createNodes(nodes []corev1.Node) map[string]Node +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts a slice of Kubernetes `Node` objects into a map keyed by node name, optionally enriching each entry with its MachineConfig when running on an OpenShift cluster. | +| **Parameters** | `nodes []corev1.Node` – list of nodes retrieved from the cluster | +| **Return value** | `map[string]Node` – mapping from node names to wrapper structs containing the original node data and, if applicable, the corresponding MachineConfig | +| **Key dependencies** | *`IsOCPCluster()` – determines if the environment is OpenShift
* `log.Warn`, `log.Info`, `log.Error` – structured logging
* `getMachineConfig(mcName string, machineConfigs map[string]MachineConfig)` – fetches and caches MachineConfig resources | +| **Side effects** | Emits log messages; downloads MachineConfig objects (network I/O) when needed; stores fetched configs in a local cache to avoid duplicate requests. No global state is modified. | +| **How it fits the package** | Used by `buildTestEnvironment` to populate the test environment’s node list, enabling tests that depend on machine configuration data for OpenShift clusters. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over nodes"} + B -->|"OpenShift? No"| C["Add Node without MC"] + B -->|"OpenShift? Yes"| D["Retrieve MC name from annotation"] + D -->|"Exists?"| E["Fetch MachineConfig via getMachineConfig"] + E --> F["Store node with MC in wrapperNodes"] + C --> G["Log warning and continue"] + F --> H["Continue loop"] + G --> H + H --> I["Return wrapperNodes"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_createNodes --> func_IsOCPCluster + func_createNodes --> log.Warn + func_createNodes --> log.Error + func_createNodes --> log.Info + func_createNodes --> func_getMachineConfig +``` + +#### Functions calling `createNodes` + +```mermaid +graph TD + func_buildTestEnvironment --> func_createNodes +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createNodes +package main + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +func main() { + nodes := []corev1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node-01"}}, + } + nodeMap := createNodes(nodes) + fmt.Printf("Processed %d nodes\n", len(nodeMap)) +} +``` + +--- + +### createOperators + +**createOperators** - Builds a slice of `*Operator` objects that summarize each unique ClusterServiceVersion (CSV). It enriches operators with subscription, install‑plan, and namespace information. 
+ +#### Signature (Go) + +```go +func createOperators( + csvs []*olmv1Alpha.ClusterServiceVersion, + allSubscriptions []olmv1Alpha.Subscription, + allPackageManifests []*olmpkgv1.PackageManifest, + allInstallPlans []*olmv1Alpha.InstallPlan, + allCatalogSources []*olmv1Alpha.CatalogSource, + succeededRequired bool, + keepCsvDetails bool) []*Operator +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a slice of `*Operator` objects that summarize each unique ClusterServiceVersion (CSV). It enriches operators with subscription, install‑plan, and namespace information. | +| **Parameters** | `csvs`: list of CSVs.
`allSubscriptions`: all discovered subscriptions.
`allPackageManifests`: all package manifests.
`allInstallPlans`: all install plans.
`allCatalogSources`: all catalog sources.
`succeededRequired`: if true, skip CSVs not in `Succeeded` phase.
`keepCsvDetails`: if true, store the original CSV struct inside each operator. | +| **Return value** | Slice of pointers to constructed `Operator` structs. | +| **Key dependencies** | `getUniqueCsvListByName`, `strings.SplitN`, logging helpers (`log.Debug/Info/Error/Warn`), `getAtLeastOneSubscription`, `getOperatorTargetNamespaces`, `getAtLeastOneInstallPlan`, slice append. | +| **Side effects** | No global state is modified; only logs are emitted and the returned slice is built locally. | +| **How it fits the package** | This helper is used by the test environment builder (`buildTestEnvironment`) to populate the operator catalog that drives subsequent tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Unique CSVs"} + B --> C["Iterate over each unique CSV"] + C --> D{"Check phase?"} + D -- yes --> E["Create Operator struct"] + E --> F{"Keep CSV details?"} + F -- yes --> G["Store csv in op.Csv"] + F -- no --> H + G --> I["Set Phase, Name, Namespace"] + H --> I + I --> J["Parse package & version from name"] + J --> K{"Subscription exists?"} + K -- yes --> L["getAtLeastOneSubscription"] + L --> M["getOperatorTargetNamespaces"] + M --> N["Set TargetNamespaces & IsClusterWide"] + K -- no --> O["Warn missing subscription"] + I --> P["getAtLeastOneInstallPlan"] + P --> Q["Append op to operators slice"] + Q --> R{"Next CSV?"} + R -- yes --> C + R -- no --> S["Return operators"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_createOperators --> func_getUniqueCsvListByName + func_createOperators --> strings.SplitN + func_createOperators --> log.Debug + func_createOperators --> log.Info + func_createOperators --> log.Error + func_createOperators --> log.Warn + func_createOperators --> getAtLeastOneSubscription + func_createOperators --> getOperatorTargetNamespaces + func_createOperators --> getAtLeastOneInstallPlan +``` + +#### Functions calling `createOperators` (Mermaid) + +```mermaid +graph TD + 
func_buildTestEnvironment --> func_createOperators +``` + +#### Usage example (Go) + +```go +// Minimal example invoking createOperators +package main + +import ( + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" + olmpkgv1 "github.com/operator-framework/api/pkg/package/packagemanifests/v1" + provider "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume csvs, subs, manifests, plans, sources are already populated. + var csvs []*olmv1Alpha.ClusterServiceVersion + var subs []olmv1Alpha.Subscription + var manifests []*olmpkgv1.PackageManifest + var plans []*olmv1Alpha.InstallPlan + var sources []*olmv1Alpha.CatalogSource + + operators := provider.createOperators(csvs, subs, manifests, plans, sources, true, false) + + // operators now contains enriched operator information. +} +``` + +--- + +### deployDaemonSet + +**deployDaemonSet** - Ensures that the CertSuite probe daemon set is running in the specified Kubernetes namespace. If it already exists and is ready, the function exits immediately; otherwise it creates the daemon set and waits for readiness. + +#### Signature (Go) + +```go +func deployDaemonSet(namespace string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that the CertSuite probe daemon set is running in the specified Kubernetes namespace. If it already exists and is ready, the function exits immediately; otherwise it creates the daemon set and waits for readiness. | +| **Parameters** | `namespace string` – target namespace where the daemon set should be deployed. | +| **Return value** | `error` – non‑nil if any step (client setup, creation, or waiting) fails. | +| **Key dependencies** | • `k8sPrivilegedDs.SetDaemonSetClient(clientsholder.GetClientsHolder().K8sClient)`
• `k8sPrivilegedDs.IsDaemonSetReady(...)`
• `k8sPrivilegedDs.CreateDaemonSet(...)`
• `k8sPrivilegedDs.WaitDaemonsetReady(...)`
• `configuration.GetTestParameters()`
• `fmt.Errorf` | +| **Side effects** | • Configures the privileged daemon set client.
• May create a new daemon set in the cluster.
• Blocks until the daemon set reports readiness or times out. | +| **How it fits the package** | Part of the test‑environment bootstrap; invoked by `buildTestEnvironment` to guarantee probe pods are available before tests run. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Set privileged client"] --> B["Check if daemon set ready"] + B -- yes --> C["Return nil"] + B -- no --> D["Create daemon set"] + D --> E["Wait for readiness"] + E -- success --> F["Return nil"] + E -- timeout/failure --> G["Return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_deployDaemonSet --> func_SetDaemonSetClient + func_deployDaemonSet --> func_IsDaemonSetReady + func_deployDaemonSet --> func_CreateDaemonSet + func_deployDaemonSet --> func_WaitDaemonsetReady + func_deployDaemonSet --> func_GetClientsHolder + func_deployDaemonSet --> func_GetTestParameters + func_deployDaemonSet --> fmt_Errorf +``` + +#### Functions calling `deployDaemonSet` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_deployDaemonSet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking deployDaemonSet +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + if err := provider.DeployDaemonSet("certsuite-probes"); err != nil { + log.Fatalf("Failed to deploy probe daemon set: %v", err) + } +} +``` + +*Note:* `DeployDaemonSet` is unexported in the original package; the example assumes it has been made public or accessed via a wrapper. + +--- + +### filterDPDKRunningPods + +**filterDPDKRunningPods** - From a list of pods, return only those whose first Multus PCI address contains the DPDK driver `vfio-pci`. 
+ +#### Signature (Go) + +```go +func filterDPDKRunningPods(pods []*Pod) []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | From a list of pods, return only those whose first Multus PCI address contains the DPDK driver `vfio-pci`. | +| **Parameters** | `pods` – slice of pointers to `Pod` objects to be inspected. | +| **Return value** | A new slice containing the filtered pods that satisfy the DPDK condition. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains a Kubernetes client holder.
• `clientsholder.NewContext(...)` – creates a context for executing commands in a pod container.
• `fmt.Sprintf` – builds the command string.
• `o.ExecCommandContainer(ctx, cmd)` – runs the command inside the pod’s first container.
• `strings.Contains` – checks output for `"vfio-pci"`. | +| **Side effects** | Logs an error when a command execution fails; otherwise no external state is mutated. | +| **How it fits the package** | Used by the provider to determine which guaranteed‑CPU pods actually have DPDK enabled, enabling CPU pinning tests that require DPDK support. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over pods"} + B -->|"no Multus PCI"| C["Skip"] + B -->|"has Multus PCI"| D["Create context"] + D --> E["Build find command"] + E --> F["Execute in pod container"] + F --> G{"Command succeeded?"} + G -->|"fail"| H["Log error & skip"] + G -->|"success"| I{"Output contains vfio-pci?"} + I -->|"no"| J["Skip"] + I -->|"yes"| K["Add to filtered list"] + K --> L["Continue loop"] + L --> B + C --> L + H --> L + J --> L + A --> M["Return filteredPods"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_filterDPDKRunningPods --> func_GetClientsHolder + func_filterDPDKRunningPods --> func_NewContext + func_filterDPDKRunningPods --> func_Sprintf + func_filterDPDKRunningPods --> func_ExecCommandContainer + func_filterDPDKRunningPods --> func_Error + func_filterDPDKRunningPods --> func_String + func_filterDPDKRunningPods --> func_Contains + func_filterDPDKRunningPods --> func_append +``` + +#### Functions calling `filterDPDKRunningPods` (Mermaid) + +```mermaid +graph TD + func_GetCPUPinningPodsWithDpdk --> func_filterDPDKRunningPods +``` + +#### Usage example (Go) + +```go +// Minimal example invoking filterDPDKRunningPods +pods := []*Pod{ /* list of Pod pointers */ } +dpdkPods := filterDPDKRunningPods(pods) +// dpdkPods now contains only pods that have vfio-pci driver active. +``` + +--- + +### filterPodsWithoutHostPID + +**filterPodsWithoutHostPID** - Returns a slice containing only the pods from the input list whose `Spec.HostPID` field is false, effectively excluding any pod that shares the host’s PID namespace. 
+ +#### Signature (Go) + +```go +func filterPodsWithoutHostPID(pods []*Pod) []*Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice containing only the pods from the input list whose `Spec.HostPID` field is false, effectively excluding any pod that shares the host’s PID namespace. | +| **Parameters** | `pods []*Pod` – slice of pointers to `Pod` objects to be filtered. | +| **Return value** | `[]*Pod` – new slice containing only pods without Host PID enabled. | +| **Key dependencies** | • Built‑in `append` function used to build the result slice.
• Accesses `pod.Spec.HostPID`. | +| **Side effects** | None; operates purely on the provided slice and returns a new slice. | +| **How it fits the package** | Used by test environment helper methods to isolate pod collections that do not use host PID, enabling CPU isolation checks without interference from pods sharing the host namespace. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over input slice"} + B -->|"pod.Spec.HostPID == true"| C["Skip"] + B -->|"false"| D["Append pod to result"] + D --> E["Continue loop"] + E --> B + B --> F["Return filtered slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_filterPodsWithoutHostPID --> append +``` + +#### Functions calling `filterPodsWithoutHostPID` (Mermaid) + +```mermaid +graph TD + TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID --> func_filterPodsWithoutHostPID + TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID --> func_filterPodsWithoutHostPID + TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID --> func_filterPodsWithoutHostPID +``` + +#### Usage example (Go) + +```go +// Minimal example invoking filterPodsWithoutHostPID +pods := []*Pod{podA, podB, podC} // assume podB has Spec.HostPID = true +filteredPods := filterPodsWithoutHostPID(pods) +// filteredPods now contains only podA and podC +``` + +--- + +### getAtLeastOneCsv + +**getAtLeastOneCsv** - Checks whether the provided `InstallPlan` references the specified `ClusterServiceVersion`. It verifies that the CSV name appears in the plan’s list and that the plan has bundle lookup data. + +#### Signature (Go) + +```go +func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1Alpha.InstallPlan) (atLeastOneCsv bool) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the provided `InstallPlan` references the specified `ClusterServiceVersion`. 
It verifies that the CSV name appears in the plan’s list and that the plan has bundle lookup data. | +| **Parameters** | `csv *olmv1Alpha.ClusterServiceVersion` – the CSV to look for.
`installPlan *olmv1Alpha.InstallPlan – the InstallPlan under inspection.` | +| **Return value** | `bool` – `true` if the plan references the CSV and has bundle lookups; otherwise `false`. | +| **Key dependencies** | • Calls `log.Warn` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`. | +| **Side effects** | Emits a warning when an InstallPlan lacks bundle lookups. No state mutation occurs. | +| **How it fits the package** | Used by higher‑level logic that aggregates install plans for operators, ensuring only valid plans are considered. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getAtLeastOneCsv"] --> B{"Iterate over CSV names"} + B -->|"Match found"| C{"Check bundle lookups"} + C -->|"Present"| D["Return true"] + C -->|"Missing"| E["Warn and continue"] + B -->|"No match"| F["Continue loop"] + F --> G["End loop"] + G --> H["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAtLeastOneCsv --> func_Warn +``` + +#### Functions calling `getAtLeastOneCsv` (Mermaid) + +```mermaid +graph TD + func_getAtLeastOneInstallPlan --> func_getAtLeastOneCsv +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getAtLeastOneCsv +import ( + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func example() { + csv := &olmv1Alpha.ClusterServiceVersion{ObjectMeta: metav1.ObjectMeta{Name: "my-csv"}} + plan := &olmv1Alpha.InstallPlan{ + Spec: olmv1Alpha.InstallPlanSpec{ + ClusterServiceVersionNames: []string{"my-csv", "other-csv"}, + }, + Status: olmv1Alpha.InstallPlanStatus{ + BundleLookups: []olmv1Alpha.BundleLookup{{Path: "/path/to/bundle"}}, + }, + } + + if getAtLeastOneCsv(csv, plan) { + fmt.Println("InstallPlan contains the CSV and has bundle lookups.") + } else { + fmt.Println("CSV not found or missing bundle data.") + } +} +``` + +--- + +### getAtLeastOneInstallPlan + +**getAtLeastOneInstallPlan** - For a given operator, identifies at least one 
`InstallPlan` that installs the supplied CSV in the operator’s subscription namespace and records it in the operator’s data. + +#### Signature (Go) + +```go +func getAtLeastOneInstallPlan( + op *Operator, + csv *olmv1Alpha.ClusterServiceVersion, + allInstallPlans []*olmv1Alpha.InstallPlan, + allCatalogSources []*olmv1Alpha.CatalogSource, +) (atLeastOneInstallPlan bool) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For a given operator, identifies at least one `InstallPlan` that installs the supplied CSV in the operator’s subscription namespace and records it in the operator’s data. | +| **Parameters** | `op *Operator` – target operator structure;
`csv *olmv1Alpha.ClusterServiceVersion` – CSV to match;
`allInstallPlans []*olmv1Alpha.InstallPlan` – list of all install plans in the cluster;
`allCatalogSources []*olmv1Alpha.CatalogSource` – catalog sources used to resolve bundle images. | +| **Return value** | `bool` – true if at least one matching install plan was found and appended, otherwise false. | +| **Key dependencies** | • `getAtLeastOneCsv(csv, installPlan)`
• `getCatalogSourceImageIndexFromInstallPlan(installPlan, allCatalogSources)`
• `log.Debug` (from internal logger)
• `append` to slice | +| **Side effects** | Mutates `op.InstallPlans` by appending a `CsvInstallPlan`. Logs debug messages on lookup failures. | +| **How it fits the package** | Used during operator discovery (`createOperators`) to enrich each `Operator` with install‑plan details, enabling later reporting of bundle images and indices. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> ScanAllInstallPlans["Loop over allInstallPlans"] + ScanAllInstallPlans --> FilterNS{"installPlan.Namespace == op.SubscriptionNamespace"} + FilterNS -- No --> NextPlan + FilterNS -- Yes --> MatchCSV["getAtLeastOneCsv(csv, installPlan)"] + MatchCSV -- False --> NextPlan + MatchCSV -- True --> ResolveIndex["getCatalogSourceImageIndexFromInstallPlan"] + ResolveIndex -- Error --> LogDebug["log.Debug(...)"] --> NextPlan + ResolveIndex -- Success --> Append["op.InstallPlans = append(...)"] + Append --> SetFlag["atLeastOneInstallPlan = true"] + SetFlag --> NextPlan + NextPlan --> End{"End of loop"} +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAtLeastOneInstallPlan --> func_getAtLeastOneCsv + func_getAtLeastOneInstallPlan --> func_getCatalogSourceImageIndexFromInstallPlan + func_getAtLeastOneInstallPlan --> func_log.Debug + func_getAtLeastOneInstallPlan --> func_append +``` + +#### Functions calling `getAtLeastOneInstallPlan` (Mermaid) + +```mermaid +graph TD + func_createOperators --> func_getAtLeastOneInstallPlan +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getAtLeastOneInstallPlan +op := &Operator{Name: "example-op", SubscriptionNamespace: "operators"} +csv := &olmv1Alpha.ClusterServiceVersion{ObjectMeta: metav1.ObjectMeta{Name: "example-csv", Namespace: "operators"}} +installPlans := []*olmv1Alpha.InstallPlan{ + { + ObjectMeta: metav1.ObjectMeta{Name: "ip-1", Namespace: "operators"}, + Spec: olmv1Alpha.InstallPlanSpec{ClusterServiceVersionNames: []string{"example-csv"}}, + Status: 
olmv1Alpha.InstallPlanStatus{BundleLookups: []olmv1Alpha.BundleLookup{{Path: "bundle.tar.gz"}}}, + }, +} +catalogSources := []*olmv1Alpha.CatalogSource{ + {ObjectMeta: metav1.ObjectMeta{Name: "cs-1", Namespace: "operators"}, Spec: olmv1Alpha.CatalogSourceSpec{Image: "registry.example.com/index"}}, +} + +found := getAtLeastOneInstallPlan(op, csv, installPlans, catalogSources) +if found { + fmt.Printf("Operator %s now has %d install plan(s)\n", op.Name, len(op.InstallPlans)) +} +``` + +--- + +### getAtLeastOneSubscription + +**getAtLeastOneSubscription** - Finds the first subscription whose `InstalledCSV` matches the provided CSV and populates the `Operator` with subscription metadata; retrieves the default channel from a matching package manifest when needed. + +**Collects the first subscription that matches a given CSV and enriches an `Operator` with its details, including default channel information if missing.** + +--- + +#### 1) Signature (Go) + +```go +func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, + subscriptions []olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest)(bool) +``` + +--- + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Finds the first subscription whose `InstalledCSV` matches the provided CSV and populates the `Operator` with subscription metadata; retrieves the default channel from a matching package manifest when needed. | +| **Parameters** | `op *Operator` – operator to update
`csv *olmv1Alpha.ClusterServiceVersion` – CSV to match against
`subscriptions []olmv1Alpha.Subscription` – list of subscriptions to search
`packageManifests []*olmpkgv1.PackageManifest` – package manifests for channel resolution | +| **Return value** | `bool` – true if at least one matching subscription was found, false otherwise | +| **Key dependencies** | • `getPackageManifestWithSubscription(subscription, packageManifests)`
• `log.Error(msg, args…)` from the internal logging package | +| **Side effects** | Mutates fields of `op` (name, namespace, package, org, channel). Logs an error if default channel cannot be determined. No external I/O or concurrency. | +| **How it fits the package** | Used by `createOperators` to associate CSVs with their owning subscriptions during operator discovery. | + +--- + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate subscriptions"} + B -->|"InstalledCSV matches csv.Name"| C["Set op.SubscriptionName & Namespace"] + C --> D["Set op.Package, Org, Channel"] + D --> E{"op.Channel is empty?"} + E -- Yes --> F["getPackageManifestWithSubscription()"] + F --> G{"Found package manifest?"} + G -- Yes --> H["Set op.Channel to defaultChannel"] + G -- No --> I["log.Error"] + H --> J["Break loop, return true"] + I --> J + E -- No --> J + B -->|"No match"| K["Continue loop"] + K --> B + J --> L["Return atLeastOneSubscription"] +``` + +--- + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getAtLeastOneSubscription --> func_getPackageManifestWithSubscription + func_getAtLeastOneSubscription --> Logger.Error +``` + +--- + +#### 5) Functions calling `getAtLeastOneSubscription` (Mermaid) + +```mermaid +graph TD + func_createOperators --> func_getAtLeastOneSubscription +``` + +--- + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getAtLeastOneSubscription + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +// Assume op, csv, subs, pkgManifests are already defined and populated. 
+var op provider.Operator +var csv *olmv1Alpha.ClusterServiceVersion +var subs []olmv1Alpha.Subscription +var pkgManifests []*olmpkgv1.PackageManifest + +found := getAtLeastOneSubscription(&op, csv, subs, pkgManifests) +if found { + fmt.Printf("Operator %s now has subscription %s in namespace %s\n", + op.Name, op.SubscriptionName, op.SubscriptionNamespace) +} else { + fmt.Println("No matching subscription found for CSV", csv.Name) +} +``` + +--- + +--- + +### getCNCFNetworksNamesFromPodAnnotation + +**getCNCFNetworksNamesFromPodAnnotation** - Parses the value of the `k8s.v1.cni.cncf.io/networks` annotation and returns only the network names. Supports both comma‑separated lists and JSON array of objects. + +#### Signature (Go) + +```go +func getCNCFNetworksNamesFromPodAnnotation(networksAnnotation string) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses the value of the `k8s.v1.cni.cncf.io/networks` annotation and returns only the network names. Supports both comma‑separated lists and JSON array of objects. | +| **Parameters** | `networksAnnotation string` – raw annotation content to parse. | +| **Return value** | `[]string` – slice containing all extracted network names; empty if none found or input is malformed. | +| **Key dependencies** | • `encoding/json: Unmarshal`
• `strings: TrimSpace`, `Split`
• `append` (built‑in) | +| **Side effects** | None – pure function, no mutation of external state or I/O. | +| **How it fits the package** | Utility helper used by `Pod.IsUsingSRIOV` and `Pod.IsUsingSRIOVWithMTU` to determine which NetworkAttachmentDefinitions apply to a pod. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Unmarshal JSON?"} + B -- Yes --> C["Extract names from objects"] + B -- No --> D["Trim and split by comma"] + C --> E["Return names"] + D --> F["Append trimmed names"] + F --> E +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getCNCFNetworksNamesFromPodAnnotation --> func_Unmarshal + func_getCNCFNetworksNamesFromPodAnnotation --> func_Split + func_getCNCFNetworksNamesFromPodAnnotation --> func_TrimSpace +``` + +#### Functions calling `getCNCFNetworksNamesFromPodAnnotation` (Mermaid) + +```mermaid +graph TD + func_IsUsingSRIOV --> func_getCNCFNetworksNamesFromPodAnnotation + func_IsUsingSRIOVWithMTU --> func_getCNCFNetworksNamesFromPodAnnotation +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getCNCFNetworksNamesFromPodAnnotation +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + annotation := `k8s.v1.cni.cncf.io/networks: ["{\"name\":\"net1\"}", "{\"name\":\"net2\"}"]` + names := provider.getCNCFNetworksNamesFromPodAnnotation(annotation) + fmt.Println(names) // Output: [net1 net2] +} +``` + +--- + +--- + +### getCatalogSourceBundleCountFromPackageManifests + +**getCatalogSourceBundleCountFromPackageManifests** - Determines the number of bundle images that belong to a specific `CatalogSource` by inspecting all package manifests in the test environment. 
+ +#### Signature (Go) + +```go +func getCatalogSourceBundleCountFromPackageManifests(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines the number of bundle images that belong to a specific `CatalogSource` by inspecting all package manifests in the test environment. | +| **Parameters** | `env *TestEnvironment` – holds global state including `AllPackageManifests`.
`cs *olmv1Alpha.CatalogSource` – catalog source whose bundles are counted. | +| **Return value** | `int` – total count of related bundle entries. | +| **Key dependencies** | • `len()` (built‑in)
• Iteration over `env.AllPackageManifests` and their channel entries | +| **Side effects** | None; purely computational. | +| **How it fits the package** | Used by `GetCatalogSourceBundleCount` to provide an alternative bundle count method when probe container data is unavailable. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.AllPackageManifests"} + B -->|"for each pm"| C["Check if pm matches cs"] + C -->|"match? yes"| D{"Iterate pm.Status.Channels"} + D --> E["Add len(Entries) to total"] + E --> D + D --> F["Next channel"] + C -->|"no match"| G["Continue loop"] + B --> H["End of list"] + H --> I["Return totalRelatedBundles"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getCatalogSourceBundleCountFromPackageManifests --> len +``` + +#### Functions calling `getCatalogSourceBundleCountFromPackageManifests` (Mermaid) + +```mermaid +graph TD + func_GetCatalogSourceBundleCount --> func_getCatalogSourceBundleCountFromPackageManifests +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getCatalogSourceBundleCountFromPackageManifests +env := &TestEnvironment{ + AllPackageManifests: []*olmv1Alpha.PackageManifest{ /* populated elsewhere */ }, +} +cs := &olmv1Alpha.CatalogSource{Name: "my-cs", Namespace: "operators"} +bundleCount := getCatalogSourceBundleCountFromPackageManifests(env, cs) +fmt.Printf("Total bundles for %s/%s: %d\n", cs.Namespace, cs.Name, bundleCount) +``` + +--- + +### getCatalogSourceBundleCountFromProbeContainer + +**getCatalogSourceBundleCountFromProbeContainer** - Determines the number of bundles in a `CatalogSource` by querying its associated service through a probe pod using `grpcurl`. 
+ +#### Signature (Go) + +```go +func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines the number of bundles in a `CatalogSource` by querying its associated service through a probe pod using `grpcurl`. | +| **Parameters** | `env *TestEnvironment` – test context containing services, probe pods, and logger.
`cs *olmv1Alpha.CatalogSource` – catalog source to inspect. | +| **Return value** | Integer bundle count; returns `-1` if no matching service or all attempts fail. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `clientsholder.NewContext(...)`
• `o.ExecCommandContainer(ctx, cmd)`
• `log.Info`, `log.Error`, `log.Warn`
• `strings.TrimSpace`, `strings.Trim`, `strconv.Atoi` | +| **Side effects** | Logs informational, warning, and error messages; performs remote command execution inside probe pods. No state mutation outside of logging. | +| **How it fits the package** | Used by `GetCatalogSourceBundleCount` to obtain bundle counts on older OpenShift versions (≤ 4.12) where package manifests are unavailable. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Retrieve Kubernetes client holder"] + B --> C{"Find service matching catalog source"} + C -- No --> D["Warn no service found, return -1"] + C -- Yes --> E["Iterate over probe pods"] + E --> F["Create exec context"] + F --> G["Build grpcurl command"] + G --> H["Execute command in pod"] + H --> I{"Command succeeded & output present"} + I -- No --> J["Continue to next probe pod"] + I -- Yes --> K["Trim and parse output"] + K --> L{"Parse successful"} + L -- No --> M["Log error, continue"] + L -- Yes --> N["Return bundle count"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_getCatalogSourceBundleCountFromProbeContainer --> clientsholder.GetClientsHolder + func_getCatalogSourceBundleCountFromProbeContainer --> clientsholder.NewContext + func_getCatalogSourceBundleCountFromProbeContainer --> ExecCommandContainer + func_getCatalogSourceBundleCountFromProbeContainer --> log.Info + func_getCatalogSourceBundleCountFromProbeContainer --> log.Error + func_getCatalogSourceBundleCountFromProbeContainer --> log.Warn + func_getCatalogSourceBundleCountFromProbeContainer --> strings.TrimSpace + func_getCatalogSourceBundleCountFromProbeContainer --> strings.Trim + func_getCatalogSourceBundleCountFromProbeContainer --> strconv.Atoi +``` + +#### Functions calling `getCatalogSourceBundleCountFromProbeContainer` + +```mermaid +graph TD + func_GetCatalogSourceBundleCount --> func_getCatalogSourceBundleCountFromProbeContainer +``` + +#### Usage example (Go) + +```go +// Minimal example invoking 
getCatalogSourceBundleCountFromProbeContainer +env := &TestEnvironment{ + AllServices: []Service{ /* ... */ }, + ProbePods: []Pod{ /* ... */ }, +} +cs := &olmv1Alpha.CatalogSource{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cs"}, +} + +bundleCount := getCatalogSourceBundleCountFromProbeContainer(env, cs) +fmt.Printf("Catalog source %q contains %d bundles\n", cs.Name, bundleCount) +``` + +--- + +### getCatalogSourceImageIndexFromInstallPlan + +**getCatalogSourceImageIndexFromInstallPlan** - Extracts the `Spec.Image` value of the catalog source that an install plan references. This image is used as the index image for the operator bundle. + +#### Signature (Go) + +```go +func getCatalogSourceImageIndexFromInstallPlan(installPlan *olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Extracts the `Spec.Image` value of the catalog source that an install plan references. This image is used as the index image for the operator bundle. | +| **Parameters** | `installPlan *olmv1Alpha.InstallPlan` – The install plan containing a reference to a catalog source.
`allCatalogSources []*olmv1Alpha.CatalogSource` – Slice of all catalog sources in the cluster to search within. | +| **Return value** | `(string, error)` – The image string if found; otherwise an empty string and an error describing the failure. | +| **Key dependencies** | • `fmt.Errorf` – for constructing an error when no matching catalog source is found.
• Access to fields on `installPlan.Status.BundleLookups[0]` and `CatalogSourceRef`. | +| **Side effects** | None; purely functional, performs look‑up without mutating inputs. | +| **How it fits the package** | Used by higher‑level logic that gathers operator metadata (`getAtLeastOneInstallPlan`). It bridges install plan information to catalog source details needed for bundle indexing. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get CatalogSourceRef"} + B --> C["Retrieve Name & Namespace"] + C --> D{"Search allCatalogSources"} + D -->|"Found"| E["Return Spec.Image, nil"] + D -->|"Not found"| F["Return , Errorf(failed to get catalogsource: not found)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getCatalogSourceImageIndexFromInstallPlan --> fmt_Errorf +``` + +#### Functions calling `getCatalogSourceImageIndexFromInstallPlan` (Mermaid) + +```mermaid +graph TD + func_getAtLeastOneInstallPlan --> func_getCatalogSourceImageIndexFromInstallPlan +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getCatalogSourceImageIndexFromInstallPlan +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func example() { + // Assume installPlan and catalogSources are already populated. + var installPlan *olmv1Alpha.InstallPlan + var catalogSources []*olmv1Alpha.CatalogSource + + image, err := provider.GetCatalogSourceImageIndexFromInstallPlan(installPlan, catalogSources) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + fmt.Printf("Catalog source image: %s\n", image) +} +``` + +--- + +### getContainers + +**getContainers** - Gathers every `Container` object present in the supplied slice of `*Pod`. 
+ +#### 1) Signature (Go) + +```go +func getContainers(pods []*Pod) []*Container +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers every `Container` object present in the supplied slice of `*Pod`. | +| **Parameters** | `pods []*Pod` – slice of pointers to Pod objects to be processed. | +| **Return value** | `[]*Container` – a flattened slice containing all containers from the input pods. | +| **Key dependencies** | • `append` (built‑in function) – used to accumulate containers.
• `pod.Containers` field – accessed for each pod. | +| **Side effects** | None; purely functional transformation of data. | +| **How it fits the package** | Serves as a helper for various test environment methods that need a consolidated list of containers, e.g., when filtering pods by CPU guarantees or host‑PID settings. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"pods empty?"} + B -- Yes --> C["Return nil"] + B -- No --> D["Initialize containers slice"] + D --> E["Loop over each pod"] + E --> F["Append pod.Containers to containers"] + F --> G["End of loop"] + G --> H["Return containers"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getContainers --> append +``` + +#### 5) Functions calling `getContainers` (Mermaid) + +```mermaid +graph TD + TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs --> getContainers + TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID --> getContainers + TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID --> getContainers + TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID --> getContainers +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getContainers +pods := []*Pod{pod1, pod2} +containers := getContainers(pods) +// containers now holds all Container pointers from pod1 and pod2 +``` + +--- + +### getMachineConfig + +**getMachineConfig** - Fetches an OpenShift `MachineConfig` by name, caches it in the supplied map to avoid duplicate API calls, and parses its raw configuration into a structured `MachineConfig` value. 
+ +#### Signature (Go) + +```go +func getMachineConfig(mcName string, machineConfigs map[string]MachineConfig) (MachineConfig, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Fetches an OpenShift `MachineConfig` by name, caches it in the supplied map to avoid duplicate API calls, and parses its raw configuration into a structured `MachineConfig` value. | +| **Parameters** | *`mcName string` – The name of the MachineConfig resource.
• `machineConfigs map[string]MachineConfig` – Cache mapping names to already‑fetched configs. | +| **Return value** | • `MachineConfig` – Parsed machine configuration, including its raw Kubernetes object and unmarshaled spec.
• `error` – Non‑nil if any API call or JSON unmarshal fails. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains shared k8s client set.
• `client.MachineCfg.MachineconfigurationV1().MachineConfigs().Get(...)` – retrieves the resource from the cluster.
• `json.Unmarshal` – decodes the raw YAML/JSON spec into Go struct.
• `fmt.Errorf` – formats error messages. | +| **Side effects** | No global state mutation; only local cache lookup and population, and network I/O via the k8s client. | +| **How it fits the package** | Used by node‑wrapping logic (`createNodes`) to enrich each node with its current MachineConfig, enabling downstream analysis of node configuration. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Cache hit?"} + B -- Yes --> C["Return cached MachineConfig"] + B -- No --> D["Retrieve MachineConfig from API"] + D --> E["Unmarshal raw config into struct"] + E --> F["Return parsed MachineConfig"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getMachineConfig --> func_GetClientsHolder + func_getMachineConfig --> func_MachineConfigs + func_getMachineConfig --> func_Unmarshal + func_getMachineConfig --> func_Errorf +``` + +#### Functions calling `getMachineConfig` (Mermaid) + +```mermaid +graph TD + func_createNodes --> func_getMachineConfig +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getMachineConfig +import ( + "fmt" +) + +func main() { + cache := map[string]MachineConfig{} + mc, err := getMachineConfig("worker-000", cache) + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Printf("Fetched MachineConfig: %+v\n", mc) +} +``` + +--- + +### getOperatorTargetNamespaces + +**getOperatorTargetNamespaces** - Queries the OpenShift Cluster‑Lifecycle‑Manager API to fetch the first `OperatorGroup` in a given namespace and returns its target namespaces. + +#### 1) Signature (Go) + +```go +func getOperatorTargetNamespaces(namespace string) ([]string, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Queries the OpenShift Cluster‑Lifecycle‑Manager API to fetch the first `OperatorGroup` in a given namespace and returns its target namespaces. 
| +| **Parameters** | `namespace string` – The Kubernetes namespace where OperatorGroups are listed. | +| **Return value** | `([]string, error)` – A slice of namespace names that the operator targets; an error if the request fails or no groups exist. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains a shared client set.
• `client.OlmClient.OperatorsV1().OperatorGroups(namespace).List(...)` – performs the API call.
• `errors.New("no OperatorGroup found")` – constructs an error when list is empty. | +| **Side effects** | None (pure function aside from external API call). | +| **How it fits the package** | Used by `createOperators` to determine whether an operator is cluster‑wide or namespace‑specific, influencing subsequent logic for subscription handling. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get ClientsHolder"} + B --> C["List OperatorGroups in "] + C --> D{"Check list length"} + D -- >0 --> E["Return first groups TargetNamespaces"] + D -- 0 --> F["Return error no OperatorGroup found"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_getOperatorTargetNamespaces --> func_GetClientsHolder + func_getOperatorTargetNamespaces --> func_List + func_getOperatorTargetNamespaces --> func_OperatorsV1 + func_getOperatorTargetNamespaces --> func_New_error +``` + +#### 5) Functions calling `getOperatorTargetNamespaces` (Mermaid) + +```mermaid +graph TD + func_createOperators --> func_getOperatorTargetNamespaces +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getOperatorTargetNamespaces +ns := "openshift-operators" +targetNSs, err := getOperatorTargetNamespaces(ns) +if err != nil { + fmt.Printf("Error: %v\n", err) +} else { + fmt.Printf("Target namespaces for operator group in %s: %v\n", ns, targetNSs) +} +``` + +--- + +### getPackageManifestWithSubscription + +**getPackageManifestWithSubscription** - Finds and returns the `PackageManifest` that corresponds to a specific `Subscription`. It matches on package name, catalog source namespace, and catalog source. + +#### Signature (Go) + +```go +func(*olmv1Alpha.Subscription, []*olmpkgv1.PackageManifest)(*olmpkgv1.PackageManifest) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Finds and returns the `PackageManifest` that corresponds to a specific `Subscription`. 
It matches on package name, catalog source namespace, and catalog source. | +| **Parameters** | `subscription *olmv1Alpha.Subscription` – The subscription whose manifest is sought.
`packageManifests []*olmpkgv1.PackageManifest` – Slice of available package manifests to search. | +| **Return value** | `*olmpkgv1.PackageManifest` – The matching manifest, or `nil` if none found. | +| **Key dependencies** | • Iterates over the slice; no external packages are invoked.
• Relies on fields from `olmv1Alpha.Subscription` and `olmpkgv1.PackageManifest`. | +| **Side effects** | None – purely functional lookup. | +| **How it fits the package** | Used by higher‑level logic to determine default channels or other metadata when a subscription is processed. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> ForEachIndex + ForEachIndex --> CheckConditions + CheckConditions -- match --> ReturnManifest + CheckConditions -- no match --> NextIndex + NextIndex --> End +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `getPackageManifestWithSubscription` (Mermaid) + +```mermaid +graph TD + func_getAtLeastOneSubscription --> func_getPackageManifestWithSubscription +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getPackageManifestWithSubscription +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" + olmpkgv1 "github.com/operator-framework/api/pkg/package/versions/v1alpha1" +) + +func main() { + // Assume we have a subscription and a list of manifests + var sub *olmv1Alpha.Subscription + var manifests []*olmpkgv1.PackageManifest + + // Retrieve the matching manifest (or nil) + manifest := provider.GetPackageManifestWithSubscription(sub, manifests) + + if manifest != nil { + println("Found package:", manifest.Status.PackageName) + } else { + println("No matching package manifest found") + } +} +``` + +--- + +### getPodContainers + +**getPodContainers** - Builds a slice of `*Container` objects representing each container in the supplied Pod, enriched with status and runtime information. It optionally skips containers that match an ignore list. 
+ +#### Signature (Go) + +```go +func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a slice of `*Container` objects representing each container in the supplied Pod, enriched with status and runtime information. It optionally skips containers that match an ignore list. | +| **Parameters** | `aPod *corev1.Pod` – the Kubernetes pod from which to extract containers.
`useIgnoreList bool` – when true, containers whose names match known ignored patterns are omitted. | +| **Return value** | `[]*Container` – a slice of container descriptors ready for further processing or testing. | +| **Key dependencies** | • `GetRuntimeUID` – extracts runtime and UID from a `ContainerStatus`.
• `buildContainerImageSource` – parses image reference and ID into a `ContainerImageIdentifier`.
• `log.Warn` – logs non‑ready or non‑running containers.
* `Container.HasIgnoredContainerName` – determines if a container should be skipped. | +| **Side effects** | Emits warning logs for containers that are not ready or not running; otherwise performs only data extraction. No state mutation outside the returned slice. | +| **How it fits the package** | Used by `NewPod` and during environment construction to populate the pod’s container list, providing a unified representation of runtime, image, status, and health for subsequent tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over Pod.Spec.Containers"} + B --> C["Find matching ContainerStatus"] + C --> D["Extract Runtime & UID via GetRuntimeUID"] + D --> E["Build Container struct with image info"] + E --> F{"Check Readiness / Running state"} + F -->|"Not Ready"| G["Warn about readiness"] + F -->|"Not Running"| H["Warn about state"] + F --> I["Check ignore list if useIgnoreList"] + I -->|"Ignored"| J["Skip container"] + I -->|"Allowed"| K["Append to containerList"] + K --> B + J --> B + G --> B + H --> B + E --> B + L["Return containerList"] --> A +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getPodContainers --> GetRuntimeUID + func_getPodContainers --> buildContainerImageSource + func_getPodContainers --> log.Warn + func_getPodContainers --> Container.HasIgnoredContainerName +``` + +#### Functions calling `getPodContainers` (Mermaid) + +```mermaid +graph TD + NewPod --> getPodContainers + buildTestEnvironment --> getPodContainers +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getPodContainers +pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}, + Spec: corev1.PodSpec{ + NodeName: "node-1", + Containers: []corev1.Container{{Name: "app", Image: "nginx:latest"}}, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "app", + Ready: true, + State: corev1.ContainerState{Running: 
&corev1.ContainerStateRunning{}}, + ImageID: "docker://sha256:abcd1234", + RestartCount: 0, + }}, + }, +} + +containers := getPodContainers(pod, false) +for _, c := range containers { + fmt.Printf("Container %s running image %s on node %s\n", c.Container.Name, c.ContainerImageIdentifier, c.NodeName) +} +``` + +--- + +### getSummaryAllOperators + +**getSummaryAllOperators** - Builds a unique, human‑readable summary for each operator, including phase, package name, version, and namespace scope. Returns the summaries sorted alphabetically. + +#### Signature (Go) + +```go +func getSummaryAllOperators(operators []*Operator) (summary []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a unique, human‑readable summary for each operator, including phase, package name, version, and namespace scope. Returns the summaries sorted alphabetically. | +| **Parameters** | `operators []*Operator` – slice of pointers to `Operator` structs representing discovered operators. | +| **Return value** | `summary []string` – alphabetically ordered list of summary strings. | +| **Key dependencies** | • `fmt.Sprintf` (formatting)
• `sort.Strings` (sorting) | +| **Side effects** | None; operates only on its inputs and returns a new slice. | +| **How it fits the package** | Used by `buildTestEnvironment` to populate `AllOperatorsSummary`, providing a concise overview of all discovered operators for logging or reporting purposes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over operators"} + B -->|"for each o"| C["Create key string"] + C --> D["Add to map if unique"] + D --> B + B --> E["Collect keys from map"] + E --> F["Append to summary slice"] + F --> G["Sort alphabetically"] + G --> H["Return summary"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getSummaryAllOperators --> fmt.Sprintf + func_getSummaryAllOperators --> sort.Strings +``` + +#### Functions calling `getSummaryAllOperators` (Mermaid) + +```mermaid +graph TD + buildTestEnvironment --> getSummaryAllOperators +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getSummaryAllOperators +operators := []*Operator{ + {Phase: "Install", PackageFromCsvName: "prometheus-operator", Version: "0.12.1", IsClusterWide: true}, + {Phase: "Uninstall", PackageFromCsvName: "cert-manager", Version: "v1.5.3", TargetNamespaces: []string{"kube-system"}}, +} +summary := getSummaryAllOperators(operators) +for _, s := range summary { + fmt.Println(s) +} +``` + +--- + +### getUniqueCsvListByName + +**getUniqueCsvListByName** - Filters a slice of CSV objects so that each distinct `csv.Name` appears only once, then returns the list sorted by name. + +Creates a deterministic list of ClusterServiceVersions (CSV) that contains only one entry per unique CSV name, sorted alphabetically by the CSV name. 
+ +--- + +#### Signature (Go) + +```go +func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Alpha.ClusterServiceVersion +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters a slice of CSV objects so that each distinct `csv.Name` appears only once, then returns the list sorted by name. | +| **Parameters** | `csvs []*olmv1Alpha.ClusterServiceVersion` – input slice that may contain duplicates. | +| **Return value** | `[]*olmv1Alpha.ClusterServiceVersion` – unique, alphabetically‑sorted CSV slice. | +| **Key dependencies** | *`log.Info` from the internal logging package (for debug output).
• Built‑in `len`, `append` functions.
* `sort.Slice` from the standard library for ordering. | +| **Side effects** | None beyond emitting log messages; does not modify the input slice or any global state. | +| **How it fits the package** | Used by `createOperators` to deduplicate CSVs before building operator objects, ensuring each operator is represented once per unique name. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Build map"} + B --> C["Iterate csvs"] + C --> D["Add to uniqueCsvsMap by Name"] + D --> E["Create list from map values"] + E --> F["Log each unique CSV"] + F --> G["Sort list by Name"] + G --> H["Return sorted list"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getUniqueCsvListByName --> Logger.Info + func_getUniqueCsvListByName --> len + func_getUniqueCsvListByName --> append + func_getUniqueCsvListByName --> sort.Slice +``` + +--- + +#### Functions calling `getUniqueCsvListByName` (Mermaid) + +```mermaid +graph TD + createOperators --> getUniqueCsvListByName +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking getUniqueCsvListByName + +import ( + olmv1Alpha "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + // Sample CSVs, some with duplicate names. + csvs := []*olmv1Alpha.ClusterServiceVersion{ + {ObjectMeta: metav1.ObjectMeta{Name: "foo.v1", Namespace: "ns1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "bar.v2", Namespace: "ns2"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "foo.v1", Namespace: "ns3"}}, // duplicate name + } + + unique := getUniqueCsvListByName(csvs) + + for _, csv := range unique { + fmt.Printf("CSV: %s in namespace %s\n", csv.Name, csv.Namespace) + } +} +``` + +--- + +--- + +### isNetworkAttachmentDefinitionConfigTypeSRIOV + +**isNetworkAttachmentDefinitionConfigTypeSRIOV** - Checks whether the JSON configuration of a NetworkAttachmentDefinition contains an SR‑I/O‑V plugin. 
It supports both single‑plugin and multi‑plugin CNI specifications. + +#### Signature (Go) + +```go +func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the JSON configuration of a NetworkAttachmentDefinition contains an SR‑I/O‑V plugin. It supports both single‑plugin and multi‑plugin CNI specifications. | +| **Parameters** | `nadConfig string` – Raw JSON string from `nad.Spec.Config`. | +| **Return value** | `(bool, error)` – `true` if the config declares a `sriov` type; `false` otherwise. Returns an error on malformed JSON or unexpected structure. | +| **Key dependencies** | • `encoding/json.Unmarshal`
• `fmt.Errorf`
• `github.com/redhat‑best‑practices-for-k8s/certsuite/internal/log.Logger.Debug` (for debug logging) | +| **Side effects** | No state mutation; only logs debug messages. | +| **How it fits the package** | Used by `Pod.IsUsingSRIOV` to decide if a pod relies on SR‑I/O‑V networking, enabling subsequent compliance checks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Parse JSON"} + B -->|"Error"| C["Return error"] + B --> D{"Is single plugin?"} + D -- Yes --> E{"Type equals sriov?"} + E -->|"Yes"| F["Return true"] + E -->|"No"| G["Return false"] + D -- No --> H{"Has plugins list?"} + H -->|"Missing"| C + H --> I["Iterate over plugins"] + I --> J{"Plugin type == sriov?"} + J -->|"Yes"| F + J -->|"No"| K["Continue loop"] + K --> L{"End of loop?"} + L -- Yes --> G + L -- No --> I +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isNetworkAttachmentDefinitionConfigTypeSRIOV --> func_Unmarshal["encoding/json.Unmarshal"] + func_isNetworkAttachmentDefinitionConfigTypeSRIOV --> func_Errorf1["fmt.Errorf"] + func_isNetworkAttachmentDefinitionConfigTypeSRIOV --> func_Debug1["log.Logger.Debug"] + func_isNetworkAttachmentDefinitionConfigTypeSRIOV --> func_Errorf2["fmt.Errorf"] + func_isNetworkAttachmentDefinitionConfigTypeSRIOV --> func_Debug2["log.Logger.Debug"] +``` + +#### Functions calling `isNetworkAttachmentDefinitionConfigTypeSRIOV` (Mermaid) + +```mermaid +graph TD + func_Pod_IsUsingSRIOV["Pod.IsUsingSRIOV"] --> func_isNetworkAttachmentDefinitionConfigTypeSRIOV +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isNetworkAttachmentDefinitionConfigTypeSRIOV +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + nadJSON := `{ + "cniVersion": "0.4.0", + "name": "sriov-network", + "type": "sriov" + }` + + isSRIOV, err := provider.isNetworkAttachmentDefinitionConfigTypeSRIOV(nadJSON) + if err != nil { + fmt.Printf("Error: 
%v\n", err) + return + } + fmt.Printf("Is SR‑I/O‑V: %t\n", isSRIOV) +} +``` + +--- + +### isNetworkAttachmentDefinitionSRIOVConfigMTUSet + +**isNetworkAttachmentDefinitionSRIOVConfigMTUSet** - Parses a CNI configuration JSON and checks whether any SR‑I/O V plugin declares an MTU value greater than zero. + +#### Signature (Go) + +```go +func isNetworkAttachmentDefinitionSRIOVConfigMTUSet(nadConfig string) (bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a CNI configuration JSON and checks whether any SR‑I/O V plugin declares an MTU value greater than zero. | +| **Parameters** | `nadConfig` – JSON string representing the Network Attachment Definition. | +| **Return value** | *bool* – `true` if an SR‑I/O V plugin with a positive MTU is present; otherwise `false`.
*error* – non‑nil if the input cannot be parsed or the configuration structure is invalid. | +| **Key dependencies** | • `encoding/json.Unmarshal` – decodes the JSON.
• `fmt.Errorf` – creates error messages.
• `log.Debug` – logs plugin details for debugging. | +| **Side effects** | None that modify external state; only logging occurs. | +| **How it fits the package** | Used internally by the provider package to validate Network Attachment Definitions before applying network policies or configuration changes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Parse JSON"} + B -- success --> C{"Check Plugins"} + B -- failure --> D["Return error"] + C -- SR‑I/O V MTU >0 --> E["Return true, nil"] + C -- otherwise --> F["Continue loop / no match"] + F --> G["End with false, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isNetworkAttachmentDefinitionSRIOVConfigMTUSet --> json_Unmarshal + func_isNetworkAttachmentDefinitionSRIOVConfigMTUSet --> fmt_Errorf + func_isNetworkAttachmentDefinitionSRIOVConfigMTUSet --> log_Debug +``` + +#### Functions calling `isNetworkAttachmentDefinitionSRIOVConfigMTUSet` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking isNetworkAttachmentDefinitionSRIOVConfigMTUSet +package main + +import ( + "fmt" +) + +func main() { + nadJSON := `{ + "cniVersion": "0.4.0", + "name": "vlan-100", + "plugins": [ + { + "type": "sriov", + "master": "ext0", + "mtu": 1500, + "vlanId": 100, + "linkInContainer": true, + "ipam": {"type":"whereabouts","ipRanges":[{"range":"1.1.1.0/24"}]} + } + ] + }` + hasMTU, err := isNetworkAttachmentDefinitionSRIOVConfigMTUSet(nadJSON) + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Printf("SR‑I/O V MTU set? %v\n", hasMTU) +} +``` + +--- + +### isSkipHelmChart + +**isSkipHelmChart** - Checks whether a Helm chart identified by `helmName` appears in the provided list of charts to skip (`skipHelmChartList`). If found, logs the event and returns `true`; otherwise returns `false`. 
+ +#### Signature (Go) + +```go +func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelmChartList) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether a Helm chart identified by `helmName` appears in the provided list of charts to skip (`skipHelmChartList`). If found, logs the event and returns `true`; otherwise returns `false`. | +| **Parameters** | `helmName string` – name of the Helm release.
`skipHelmChartList []configuration.SkipHelmChartList` – slice containing names of charts that should be ignored. | +| **Return value** | `bool` – `true` if the chart is in the skip list, otherwise `false`. | +| **Key dependencies** | • `len` (builtin) to check empty slice.
• `log.Info` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log` for debugging output. | +| **Side effects** | Emits an informational log message when a chart is skipped; otherwise no observable side‑effects. No state mutations or I/O beyond logging. | +| **How it fits the package** | Used by `buildTestEnvironment` to filter out Helm releases that should not be considered during test environment construction. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check if skip list is empty"] -->|"yes"| B["Return false"] + A -->|"no"| C["Iterate over skipHelmChartList"] + C --> D{"Match helmName?"} + D -- yes --> E["Log skip event"] --> F["Return true"] + D -- no --> G["Continue loop"] + G --> D + F --> H["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isSkipHelmChart --> builtin_len + func_isSkipHelmChart --> func_log_Info +``` + +#### Functions calling `isSkipHelmChart` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_isSkipHelmChart +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isSkipHelmChart +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/configuration" +) + +func main() { + skipList := []configuration.SkipHelmChartList{ + {Name: "example-chart"}, + } + + shouldSkip := provider.isSkipHelmChart("example-chart", skipList) + fmt.Printf("Should skip? %v\n", shouldSkip) // Output: Should skip? true +} +``` + +--- + +### searchPodInSlice + +**searchPodInSlice** - Returns the first `*Pod` from `pods` that matches the supplied `name` and `namespace`. If none match, returns `nil`. 
+ +#### Signature (Go) + +```go +func searchPodInSlice(name, namespace string, pods []*Pod) *Pod +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the first `*Pod` from `pods` that matches the supplied `name` and `namespace`. If none match, returns `nil`. | +| **Parameters** |
  • `name` (string) – Pod name to locate.
  • `namespace` (string) – Namespace of the pod.
  • `pods` ([]*Pod) – Collection of pods to search.
| +| **Return value** | `*Pod` – Matching pod or `nil` if not found. | +| **Key dependencies** |
  • Builds a map keyed by `types.NamespacedName` (from `k8s.io/apimachinery/pkg/types`).
  • Uses the `Name` and `Namespace` fields of each `*Pod`.
| +| **Side effects** | None. Function is pure; it only reads from inputs and returns a value. | +| **How it fits the package** | Used by helper functions that merge operator and operand pod lists into a test environment, ensuring no duplicate pods are added. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Build map"} + B --> C["Iterate over pods"] + C --> D["Add to map with key = namespace/name"] + D --> E["End of loop"] + E --> F["Create search key"] + F --> G{"Key exists?"} + G -- Yes --> H["Return pod"] + G -- No --> I["Return nil"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_searchPodInSlice --> None +``` + +#### Functions calling `searchPodInSlice` (Mermaid) + +```mermaid +graph TD + func_addOperandPodsToTestPods --> func_searchPodInSlice + func_addOperatorPodsToTestPods --> func_searchPodInSlice +``` + +#### Usage example (Go) + +```go +// Minimal example invoking searchPodInSlice +pods := []*Pod{ + {Name: "frontend", Namespace: "prod"}, + {Name: "backend", Namespace: "prod"}, +} + +found := searchPodInSlice("frontend", "prod", pods) +if found != nil { + fmt.Printf("Found pod %s/%s\n", found.Namespace, found.Name) +} else { + fmt.Println("Pod not found") +} +``` + +--- + +### sriovNetworkUsesMTU + +**sriovNetworkUsesMTU** - Determines whether a SriovNetwork identified by `nadName` has an MTU value defined in any corresponding SriovNetworkNodePolicy. + +#### Signature (Go) + +```go +func sriovNetworkUsesMTU(sriovNetworks, sriovNetworkNodePolicies []unstructured.Unstructured, nadName string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a SriovNetwork identified by `nadName` has an MTU value defined in any corresponding SriovNetworkNodePolicy. | +| **Parameters** | `sriovNetworks []unstructured.Unstructured` – list of SriovNetwork resources.
`sriovNetworkNodePolicies []unstructured.Unstructured` – list of SriovNetworkNodePolicy resources.
`nadName string` – name of the NetworkAttachmentDefinition to match. | +| **Return value** | `bool` – `true` if an MTU is found for the matching network; otherwise `false`. | +| **Key dependencies** | • `unstructured.NestedMap`, `unstructured.NestedString`, `unstructured.NestedInt64` (accessing fields)
• `log.Debug` from the internal logging package
• `GetName`, `GetNamespace` methods of `unstructured.Unstructured` | +| **Side effects** | Only logs debug information; no mutation of inputs or external state. | +| **How it fits the package** | Used by `Pod.IsUsingSRIOVWithMTU` to decide if a pod’s SR‑I/O‑V network attachment has an MTU configured via CRDs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["sriovNetworks"] --> B["Iterate over SriovNetwork"] + B --> C{"Match nadName?"} + C -- yes --> D["Retrieve spec.resourceName"] + D --> E["Check node policies in same namespace"] + E --> F{"resourceName matches?"} + F -- yes --> G["Get mtu from policySpec"] + G --> H{"mtu found & >0?"} + H -- true --> I["Return true"] + I --> J["End"] + H -- false --> K["Continue loop"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_sriovNetworkUsesMTU --> func_GetName + func_sriovNetworkUsesMTU --> func_NestedMap + func_sriovNetworkUsesMTU --> func_NestedString + func_sriovNetworkUsesMTU --> func_NestedInt64 + func_sriovNetworkUsesMTU --> Logger.Debug +``` + +#### Functions calling `sriovNetworkUsesMTU` (Mermaid) + +```mermaid +graph TD + func_Pod.IsUsingSRIOVWithMTU --> func_sriovNetworkUsesMTU +``` + +#### Usage example (Go) + +```go +// Minimal example invoking sriovNetworkUsesMTU +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func main() { + // Assume we already have slices of unstructured objects for SriovNetworks and NodePolicies. + var sriovNetworks []unstructured.Unstructured + var nodePolicies []unstructured.Unstructured + nadName := "example-nad" + + usesMTU := sriovNetworkUsesMTU(sriovNetworks, nodePolicies, nadName) + fmt.Printf("SriovNetwork %q uses MTU: %v\n", nadName, usesMTU) +} +``` + +--- + +### updateCrUnderTest + +**updateCrUnderTest** - Transforms a slice of `autodiscover.ScaleObject` into the package’s own `ScaleObject` representation, preserving scaling data and resource schema. 
+ +#### Signature (Go) + +```go +func updateCrUnderTest(scaleCrUnderTest []autodiscover.ScaleObject) []ScaleObject +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a slice of `autodiscover.ScaleObject` into the package’s own `ScaleObject` representation, preserving scaling data and resource schema. | +| **Parameters** | `scaleCrUnderTest []autodiscover.ScaleObject` – collection obtained from autodiscovery. | +| **Return value** | `[]ScaleObject` – transformed slice ready for use in test environment construction. | +| **Key dependencies** | • `append` (built‑in)
• `autodiscover.ScaleObject` type
• local `ScaleObject`, `CrScale` types | +| **Side effects** | None; pure function. No mutation of input slice, no I/O or concurrency. | +| **How it fits the package** | Called by `buildTestEnvironment` to populate `env.ScaleCrUnderTest` after autodiscovery has produced raw scale objects. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"for each index i in scaleCrUnderTest"} + B --> C["Create a new ScaleObject"] + C --> D["Append to temp slice"] + D --> E{"continue loop?"} + E -- Yes --> B + E -- No --> F["Return temp slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_updateCrUnderTest --> func_append +``` + +#### Functions calling `updateCrUnderTest` (Mermaid) + +```mermaid +graph TD + func_buildTestEnvironment --> func_updateCrUnderTest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking updateCrUnderTest +import "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider/autodiscover" + +func main() { + raw := []autodiscover.ScaleObject{ + {Scale: autodiscover.CrScale{Min: 1, Max: 5}, GroupResourceSchema: "apps/v1/Deployment"}, + } + internal := provider.updateCrUnderTest(raw) + fmt.Printf("%+v\n", internal) // prints transformed ScaleObjects +} +``` + +--- diff --git a/docs/pkg/scheduling/scheduling.md b/docs/pkg/scheduling/scheduling.md new file mode 100644 index 000000000..50dbe9068 --- /dev/null +++ b/docs/pkg/scheduling/scheduling.md @@ -0,0 +1,350 @@ +# Package scheduling + +**Path**: `pkg/scheduling` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [GetProcessCPUScheduling](#getprocesscpuscheduling) + - [PolicyIsRT](#policyisrt) + - [ProcessPidsCPUScheduling](#processpidscpuscheduling) +- [Local Functions](#local-functions) + - [parseSchedulingPolicyAndPriority](#parseschedulingpolicyandpriority) + +## Overview + +The scheduling package provides utilities for inspecting and validating CPU scheduling policies of 
processes running inside Kubernetes containers. It retrieves policy information via `chrt -p ` executed in probe pods, parses the output, and exposes helpers to check real‑time compliance. + +### Key Features + +- Retrieves scheduling policy and priority for any process ID within a container using remote command execution +- Parses raw `chrt` output into structured values and determines if policies are real‑time +- Aggregates per‑process compliance results into report objects for test frameworks + +### Design Notes + +- Uses function variables (GetProcessCPUSchedulingFn, CrcClientExecCommandContainerNSEnter) to allow dependency injection in tests +- Parsing logic expects a specific `chrt` output format; any deviation leads to an error and is logged +- Compliance checks are performed per‑process and results split into compliant/non‑compliant slices + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetProcessCPUScheduling(pid int, testContainer *provider.Container)(string,int,error)](#getprocesscpuscheduling) | Executes `chrt -p ` inside the node’s probe pod to read a process’s scheduling policy and priority. | +| [func PolicyIsRT(schedPolicy string) bool](#policyisrt) | Checks whether the provided scheduling policy name corresponds to one of the real‑time policies (`SCHED_FIFO` or `SCHED_RR`). | +| [func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string, logger *log.Logger) ([]*testhelper.ReportObject, []*testhelper.ReportObject)](#processpidscpuscheduling) | Determines whether each process in a container satisfies the CPU scheduling policy specified by `check`. Returns two slices of report objects: compliant and non‑compliant. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func parseSchedulingPolicyAndPriority(chrtCommandOutput string) (schedPolicy string, schedPriority int, err error)](#parseschedulingpolicyandpriority) | Extracts the CPU scheduling policy and priority from the text produced by `chrt -p <pid>`; returns them or an error if parsing fails. | + +## Exported Functions + +### GetProcessCPUScheduling + +**GetProcessCPUScheduling** - Executes `chrt -p <pid>` inside the node’s probe pod to read a process’s scheduling policy and priority. + +#### 1) Signature (Go) + +```go +func GetProcessCPUScheduling(pid int, testContainer *provider.Container)(string,int,error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes `chrt -p <pid>` inside the node’s probe pod to read a process’s scheduling policy and priority. | +| **Parameters** | `pid int` – target process ID; `testContainer *provider.Container` – container metadata used to locate the probe pod. | +| **Return value** | `schedulePolicy string`, `schedulePriority int`, `err error`. On success, the policy (e.g., `"SCHED_OTHER"`) and priority are returned; on failure an error is provided and priority defaults to `InvalidPriority`. | +| **Key dependencies** | - `log.Info` for tracing
- `fmt.Sprintf` for command construction
- `provider.GetTestEnvironment` to obtain test env
- `crclient.GetNodeProbePodContext` to get probe pod context
- `clientsholder.GetClientsHolder` and `ExecCommandContainer` to run the command
- `parseSchedulingPolicyAndPriority` to parse the output | +| **Side effects** | No mutation of global state; only logs and executes a shell command in the cluster. | +| **How it fits the package** | Central utility for performance tests that need to verify process scheduling constraints within containers. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log start info"] + B --> C["Build command `chrt -p `"] + C --> D["Get test env"] + D --> E["Probe pod context via node & container"] + E --> F["Get client holder"] + F --> G["Execute command in probe pod"] + G --> H{"Command succeeded?"} + H -- Yes --> I["Parse scheduling policy/priority"] + I --> J{"Parsing succeeded?"} + J -- Yes --> K["Log success"] + K --> L["Return values"] + H -- No --> M["Return error (stderr or exec fail)"] + J -- No --> N["Return error (parse fail)"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetProcessCPUScheduling --> func_logger.Info + func_GetProcessCPUScheduling --> fmt.Sprintf + func_GetProcessCPUScheduling --> provider.GetTestEnvironment + func_GetProcessCPUScheduling --> crclient.GetNodeProbePodContext + func_GetProcessCPUScheduling --> clientsholder.GetClientsHolder + func_GetProcessCPUScheduling --> clientsholder.ExecCommandContainer + func_GetProcessCPUScheduling --> parseSchedulingPolicyAndPriority +``` + +#### 5) Functions calling `GetProcessCPUScheduling` (Mermaid) + +```mermaid +graph TD + testRtAppsNoExecProbes --> GetProcessCPUScheduling +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetProcessCPUScheduling +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func demo() { + // Assume we have a container object and a PID to inspect + var cont *provider.Container // populated elsewhere + pid := 1234 + + policy, priority, err := scheduling.GetProcessCPUScheduling(pid, cont) + 
if err != nil { + fmt.Printf("Failed: %v\n", err) + return + } + fmt.Printf("PID %d has policy %s with priority %d\n", pid, policy, priority) +} +``` + +--- + +### PolicyIsRT + +**PolicyIsRT** - Checks whether the provided scheduling policy name corresponds to one of the real‑time policies (`SCHED_FIFO` or `SCHED_RR`). + +#### Signature (Go) + +```go +func PolicyIsRT(schedPolicy string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the provided scheduling policy name corresponds to one of the real‑time policies (`SCHED_FIFO` or `SCHED_RR`). | +| **Parameters** | `schedPolicy string` – The name of the scheduling policy to test. | +| **Return value** | `bool` – true if the policy is either FIFO or Round‑Robin, otherwise false. | +| **Key dependencies** | *Uses constants* `SchedulingFirstInFirstOut` and `SchedulingRoundRobin`. | +| **Side effects** | None. Pure function with no state mutation or I/O. | +| **How it fits the package** | Provides a simple predicate used by tests to determine if a container process runs under a real‑time scheduling policy. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + PolicyIsRT --> compareFIFOOrRR +``` + +*`compareFIFOOrRR` represents the conditional check against the two constants.* + +#### Function dependencies + +None – this function does not call any other functions; it only compares its argument against the two package constants. + +#### Functions calling `PolicyIsRT` (Mermaid) + +```mermaid +graph TD + testRtAppsNoExecProbes --> PolicyIsRT +``` + +The test function `testRtAppsNoExecProbes` from the performance tests calls `PolicyIsRT` to verify that processes with exec probes are not running under a real‑time scheduling policy.
+ +#### Usage example (Go) + +```go +// Minimal example invoking PolicyIsRT +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/scheduling" +) + +func main() { + policy := "SCHED_FIFO" // Example policy name + if scheduling.PolicyIsRT(policy) { + fmt.Printf("%s is a real‑time scheduling policy.\n", policy) + } else { + fmt.Printf("%s is NOT a real‑time scheduling policy.\n", policy) + } +} +``` + +--- + +--- + +### ProcessPidsCPUScheduling + +**ProcessPidsCPUScheduling** - Determines whether each process in a container satisfies the CPU scheduling policy specified by `check`. Returns two slices of report objects: compliant and non‑compliant. + +#### Signature (Go) + +```go +func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string, logger *log.Logger) ([]*testhelper.ReportObject, []*testhelper.ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether each process in a container satisfies the CPU scheduling policy specified by `check`. Returns two slices of report objects: compliant and non‑compliant. | +| **Parameters** | - `processes []*crclient.Process` – list of processes to evaluate.
- `testContainer *provider.Container` – container whose processes are checked.
- `check string` – policy type (`SharedCPUScheduling`, `ExclusiveCPUScheduling`, or `IsolatedCPUScheduling`).
- `logger *log.Logger` – logger for debug/error messages. | +| **Return value** | Two slices of `*testhelper.ReportObject`: first compliant processes, second non‑compliant. | +| **Key dependencies** | - `GetProcessCPUSchedulingFn(process.Pid, testContainer)`
- `SetContainerProcessValues(schedulePolicy, fmt.Sprint(schedulePriority), process.Args)`
- `testhelper.NewContainerReportObject(...)` | +| **Side effects** | Generates log output; creates report objects but does not modify input data or container state. | +| **How it fits the package** | Central routine used by tests to enforce CPU scheduling rules on containers’ processes, feeding results into performance checks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over processes"} + B --> C["Get schedule policy & priority"] + C --> D{"Check passes?"} + D -- No --> E["Log error, create non‑compliant report"] + D -- Yes --> F["Log success, create compliant report"] + E --> G["Append to nonCompliantContainerPids"] + F --> H["Append to compliantContainerPids"] + G & H --> I["Continue loop"] + I --> B + B --> J["Return slices"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ProcessPidsCPUScheduling --> func_GetProcessCPUSchedulingFn + func_ProcessPidsCPUScheduling --> func_SetContainerProcessValues + func_ProcessPidsCPUScheduling --> testhelper_NewContainerReportObject + func_ProcessPidsCPUScheduling --> fmt_Sprint +``` + +#### Functions calling `ProcessPidsCPUScheduling` (Mermaid) + +```mermaid +graph TD + testSchedulingPolicyInCPUPool --> func_ProcessPidsCPUScheduling +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ProcessPidsCPUScheduling +import ( + "log" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/crclient" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume we already have a container and its processes. 
+ var container *provider.Container + var processes []*crclient.Process + + logger := log.Default() + compliant, nonCompliant := ProcessPidsCPUScheduling(processes, container, SharedCPUScheduling, logger) + + fmt.Printf("Compliant: %d, Non‑compliant: %d\n", len(compliant), len(nonCompliant)) +} +``` + +--- + +## Local Functions + +### parseSchedulingPolicyAndPriority + +**parseSchedulingPolicyAndPriority** - Extracts the CPU scheduling policy and priority from the text produced by `chrt -p `; returns them or an error if parsing fails. + +```go +func parseSchedulingPolicyAndPriority(chrtCommandOutput string) (schedPolicy string, schedPriority int, err error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Extracts the CPU scheduling policy and priority from the text produced by `chrt -p `; returns them or an error if parsing fails. | +| **Parameters** | `chrtCommandOutput string` – raw stdout of the command, typically containing two lines such as:
“pid 476's current scheduling policy: SCHED_OTHER”
“pid 476's current scheduling priority: 0” | +| **Return value** | `(schedPolicy string, schedPriority int, err error)` – parsed values or an error. `InvalidPriority` is returned if conversion fails. | +| **Key dependencies** | • `strings.Split`, `strings.Fields`, `strings.Contains`
• `len` (builtin)
• `strconv.Atoi`
• `log.Error` from the internal logging package
• `fmt.Errorf` | +| **Side effects** | Logs an error message via `log.Error` when `Atoi` fails. No other state changes. | +| **How it fits the package** | Helper for `GetProcessCPUScheduling`, turning command output into structured data that callers use to verify scheduling policies on test containers. | + +```mermaid +flowchart TD + subgraph ParseLines["Parse each line"] + A["Split output by newline"] --> B{"Line empty?"} + B -- Yes --> C["Skip line"] + B -- No --> D["Tokenize with Fields"] + D --> E["Take last token"] + E --> F{"Matches policy text?"} + F -- Yes --> G["Set schedPolicy"] + F -- No --> H{"Matches priority text?"} + H -- Yes --> I["Convert to int (Atoi)"] + I --> J{"Error?"} + J -- Yes --> K["Log error, return InvalidPriority"] + J -- No --> L["Set schedPriority"] + H -- No --> M["Return invalid line error"] + end + subgraph Return["Finish"] + G & L --> N["Return parsed values"] + end +``` + +```mermaid +graph TD + func_parseSchedulingPolicyAndPriority --> strings_Split + func_parseSchedulingPolicyAndPriority --> strings_Fields + func_parseSchedulingPolicyAndPriority --> len + func_parseSchedulingPolicyAndPriority --> strings_Contains + func_parseSchedulingPolicyAndPriority --> strconv_Atoi + func_parseSchedulingPolicyAndPriority --> log_Error + func_parseSchedulingPolicyAndPriority --> fmt_Errf +``` + +```mermaid +graph TD + func_GetProcessCPUScheduling --> func_parseSchedulingPolicyAndPriority +``` + +#### Usage example (Go) + +```go +// Minimal example invoking parseSchedulingPolicyAndPriority +output := "pid 476's current scheduling policy: SCHED_OTHER\n" + + "pid 476's current scheduling priority: 0" + +policy, priority, err := parseSchedulingPolicyAndPriority(output) +if err != nil { + fmt.Println("Error:", err) +} else { + fmt.Printf("Policy: %s, Priority: %d\n", policy, priority) +} +``` + +--- diff --git a/docs/pkg/stringhelper/stringhelper.md b/docs/pkg/stringhelper/stringhelper.md new file mode 100644 index 000000000..796fbbbb6 --- 
/dev/null +++ b/docs/pkg/stringhelper/stringhelper.md @@ -0,0 +1,421 @@ +# Package stringhelper + +**Path**: `pkg/stringhelper` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [HasAtLeastOneCommonElement](#hasatleastonecommonelement) + - [PointerToString](#pointertostring) + - [RemoveEmptyStrings](#removeemptystrings) + - [StringInSlice](#stringinslice) + - [SubSlice](#subslice) + +## Overview + +The stringhelper package supplies small utilities for working with strings and string slices—checking membership, filtering empty values, and handling pointer-to-string conversions for logging. + +### Key Features + +- Determines whether a value exists in a slice (StringInSlice) and supports optional containment checks. +- Removes empty entries from a string slice (RemoveEmptyStrings). +- Generates human‑readable text of any typed pointer (PointerToString). + +### Design Notes + +- Uses Go generics for StringInSlice to accept any comparable type, assuming the caller trims spaces if needed. +- The trimming logic only removes leading/trailing whitespace; it does not normalize internal spacing. +- Functions are designed as pure helpers; they should be called directly rather than wrapped in a struct. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func HasAtLeastOneCommonElement(s1, s2 []string) bool](#hasatleastonecommonelement) | Returns `true` if any string in `s2` is also present in `s1`; otherwise returns `false`. | +| [func PointerToString[T any](p *T) string](#pointertostring) | Provides a default textual form of a value pointed to by `p`, used mainly in log traces for Kubernetes resource pointer fields. | +| [func RemoveEmptyStrings(s []string) []string](#removeemptystrings) | Returns a new slice containing only non‑empty strings from the input slice. | +| [func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool](#stringinslice) | Determines if `str` exists in slice `s`. 
If `containsCheck` is `false`, the comparison is exact after trimming spaces; otherwise it checks for containment of `str` within each element. | +| [func SubSlice(s, sub []string) bool](#subslice) | Determines whether every string in `sub` appears in the slice `s`. | + +## Exported Functions + +### HasAtLeastOneCommonElement + +**HasAtLeastOneCommonElement** - Returns `true` if any string in `s2` is also present in `s1`; otherwise returns `false`. + +Checks whether two string slices share at least one identical element. + +#### Signature (Go) + +```go +func HasAtLeastOneCommonElement(s1, s2 []string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if any string in `s2` is also present in `s1`; otherwise returns `false`. | +| **Parameters** | `s1 []string – first slice to search; s2 []string – second slice whose elements are checked against the first.` | +| **Return value** | `bool – true when at least one common element exists, false otherwise.` | +| **Key dependencies** | • Calls `StringInSlice` from the same package. | +| **Side effects** | None; purely functional and side‑effect free. | +| **How it fits the package** | Provides a simple set intersection test used by other helpers in `stringhelper`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph HasAtLeastOneCommonElement + A["Iterate over s2"] --> B{"Check each v"} + B -->|"StringInSlice(s1, v, false)"| C["Return true"] + B --> D["Continue loop"] + D --> A + C --> E["End"] + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_HasAtLeastOneCommonElement --> func_StringInSlice +``` + +#### Functions calling `HasAtLeastOneCommonElement` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper" +) + +func main() { + a := []string{"apple", "banana", "cherry"} + b := []string{"durian", "banana", "fig"} + + if stringhelper.HasAtLeastOneCommonElement(a, b) { + fmt.Println("The slices share at least one element.") + } else { + fmt.Println("No common elements found.") + } +} +``` + +--- + +### PointerToString + +**PointerToString** - Provides a default textual form of a value pointed to by `p`, used mainly in log traces for Kubernetes resource pointer fields. + +A generic helper that converts a pointer value into its string representation, returning `"nil"` when the pointer is nil. + +#### Signature (Go) + +```go +func PointerToString[T any](p *T) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides a default textual form of a value pointed to by `p`, used mainly in log traces for Kubernetes resource pointer fields. | +| **Parameters** | `p *T` – any type pointer whose underlying value should be stringified. | +| **Return value** | `string` – `"nil"` if `p` is nil; otherwise the result of `fmt.Sprint(*p)`. | +| **Key dependencies** | • `fmt.Sprint` from the standard library. | +| **Side effects** | None – purely functional, no state mutation or I/O. | +| **How it fits the package** | Part of the `stringhelper` utility package; facilitates readable logging across the project where pointer values are common (e.g., security contexts). 
| + +#### Internal workflow + +```mermaid +flowchart TD + A["Check if p is nil"] --> B{"Is nil?"} + B -- Yes --> C["Return \nil\"] + B -- No --> D["Dereference *p"] + D --> E["Call fmt.Sprint(*p)"] + E --> F["Return string result"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_PointerToString --> func_Sprint +``` + +#### Functions calling `PointerToString` + +```mermaid +graph TD + func_IsContainerRunAsNonRoot --> func_PointerToString + func_IsContainerRunAsNonRootUserID --> func_PointerToString +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PointerToString +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper" +) + +func main() { + var b *bool + fmt.Println(stringhelper.PointerToString(b)) // "nil" + + bTrue := true + fmt.Println(stringhelper.PointerToString(&bTrue)) // "true" + + num := 1984 + fmt.Println(stringhelper.PointerToString(&num)) // "1984" +} +``` + +--- + +### RemoveEmptyStrings + +**RemoveEmptyStrings** - Returns a new slice containing only non‑empty strings from the input slice. + +```go +func RemoveEmptyStrings(s []string) []string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a new slice containing only non‑empty strings from the input slice. | +| **Parameters** | `s []string` – Slice of strings to filter. | +| **Return value** | `[]string` – New slice with all empty (`""`) entries removed. | +| **Key dependencies** | • Built‑in `append` function. | +| **Side effects** | None; operates purely on the input data and returns a new slice. | +| **How it fits the package** | Utility helper for normalizing string slices, used internally wherever clean lists of strings are required. 
| + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over s"} + B --> C["Check if str !="] + C -->|"True"| D["Append to result"] + C -->|"False"| E["Skip"] + D --> F["Continue loop"] + E --> F + F --> G{"End of slice?"} + G -->|"Yes"| H["Return result r"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_RemoveEmptyStrings --> func_append +``` + +#### Functions calling `RemoveEmptyStrings` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example + +```go +// Minimal example invoking RemoveEmptyStrings +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper" +) + +func main() { + input := []string{"foo", "", "bar", "", "baz"} + cleaned := stringhelper.RemoveEmptyStrings(input) + fmt.Println(cleaned) // Output: [foo bar baz] +} +``` + +--- + +### StringInSlice + +**StringInSlice** - Determines if `str` exists in slice `s`. If `containsCheck` is `false`, the comparison is exact after trimming spaces; otherwise it checks for containment of `str` within each element. + +Checks whether a given value is present in a slice of comparable string types, optionally performing substring matching. + +--- + +#### Signature (Go) + +```go +func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if `str` exists in slice `s`. If `containsCheck` is `false`, the comparison is exact after trimming spaces; otherwise it checks for containment of `str` within each element. | +| **Parameters** | `s []T` – slice to search
`str T` – value or substring to find
`containsCheck bool` – flag selecting exact vs. contains match | +| **Return value** | `bool` – `true` if a matching element is found, otherwise `false` | +| **Key dependencies** | • `strings.TrimSpace(string(v))`
• `strings.Contains(strings.TrimSpace(string(v)), string(str))`
• implicit conversion to `string` | +| **Side effects** | None – purely functional; no mutation or I/O. | +| **How it fits the package** | Provides a generic helper for slice membership checks used across many other utilities in `pkg/stringhelper`. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"containsCheck?"} + B -- false --> C["Trim v, compare == str"] + B -- true --> D["Trim v, check Contains(v, str)"] + C --> E["Return true if match"] + D --> E + E --> F["End"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_StringInSlice --> strings.TrimSpace + func_StringInSlice --> string + func_StringInSlice --> strings.Contains +``` + +--- + +#### Functions calling `StringInSlice` (Mermaid) + +```mermaid +graph TD + func_getServices --> func_StringInSlice + func_isIstioServiceMeshInstalled --> func_StringInSlice + func_GetTestSuites --> func_StringInSlice + func_BetaRHCOSVersionsFoundToMatch --> func_StringInSlice + func_Node.IsControlPlaneNode --> func_StringInSlice + func_Node.IsWorkerNode --> func_StringInSlice + func_HasAtLeastOneCommonElement --> func_StringInSlice + func_SubSlice --> func_StringInSlice + func_isContainerCapabilitySet --> func_StringInSlice + func_testSysPtraceCapability --> func_StringInSlice + func_TestCrsNamespaces --> func_StringInSlice + func_checkContainCategory --> func_StringInSlice + func_isMultiNamespacedOperator --> func_StringInSlice + func_testOperatorCatalogSourceBundleCount --> func_StringInSlice + func_FsDiff.intersectTargetFolders --> func_StringInSlice +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking StringInSlice +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper" +) + +func main() { + names := []string{"alpha", "beta", "gamma"} + fmt.Println(stringhelper.StringInSlice(names, "beta", false)) // true + fmt.Println(stringhelper.StringInSlice(names, "b", true)) // true 
(contains) + fmt.Println(stringhelper.StringInSlice(names, "delta", false)) // false +} +``` + +--- + +### SubSlice + +**SubSlice** - Determines whether every string in `sub` appears in the slice `s`. + +#### 1) Signature (Go) + +```go +func SubSlice(s, sub []string) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether every string in `sub` appears in the slice `s`. | +| **Parameters** | *`s []string`* – candidate slice.
*`sub []string`* – slice whose elements must be present in `s`. | +| **Return value** | `bool` – `true` if all elements of `sub` are found in `s`; otherwise `false`. | +| **Key dependencies** | • Calls generic helper `StringInSlice[T]` (defined in the same package). | +| **Side effects** | None. The function performs only read‑only checks. | +| **How it fits the package** | Utility for set‑like containment tests, used by other components such as capability validation logic. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A([Iterate over each element v in sub]) + B["StringInSlice"] + C{"check result"} + D([Continue loop]) --> A + E([Return false]) --> F([After loop: Return true]) + A --> B --> C + C --true--> D + C --false--> E +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_SubSlice --> func_StringInSlice["T ~string"] +``` + +#### 5) Functions calling `SubSlice` (Mermaid) + +```mermaid +graph TD + func_updateCapabilitiesFromContainer --> func_SubSlice +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking SubSlice +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper" +) + +func main() { + s := []string{"read", "write", "execute"} + sub := []string{"read", "execute"} + + if stringhelper.SubSlice(s, sub) { + fmt.Println("All elements of sub are present in s") + } else { + fmt.Println("Not all elements of sub are present in s") + } +} +``` + +--- diff --git a/docs/pkg/testhelper/testhelper.md b/docs/pkg/testhelper/testhelper.md new file mode 100644 index 000000000..cfe2ac2b1 --- /dev/null +++ b/docs/pkg/testhelper/testhelper.md @@ -0,0 +1,3834 @@ +# Package testhelper + +**Path**: `pkg/testhelper` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [FailureReasonOut](#failurereasonout) + - [ReportObject](#reportobject) +- [Exported Functions](#exported-functions) + - [Equal](#equal) + - 
[FailureReasonOut.Equal](#failurereasonout.equal) + - [FailureReasonOutTestString](#failurereasonoutteststring) + - [GetDaemonSetFailedToSpawnSkipFn](#getdaemonsetfailedtospawnskipfn) + - [GetNoAffinityRequiredPodsSkipFn](#getnoaffinityrequiredpodsskipfn) + - [GetNoBareMetalNodesSkipFn](#getnobaremetalnodesskipfn) + - [GetNoCPUPinningPodsSkipFn](#getnocpupinningpodsskipfn) + - [GetNoCatalogSourcesSkipFn](#getnocatalogsourcesskipfn) + - [GetNoContainersUnderTestSkipFn](#getnocontainersundertestskipfn) + - [GetNoCrdsUnderTestSkipFn](#getnocrdsundertestskipfn) + - [GetNoDeploymentsUnderTestSkipFn](#getnodeploymentsundertestskipfn) + - [GetNoGuaranteedPodsWithExclusiveCPUsSkipFn](#getnoguaranteedpodswithexclusivecpusskipfn) + - [GetNoHugepagesPodsSkipFn](#getnohugepagespodsskipfn) + - [GetNoIstioSkipFn](#getnoistioskipfn) + - [GetNoNamespacesSkipFn](#getnonamespacesskipfn) + - [GetNoNodesWithRealtimeKernelSkipFn](#getnonodeswithrealtimekernelskipfn) + - [GetNoOperatorCrdsSkipFn](#getnooperatorcrdsskipfn) + - [GetNoOperatorPodsSkipFn](#getnooperatorpodsskipfn) + - [GetNoOperatorsSkipFn](#getnooperatorsskipfn) + - [GetNoPersistentVolumeClaimsSkipFn](#getnopersistentvolumeclaimsskipfn) + - [GetNoPersistentVolumesSkipFn](#getnopersistentvolumesskipfn) + - [GetNoPodsUnderTestSkipFn](#getnopodsundertestskipfn) + - [GetNoRolesSkipFn](#getnorolesskipfn) + - [GetNoSRIOVPodsSkipFn](#getnosriovpodsskipfn) + - [GetNoServicesUnderTestSkipFn](#getnoservicesundertestskipfn) + - [GetNoStatefulSetsUnderTestSkipFn](#getnostatefulsetsundertestskipfn) + - [GetNoStorageClassesSkipFn](#getnostorageclassesskipfn) + - [GetNonOCPClusterSkipFn](#getnonocpclusterskipfn) + - [GetNotEnoughWorkersSkipFn](#getnotenoughworkersskipfn) + - [GetNotIntrusiveSkipFn](#getnotintrusiveskipfn) + - [GetPodsWithoutAffinityRequiredLabelSkipFn](#getpodswithoutaffinityrequiredlabelskipfn) + - [GetSharedProcessNamespacePodsSkipFn](#getsharedprocessnamespacepodsskipfn) + - 
[NewCatalogSourceReportObject](#newcatalogsourcereportobject) + - [NewCertifiedContainerReportObject](#newcertifiedcontainerreportobject) + - [NewClusterOperatorReportObject](#newclusteroperatorreportobject) + - [NewClusterVersionReportObject](#newclusterversionreportobject) + - [NewContainerReportObject](#newcontainerreportobject) + - [NewCrdReportObject](#newcrdreportobject) + - [NewDeploymentReportObject](#newdeploymentreportobject) + - [NewHelmChartReportObject](#newhelmchartreportobject) + - [NewNamespacedNamedReportObject](#newnamespacednamedreportobject) + - [NewNamespacedReportObject](#newnamespacedreportobject) + - [NewNodeReportObject](#newnodereportobject) + - [NewOperatorReportObject](#newoperatorreportobject) + - [NewPodReportObject](#newpodreportobject) + - [NewReportObject](#newreportobject) + - [NewStatefulSetReportObject](#newstatefulsetreportobject) + - [NewTaintReportObject](#newtaintreportobject) + - [ReportObject.AddField](#reportobject.addfield) + - [ReportObject.SetContainerProcessValues](#reportobject.setcontainerprocessvalues) + - [ReportObject.SetType](#reportobject.settype) + - [ReportObjectTestString](#reportobjectteststring) + - [ReportObjectTestStringPointer](#reportobjectteststringpointer) + - [ResultObjectsToString](#resultobjectstostring) + - [ResultToString](#resulttostring) + +## Overview + +The testhelper package supplies utilities for generating and comparing compliance reports in the certsuite test framework, providing helper types, constants, and a rich set of functions to create report objects, evaluate test conditions, and format results. + +### Key Features + +- Convenient construction of typed ReportObject instances representing Kubernetes resources (pods, containers, operators, etc.) 
+- Skip‑condition generators that return closures to decide whether tests should run based on the current TestEnvironment +- Functions for serialising and comparing report collections to produce human‑readable diagnostics + +### Design Notes + +- ReportObject fields are stored as parallel key/value slices rather than a map for deterministic ordering in output +- Skip functions are closures to defer evaluation until test execution, allowing dynamic environment checks +- Comparison helpers treat nil pointers as equal only when both are nil to avoid false positives + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**FailureReasonOut**](#failurereasonout) | Separates the report objects that passed a compliance check from those that failed | +| [**ReportObject**](#reportobject) | Holds a report item’s type together with its ordered key/value metadata | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func Equal(p, other []*ReportObject) bool](#equal) | Determines whether two slices of pointers to `ReportObject` contain the same elements in the same order, treating `nil` pointers as equal only when both are `nil`. | +| [func (p FailureReasonOut) Equal(other FailureReasonOut) bool](#failurereasonout.equal) | Determines if two `FailureReasonOut` instances contain the same compliant and non‑compliant objects. | +| [func FailureReasonOutTestString(p FailureReasonOut) (out string)](#failurereasonoutteststring) | Generates a deterministic, human‑readable string that represents the contents of a `FailureReasonOut` value for use in tests and debugging. | +| [func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getdaemonsetfailedtospawnskipfn) | Generates a closure that indicates if the probe daemonset did not start successfully. The returned function can be used as a skip condition in tests. | +| [func GetNoAffinityRequiredPodsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnoaffinityrequiredpodsskipfn) | Returns a closure that evaluates whether the test should be skipped because no pods with required affinity are present. 
| +| [func (*provider.TestEnvironment)(func() (bool, string))](#getnobaremetalnodesskipfn) | Returns a closure that indicates if tests should be skipped because the test environment contains no bare‑metal nodes. The returned function yields `true` and an explanatory message when the condition is met. | +| [func(*provider.TestEnvironment)(func() (bool, string))](#getnocpupinningpodsskipfn) | Supplies a closure used by tests to skip execution when no CPU‑pinning pods exist. | +| [func GetNoCatalogSourcesSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnocatalogsourcesskipfn) | Returns a closure that decides whether to skip tests when no catalog sources are available in the test environment. | +| [func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnocontainersundertestskipfn) | Returns a function that evaluates whether a test should be skipped because the test environment contains zero containers. The returned function yields a boolean indicating skip status and an accompanying message. | +| [func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment)(func() (bool, string))](#getnocrdsundertestskipfn) | Returns a closure that checks if `env.Crds` is empty; if so, signals the test framework to skip with an explanatory message. | +| [func GetNoDeploymentsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnodeploymentsundertestskipfn) | Returns a closure that signals whether tests requiring deployments should be skipped when the environment contains no deployment objects. | +| [func GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnoguaranteedpodswithexclusivecpusskipfn) | Creates a closure that determines whether tests requiring guaranteed pods with exclusive CPUs should be skipped. It returns `true` if none are found, along with an explanatory message. 
| +| [func GetNoHugepagesPodsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnohugepagespodsskipfn) | Generates a closure that determines if tests requiring huge‑page pods should be skipped. The closure returns `true` and a message when no such pods are present in the test environment. | +| [func GetNoIstioSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnoistioskipfn) | Returns a closure that determines if tests requiring Istio should be skipped because no Istio service mesh was detected. | +| [func GetNoNamespacesSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnonamespacesskipfn) | Returns a function that evaluates the current `TestEnvironment`. If the environment contains no namespaces, the returned function signals to skip the test with an explanatory message. | +| [func GetNoNodesWithRealtimeKernelSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnonodeswithrealtimekernelskipfn) | Returns a closure that evaluates whether the test environment contains any node with a realtime kernel. If none are found, the closure returns `true` and an explanatory message to skip tests that require such nodes. | +| [func GetNoOperatorCrdsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnooperatorcrdsskipfn) | Returns a closure that decides if a test must be skipped because no Operator Custom Resource Definitions (CRDs) are present in the provided test environment. | +| [func GetNoOperatorPodsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnooperatorpodsskipfn) | Returns a closure that checks if the test environment contains any operator pods; if none are present, it signals that tests should be skipped with an explanatory message. | +| [func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnooperatorsskipfn) | Creates a predicate function that checks if `env.Operators` is empty and signals whether tests should be skipped with an explanatory message. 
| +| [func GetNoPersistentVolumeClaimsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnopersistentvolumeclaimsskipfn) | Returns a closure that signals whether to skip tests due to absence of Persistent Volume Claims in the provided environment. | +| [func GetNoPersistentVolumesSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnopersistentvolumesskipfn) | Returns a closure that checks if the `env` contains any persistent volumes. If none are found, the closure reports that the test should be skipped with an explanatory message. | +| [func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnopodsundertestskipfn) | Generates a skip function that evaluates the presence of pods in the supplied test environment. If no pods exist, the test is skipped with an explanatory message. | +| [func GetNoRolesSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnorolesskipfn) | Returns a closure that evaluates whether tests should be skipped due to an empty `Roles` slice in the supplied test environment. | +| [func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnosriovpodsskipfn) | Returns a closure that decides if the test suite should be skipped when no SR‑IOV enabled pods are available. | +| [func GetNoServicesUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnoservicesundertestskipfn) | Supplies a closure that determines whether a test should be skipped because the provided `TestEnvironment` contains no services. | +| [func GetNoStatefulSetsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnostatefulsetsundertestskipfn) | Returns a function that signals to skip tests when the test environment contains no StatefulSet objects. 
| +| [func GetNoStorageClassesSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnostorageclassesskipfn) | Generates a closure that determines if the test should be skipped due to the absence of storage classes. | +| [func GetNonOCPClusterSkipFn() func() (bool, string)](#getnonocpclusterskipfn) | Provides a function that returns `true` and a message when the environment is *not* an OCP cluster, allowing tests to be skipped in non‑OCP contexts. | +| [func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string)](#getnotenoughworkersskipfn) | Generates a closure that determines whether to skip a test because the cluster contains fewer worker nodes than `minWorkerNodes`. | +| [func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getnotintrusiveskipfn) | Creates a closure that indicates whether the current test should be skipped because it is not meant to run in an intrusive environment. | +| [func GetPodsWithoutAffinityRequiredLabelSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getpodswithoutaffinityrequiredlabelskipfn) | Returns a closure that signals whether to skip a test when no pods are found without the required affinity label. | +| [func GetSharedProcessNamespacePodsSkipFn(env *provider.TestEnvironment) func() (bool, string)](#getsharedprocessnamespacepodsskipfn) | Returns a closure that checks if any shared‑process‑namespace pods are present in the given `TestEnvironment`. If none exist, the closure signals to skip the test with an explanatory message. | +| [func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) *ReportObject](#newcatalogsourcereportobject) | Builds a `*ReportObject` that records the status of a catalog source, including its namespace, name, compliance reason, and type. 
| +| [func NewCertifiedContainerReportObject(cii provider.ContainerImageIdentifier, aReason string, isCompliant bool) *ReportObject](#newcertifiedcontainerreportobject) | Builds a `ReportObject` that records compliance information for a container image identified by its digest, repository, tag, and registry. | +| [func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (*ReportObject)](#newclusteroperatorreportobject) | Builds a `ReportObject` for a cluster operator, setting its type and compliance reason. | +| [func NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject)](#newclusterversionreportobject) | Constructs a `ReportObject` for a cluster’s version and records whether the version meets compliance criteria. | +| [func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (*ReportObject)](#newcontainerreportobject) | Builds a `ReportObject` representing the status of a specific container within a pod. It records namespace, pod name, container name, compliance reason, and whether the container complies with policy. | +| [func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (*ReportObject)](#newcrdreportobject) | Instantiates a `ReportObject` describing a Custom Resource Definition (CRD), embedding its name, version, compliance reason, and status. | +| [func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (*ReportObject)](#newdeploymentreportobject) | Instantiates a `ReportObject` representing the status of a Kubernetes deployment, embedding namespace, name, compliance reason, and type. | +| [func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (*ReportObject)](#newhelmchartreportobject) | Constructs a `ReportObject` tailored for a Helm chart, embedding namespace, chart name, compliance reason, and status. 
| +| [func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (*ReportObject)](#newnamespacednamedreportobject) | Builds a `ReportObject` pre‑populated with the supplied reason, type and compliance flag, then adds namespace and name fields. | +| [func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (*ReportObject)](#newnamespacedreportobject) | Builds a `ReportObject` that records the reason for compliance/non‑compliance, its type, and associates it with a specific Kubernetes namespace. | +| [func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject)](#newnodereportobject) | Instantiates a `ReportObject` representing a Kubernetes node, populating it with the node’s name, compliance status and associated reason. | +| [func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (*ReportObject)](#newoperatorreportobject) | Instantiates a `ReportObject` for an operator, populating it with namespace, name, and compliance reason. | +| [func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (*ReportObject)](#newpodreportobject) | Constructs a `ReportObject` that describes compliance for a specific pod. The object includes the namespace, pod name, and a reason indicating whether the pod complies with expected policies. | +| [func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject)](#newreportobject) | Instantiates a `ReportObject`, sets its type and attaches a reason field that indicates compliance or non‑compliance. | +| [func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject)](#newstatefulsetreportobject) | Builds a `ReportObject` representing a StatefulSet, attaching its namespace and name. 
| +| [func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) *ReportObject](#newtaintreportobject) | Instantiates a `ReportObject` that represents the taint status of a node, attaching the node name, the specific taint bit, and an explanation for compliance or non‑compliance. | +| [func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject)](#reportobject.addfield) | Appends the supplied key and value to the `ObjectFieldsKeys` and `ObjectFieldsValues` slices of the receiver, then returns the modified object for chaining. | +| [func (obj *ReportObject) SetContainerProcessValues(aPolicy, aPriority, aCommandLine string) *ReportObject](#reportobject.setcontainerprocessvalues) | Adds the scheduling policy, priority, and command line of a container process to the report object and marks its type as `ContainerProcessType`. | +| [func (obj *ReportObject) SetType(aType string) (*ReportObject)](#reportobject.settype) | Assigns a new value to the `ObjectType` field of the receiver and returns the updated object. | +| [func ReportObjectTestString(p []*ReportObject) (out string)](#reportobjectteststring) | Builds a single string that lists each `ReportObject` in the provided slice, using Go’s `%#v` format for readability. The output is wrapped like `[]testhelper.ReportObject{...}`. | +| [func([]*ReportObject)(string)](#reportobjectteststringpointer) | Produces a formatted string that lists the dereferenced values of each `*ReportObject` in a slice. | +| [func ResultObjectsToString(compliantObject, nonCompliantObject []*ReportObject) (string, error)](#resultobjectstostring) | Serialises compliant and non‑compliant report objects into a JSON representation of `FailureReasonOut`. | +| [func ResultToString(result int) (str string)](#resulttostring) | Translates predefined integer result codes (`SUCCESS`, `FAILURE`, `ERROR`) into human‑readable strings. Unrecognized codes yield an empty string. 
| + +## Structs + +### FailureReasonOut + +Represents the result of a policy check, separating objects that satisfy the rule from those that do not. + +#### Fields + +| Field | Type | Description | +|----------------------|----------------------|-------------| +| `CompliantObjectsOut` | `[]*ReportObject` | Slice of pointers to report objects that met the compliance criteria. | +| `NonCompliantObjectsOut` | `[]*ReportObject` | Slice of pointers to report objects that failed the compliance check. | + +#### Purpose + +The struct is used by test helpers to capture and compare the outcomes of policy evaluations. Each field holds a list of `ReportObject` instances; compliant ones are stored in `CompliantObjectsOut`, while non‑compliant ones go into `NonCompliantObjectsOut`. This separation facilitates clear assertions in tests and debugging. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `Equal` | Determines whether two `FailureReasonOut` values are identical by comparing both the compliant and non‑compliant object slices. | +| `FailureReasonOutTestString` | Generates a formatted string representation of a `FailureReasonOut`, useful for debugging or test output. | + +--- + +--- + +### ReportObject + +A lightweight container that holds a report item’s type and its key‑value metadata. + +--- + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `ObjectType` | `string` | Identifier of the resource or object being reported (e.g., “Deployment”, “Node”). | +| `ObjectFieldsKeys` | `[]string` | Ordered list of field names that describe the object. | +| `ObjectFieldsValues` | `[]string` | Corresponding values for each key; paired with `ObjectFieldsKeys`. | + +--- + +#### Purpose + +`ReportObject` aggregates information needed to represent a compliance or non‑compliance finding in the test suite. +The struct stores: + +1. **Type** – which resource type the report refers to. +2. 
**Metadata** – arbitrary key/value pairs that describe attributes of that resource (namespace, name, version, etc.). + +Functions create and manipulate `ReportObject` instances; the fields are accessed when generating output or comparing reports. + +--- + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewReportObject(aReason, aType string, isCompliant bool)` | Initializes a new `ReportObject`, sets its type, and adds either a compliance or non‑compliance reason field. | +| `AddField(aKey, aValue string)` | Appends a key/value pair to the object’s metadata slices. | +| `SetContainerProcessValues(aPolicy, aPriority, aCommandLine string)` | Adds scheduling policy, priority, and command line fields; sets type to “ContainerProcess”. | +| `SetType(aType string)` | Updates the object’s type field. | +| `NewCatalogSourceReportObject`, `NewCertifiedContainerReportObject`, `NewClusterOperatorReportObject`, `NewClusterVersionReportObject`, `NewContainerReportObject`, `NewCrdReportObject`, `NewDeploymentReportObject`, `NewHelmChartReportObject`, `NewNamespacedNamedReportObject`, `NewNamespacedReportObject`, `NewNodeReportObject`, `NewOperatorReportObject`, `NewPodReportObject`, `NewStatefulSetReportObject`, `NewTaintReportObject` | Factory helpers that construct a `ReportObject` with pre‑populated fields for specific Kubernetes resources. | +| `Equal(p, other []*ReportObject)` | Compares two slices of `ReportObject` pointers for deep equality. | +| `ResultObjectsToString(compliantObject, nonCompliantObject []*ReportObject)` | Serialises compliant and non‑compliant objects into JSON. | +| `ReportObjectTestString`, `ReportObjectTestStringPointer` | Helper functions that format a slice of `ReportObject` instances as Go code for tests. 
| + +--- + +--- + +## Exported Functions + +### Equal + +**Equal** - Determines whether two slices of pointers to `ReportObject` contain the same elements in the same order, treating `nil` pointers as equal only when both are `nil`. + +#### Signature (Go) + +```go +func Equal(p, other []*ReportObject) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether two slices of pointers to `ReportObject` contain the same elements in the same order, treating `nil` pointers as equal only when both are `nil`. | +| **Parameters** | `p []*ReportObject` – first slice; `other []*ReportObject` – second slice. | +| **Return value** | `bool` – `true` if slices are equivalent, otherwise `false`. | +| **Key dependencies** | • Built‑in `len` function (called three times)
• `reflect.DeepEqual` from the standard library for deep comparison of dereferenced values. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a helper to validate test outputs in the `testhelper` package by comparing expected and actual report objects. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckLength["Check length of slices"] + CheckLength --> Cond1{"Lengths equal?"} + Cond1 -- No --> ReturnFalse["Return false"] + Cond1 -- Yes --> Loop["Iterate over indices"] + Loop --> CheckNil{"Both nil?"} + CheckNil -- Yes --> Continue["Continue loop"] + CheckNil -- No --> Cond2{"One nil?"} + Cond2 -- Yes --> ReturnFalse + Cond2 -- No --> DeepEqual{"Deep equal?"} + DeepEqual -- No --> ReturnFalse + DeepEqual -- Yes --> NextIndex["Next index"] + NextIndex --> EndLoop{"More indices?"} + EndLoop -- No --> ReturnTrue["Return true"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Equal --> len + func_Equal --> len + func_Equal --> len + func_Equal --> reflect.DeepEqual +``` + +#### Functions calling `Equal` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Equal +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + var a, b testhelper.ReportObject + p := []*testhelper.ReportObject{&a} + q := []*testhelper.ReportObject{&b} + + equal := testhelper.Equal(p, q) + fmt.Println("Slices are equal:", equal) +} +``` + +--- + +### FailureReasonOut.Equal + +**Equal** - Determines if two `FailureReasonOut` instances contain the same compliant and non‑compliant objects. + +Checks whether two `FailureReasonOut` values are identical by comparing their compliant and non‑compliant object lists. 
+ +#### Signature (Go) + +```go +func (p FailureReasonOut) Equal(other FailureReasonOut) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if two `FailureReasonOut` instances contain the same compliant and non‑compliant objects. | +| **Parameters** | `other FailureReasonOut – the instance to compare against`. | +| **Return value** | `bool – true if both object lists match, false otherwise`. | +| **Key dependencies** | Calls the package‑level `Equal` helper for slice comparison of `CompliantObjectsOut` and `NonCompliantObjectsOut`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides value equality logic used by tests to assert that failure reasons match expected results. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Compare Compliant"} + B --> C["Equal(p.CompliantObjectsOut, other.CompliantObjectsOut)"] + C --> D{"Compare Non‑Compliant"} + D --> E["Equal(p.NonCompliantObjectsOut, other.NonCompliantObjectsOut)"] + E --> F["Return true if both true, else false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FailureReasonOut.Equal --> func_Equal +``` + +*Note: `func_Equal` refers to the helper that compares two slices of objects.* + +#### Functions calling `FailureReasonOut.Equal` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking FailureReasonOut.Equal + +objA := testhelper.NewReportObject("passed", "Pod", true) +objC := testhelper.NewReportObject("failed", "Pod", false) + +out1 := testhelper.FailureReasonOut{ + CompliantObjectsOut: []*testhelper.ReportObject{objA}, + NonCompliantObjectsOut: []*testhelper.ReportObject{objC}, +} +out2 := testhelper.FailureReasonOut{ + CompliantObjectsOut: []*testhelper.ReportObject{objA}, + NonCompliantObjectsOut: []*testhelper.ReportObject{objC}, +} + +equal := out1.Equal(out2) // true +``` + +--- + +### FailureReasonOutTestString + +**FailureReasonOutTestString** - Generates a deterministic, human‑readable string that represents the contents of a `FailureReasonOut` value for use in tests and debugging. + +#### 1) Signature (Go) + +```go +func FailureReasonOutTestString(p FailureReasonOut) (out string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a deterministic, human‑readable string that represents the contents of a `FailureReasonOut` value for use in tests and debugging. | +| **Parameters** | *p* – `FailureReasonOut` – the struct to convert. | +| **Return value** | `out string` – formatted representation of `p`. | +| **Key dependencies** | • `fmt.Sprintf` (standard library)
• `ReportObjectTestStringPointer` (helper that formats slices of pointers to `ReportObject`) | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a test‑only helper for serializing complex report structures, enabling easier assertion messages and logs within the `testhelper` package. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> BuildString["Build initial string: \testhelper.FailureReasonOut{\"] + BuildString --> FormatCompliant["Format CompliantObjectsOut via ReportObjectTestStringPointer"] + FormatCompliant --> AppendCompliant["Append to output"] + AppendCompliant --> FormatNonCompliant["Format NonCompliantObjectsOut via ReportObjectTestStringPointer"] + FormatNonCompliant --> AppendNonCompliant["Append to output"] + AppendNonCompliant --> Close["Close with \}\"] + Close --> Return["Return output string"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_FailureReasonOutTestString --> fmt.Sprintf + func_FailureReasonOutTestString --> func_ReportObjectTestStringPointer +``` + +#### 5) Functions calling `FailureReasonOutTestString` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### 6) Usage example (Go) + +```go +// Minimal example invoking FailureReasonOutTestString +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + // Construct a sample FailureReasonOut + fr := testhelper.FailureReasonOut{ + CompliantObjectsOut: []*testhelper.ReportObject{ + testhelper.NewReportObject("ok", "Pod", true), + }, + NonCompliantObjectsOut: []*testhelper.ReportObject{}, + } + + // Get the string representation for debugging or assertions + s := testhelper.FailureReasonOutTestString(fr) + println(s) +} +``` + +--- + +--- + +### GetDaemonSetFailedToSpawnSkipFn + +**GetDaemonSetFailedToSpawnSkipFn** - Generates a closure that indicates if the probe daemonset did not start successfully. The returned function can be used as a skip condition in tests. + +Returns a test skip function that signals whether the probe daemonset failed to spawn. + +--- + +#### Signature (Go) + +```go +func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a closure that indicates if the probe daemonset did not start successfully. The returned function can be used as a skip condition in tests. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing the `DaemonsetFailedToSpawn` flag. | +| **Return value** | A zero‑argument function returning `(bool, string)` where the boolean is true to skip and the string provides a reason. | +| **Key dependencies** | None (only accesses the `env.DaemonsetFailedToSpawn` field). | +| **Side effects** | No state mutation or I/O; purely reads from the provided environment. | +| **How it fits the package** | Utility for test helpers to conditionally skip tests when a critical daemonset is missing. 
| + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph "GetDaemonSetFailedToSpawnSkipFn" + A["Receive env"] --> B{"env.DaemonsetFailedToSpawn"} + B -- true --> C["Return (true, reason)"] + B -- false --> D["Return (false, )"] + end +``` + +--- + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Functions calling `GetDaemonSetFailedToSpawnSkipFn` + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetDaemonSetFailedToSpawnSkipFn + +env := &provider.TestEnvironment{ + DaemonsetFailedToSpawn: true, // simulate failure +} + +skipFn := testhelper.GetDaemonSetFailedToSpawnSkipFn(env) + +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} +``` + +--- + +### GetNoAffinityRequiredPodsSkipFn + +**GetNoAffinityRequiredPodsSkipFn** - Returns a closure that evaluates whether the test should be skipped because no pods with required affinity are present. + +#### 1) Signature (Go) + +```go +func (*provider.TestEnvironment)(func() (bool, string)) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that evaluates whether the test should be skipped because no pods with required affinity are present. | +| **Parameters** | `env *provider.TestEnvironment` – test environment containing pod information. | +| **Return value** | A function of type `func() (bool, string)` where:
• `bool` indicates if the test should be skipped.
• `string` provides an explanatory message when skipping. | +| **Key dependencies** | • `len` – built‑in to count pods.
• `env.GetAffinityRequiredPods()` – retrieves pods that have required affinity annotations. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used in test helper utilities to conditionally skip tests when required‑affinity pods are absent, simplifying test setup logic. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph GetNoAffinityRequiredPodsSkipFn["GetNoAffinityRequiredPodsSkipFn"] + A["Check length of env.GetAffinityRequiredPods()"] --> B{"Zero?"} + B -->|"Yes"| C["Return true, no pods with required affinity found"] + B -->|"No"| D["Return false,"] + end +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoAffinityRequiredPodsSkipFn --> func_len + func_GetNoAffinityRequiredPodsSkipFn --> func_GetAffinityRequiredPods +``` + +#### 5) Functions calling `GetNoAffinityRequiredPodsSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetNoAffinityRequiredPodsSkipFn +env := &provider.TestEnvironment{} +skipFn := testhelper.GetNoAffinityRequiredPodsSkipFn(env) +shouldSkip, msg := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", msg) +} +``` + +--- + +### GetNoBareMetalNodesSkipFn + +**GetNoBareMetalNodesSkipFn** - Returns a closure that indicates if tests should be skipped because the test environment contains no bare‑metal nodes. The returned function yields `true` and an explanatory message when the condition is met. + +#### Signature (Go) + +```go +func GetNoBareMetalNodesSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that indicates if tests should be skipped because the test environment contains no bare‑metal nodes. The returned function yields `true` and an explanatory message when the condition is met. 
| +| **Parameters** | `env *provider.TestEnvironment` – The test environment from which to retrieve node information. | +| **Return value** | `func() (bool, string)` – A function that returns a boolean indicating skip status and a string containing the reason if skipped. | +| **Key dependencies** | • `len` (built‑in)
• `env.GetBaremetalNodes()` (method of `TestEnvironment`) | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used in test helpers to conditionally skip tests that require bare‑metal nodes, ensuring tests do not fail when such nodes are absent. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> CheckBareMetalNodes + CheckBareMetalNodes -->|"Count == 0"| Skip{"Skip=true"} + CheckBareMetalNodes -->|"Count > 0"| Continue{"Continue=false"} +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoBareMetalNodesSkipFn --> len + func_GetNoBareMetalNodesSkipFn --> GetBaremetalNodes +``` + +#### Functions calling `GetNoBareMetalNodesSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoBareMetalNodesSkipFn +env := &provider.TestEnvironment{ /* ... populate as needed ... */ } +skipFn := GetNoBareMetalNodesSkipFn(env) + +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} +``` + +--- + +### GetNoCPUPinningPodsSkipFn + +**GetNoCPUPinningPodsSkipFn** - Supplies a closure used by tests to skip execution when no CPU‑pinning pods exist. + +A helper that returns a test‑skipping predicate which determines whether any CPU‑pinning pods are present in the environment. + +```go +func(*provider.TestEnvironment)(func() (bool, string)) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Supplies a closure used by tests to skip execution when no CPU‑pinning pods exist. | +| **Parameters** | `env *provider.TestEnvironment` – the test environment containing pod information. | +| **Return value** | A function that returns `(bool, string)`: `true` signals “skip this test” and the accompanying message; otherwise `false`. | +| **Key dependencies** | • Calls built‑in `len` to count pods.
• Invokes `env.GetCPUPinningPodsWithDpdk()` to retrieve relevant pods. | +| **Side effects** | None – purely functional, no state mutation or I/O. | +| **How it fits the package** | Used by test suites in the `testhelper` package to conditionally skip tests that require CPU‑pinning pods when none are available. | + +#### Internal workflow + +```mermaid +flowchart TD + subgraph Closure + CheckPods --> CountPods + CountPods -->|">0"| ReturnFalse + CountPods -->|"=0"| ReturnTrue + end + Start --> CheckPods +``` + +- The returned closure first obtains the list of CPU‑pinning pods via `env.GetCPUPinningPodsWithDpdk()`. +- It counts them with `len`. +- If the count is zero, it returns `(true, "no CPU pinning pods to check found")`; otherwise it returns `(false, "")`. + +#### Function dependencies + +```mermaid +graph TD + func_GetNoCPUPinningPodsSkipFn --> len + func_GetNoCPUPinningPodsSkipFn --> env.GetCPUPinningPodsWithDpdk +``` + +#### Functions calling `GetNoCPUPinningPodsSkipFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoCPUPinningPodsSkipFn +env := provider.NewTestEnvironment() +skipFn := testhelper.GetNoCPUPinningPodsSkipFn(env) + +// In a test: +if skip, msg := skipFn(); skip { + t.Skip(msg) +} +``` + +This demonstrates how the returned function can be used to conditionally skip tests when no CPU‑pinning pods are available. + +--- + +### GetNoCatalogSourcesSkipFn + +**GetNoCatalogSourcesSkipFn** - Returns a closure that decides whether to skip tests when no catalog sources are available in the test environment. + +#### Signature (Go) + +```go +func GetNoCatalogSourcesSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that decides whether to skip tests when no catalog sources are available in the test environment. 
| +| **Parameters** | `env *provider.TestEnvironment` – The test environment containing catalog source data. | +| **Return value** | A function returning `(bool, string)` where `true` indicates the test should be skipped and the string provides a skip reason. | +| **Key dependencies** | Calls the built‑in `len` to check the slice length of `env.AllCatalogSources`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used in test helper utilities to conditionally bypass tests that require catalog sources when none are configured. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Return closure"] --> B{"len(env.AllCatalogSources) == 0"} + B -- Yes --> C["Return true, no catalog sources found"] + B -- No --> D["Return false,"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoCatalogSourcesSkipFn --> builtin_len +``` + +#### Functions calling `GetNoCatalogSourcesSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoCatalogSourcesSkipFn +env := &provider.TestEnvironment{AllCatalogSources: []string{}} +skipFn := testhelper.GetNoCatalogSourcesSkipFn(env) + +if skip, reason := skipFn(); skip { + fmt.Println("Skipping test:", reason) +} else { + // proceed with the test +} +``` + +--- + +### GetNoContainersUnderTestSkipFn + +**GetNoContainersUnderTestSkipFn** - Returns a function that evaluates whether a test should be skipped because the test environment contains zero containers. The returned function yields a boolean indicating skip status and an accompanying message. 
+ +**Determines whether a test should be skipped when no containers are present in the environment.** + +--- + +#### Signature (Go) + +```go +func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a function that evaluates whether a test should be skipped because the test environment contains zero containers. The returned function yields a boolean indicating skip status and an accompanying message. | +| **Parameters** | `env *provider.TestEnvironment` – A pointer to the test environment struct, used to inspect the `Containers` slice. | +| **Return value** | `func() (bool, string)` – An anonymous function that returns `true` with a message when there are no containers; otherwise `false` and an empty string. | +| **Key dependencies** | • Calls the built‑in `len` function on `env.Containers`. | +| **Side effects** | None. The function only reads from the environment and produces a closure without mutating state or performing I/O. | +| **How it fits the package** | Used in test setup to conditionally skip container‑related tests when the target environment lacks containers, improving test robustness. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetNoContainersUnderTestSkipFn --> AnonymousFunction + AnonymousFunction --> ConditionalCheck + ConditionalCheck --> ReturnTrueOrFalse +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoContainersUnderTestSkipFn --> len +``` + +--- + +#### Functions calling `GetNoContainersUnderTestSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoContainersUnderTestSkipFn +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + env := &provider.TestEnvironment{Containers: []string{}} + skipFn := testhelper.GetNoContainersUnderTestSkipFn(env) + if skip, msg := skipFn(); skip { + fmt.Println("Skipping test:", msg) + } else { + fmt.Println("Proceeding with test") + } +} +``` + +--- + +### GetNoCrdsUnderTestSkipFn + +**GetNoCrdsUnderTestSkipFn** - Returns a closure that checks if `env.Crds` is empty; if so, signals the test framework to skip with an explanatory message. + +Provides a skip function that determines whether tests should be skipped when no Custom Resource Definitions (CRDs) are present in the test environment. + +```go +func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment)(func() (bool, string)) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that checks if `env.Crds` is empty; if so, signals the test framework to skip with an explanatory message. | +| **Parameters** | `env *provider.TestEnvironment` – the test environment containing a slice of CRDs. | +| **Return value** | A function `func() (bool, string)` that returns `(true, "no roles to check")` when there are no CRDs, otherwise `(false, "")`. | +| **Key dependencies** | *calls* → built‑in `len`; accesses the `Crds` field of `TestEnvironment`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by test helpers to conditionally skip tests that require CRDs when none are available in the environment. 
| + +#### Internal workflow + +```mermaid +flowchart TD + A["Check len(env.Crds)"] --> B{"len == 0"} + B -- Yes --> C["Return true, no roles to check"] + B -- No --> D["Return false,"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetNoCrdsUnderTestSkipFn --> len +``` + +#### Functions calling `GetNoCrdsUnderTestSkipFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoCrdsUnderTestSkipFn +env := &provider.TestEnvironment{Crds: []string{}} +skipFn := GetNoCrdsUnderTestSkipFn(env) + +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} +``` + +--- + +### GetNoDeploymentsUnderTestSkipFn + +**GetNoDeploymentsUnderTestSkipFn** - Returns a closure that signals whether tests requiring deployments should be skipped when the environment contains no deployment objects. + +#### Signature (Go) + +```go +func GetNoDeploymentsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that signals whether tests requiring deployments should be skipped when the environment contains no deployment objects. | +| **Parameters** | `env *provider.TestEnvironment` – test environment holding the list of deployments to evaluate. | +| **Return value** | A function that, when invoked, returns `(bool, string)` where the boolean indicates skip status and the string provides a reason. | +| **Key dependencies** | Calls the built‑in `len` function to inspect `env.Deployments`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by test helpers to conditionally skip tests that depend on deployment objects, ensuring graceful handling of empty environments. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Invoke GetNoDeploymentsUnderTestSkipFn"] --> B["Return closure"] + B --> C{"Check len(env.Deployments)"} + C -- 0 --> D["Return true, “no deployments to check found”"] + C -- >0 --> E["Return false, “”"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoDeploymentsUnderTestSkipFn --> builtin_len +``` + +#### Functions calling `GetNoDeploymentsUnderTestSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoDeploymentsUnderTestSkipFn +env := &provider.TestEnvironment{Deployments: []deploymentType{}} +skipFn := testhelper.GetNoDeploymentsUnderTestSkipFn(env) +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} +``` + +--- + +### GetNoGuaranteedPodsWithExclusiveCPUsSkipFn + +**GetNoGuaranteedPodsWithExclusiveCPUsSkipFn** - Creates a closure that determines whether tests requiring guaranteed pods with exclusive CPUs should be skipped. It returns `true` if none are found, along with an explanatory message. + +Returns a skip function that signals when there are no guaranteed pods with exclusive CPUs in the test environment. + +#### Signature (Go) + +```go +func GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a closure that determines whether tests requiring guaranteed pods with exclusive CPUs should be skipped. It returns `true` if none are found, along with an explanatory message. | +| **Parameters** | `env *provider.TestEnvironment` – the test environment from which to query existing pods. | +| **Return value** | A function of type `func() (bool, string)` that evaluates the skip condition at runtime. 
The first return is a boolean indicating whether to skip; the second is an optional message. | +| **Key dependencies** | • Calls Go’s built‑in `len` function.
• Invokes `env.GetGuaranteedPodsWithExclusiveCPUs()` from the `provider.TestEnvironment`. | +| **Side effects** | None – it only reads state and returns a closure; no mutation or I/O occurs. | +| **How it fits the package** | In the `testhelper` package, this helper supports conditional test execution by providing skip logic for scenarios that require exclusive‑CPU pods. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetNoGuaranteedPodsWithExclusiveCPUsSkipFn"] --> B["Return closure"] + B --> C{"Check len(env.GetGuaranteedPodsWithExclusiveCPUs())"} + C -- 0 --> D["Return true, no pods with exclusive CPUs found"] + C -- >0 --> E["Return false,"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoGuaranteedPodsWithExclusiveCPUsSkipFn --> func_len + func_GetNoGuaranteedPodsWithExclusiveCPUsSkipFn --> func_GetGuaranteedPodsWithExclusiveCPUs +``` + +#### Functions calling `GetNoGuaranteedPodsWithExclusiveCPUsSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoGuaranteedPodsWithExclusiveCPUsSkipFn + +env := provider.NewTestEnvironment() +skipFn := testhelper.GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(env) + +// Later, during a test: +if skip, msg := skipFn(); skip { + t.Skip(msg) // or handle accordingly +} +``` + +--- + +### GetNoHugepagesPodsSkipFn + +**GetNoHugepagesPodsSkipFn** - Generates a closure that determines if tests requiring huge‑page pods should be skipped. The closure returns `true` and a message when no such pods are present in the test environment. + +Returns a skip function that signals whether tests should be skipped when no pods request huge pages. 
+ +#### Signature (Go) + +```go +func GetNoHugepagesPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a closure that determines if tests requiring huge‑page pods should be skipped. The closure returns `true` and a message when no such pods are present in the test environment. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing pod information. | +| **Return value** | A function of type `func() (bool, string)` that evaluates the presence of huge‑page‑requesting pods. | +| **Key dependencies** | - Calls built‑in `len`.
- Invokes `env.GetHugepagesPods()` to fetch relevant pod list. |
+| **Side effects** | No state mutation or I/O; purely functional evaluation. |
+| **How it fits the package** | Located in `pkg/testhelper`, this helper supports test suites by providing a standardized skip condition for scenarios that require huge‑page pods. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    GetNoHugepagesPodsSkipFn --> ReturnClosure
+    ReturnClosure --> EvaluateLen
+    EvaluateLen -->|"len == 0"| SkipTrue["true, no pods requesting hugepages found"]
+    EvaluateLen -->|"len > 0"| SkipFalse["false, empty string"]
+```
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    GetNoHugepagesPodsSkipFn --> len
+    GetNoHugepagesPodsSkipFn --> GetHugepagesPods
+```
+
+#### Functions calling `GetNoHugepagesPodsSkipFn` (Mermaid)
+
+None – this function is currently not referenced elsewhere in the package.
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking GetNoHugepagesPodsSkipFn
+import (
+    "fmt"
+
+    "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper"
+    "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider"
+)
+
+func main() {
+    env := &provider.TestEnvironment{}
+    skipFn := testhelper.GetNoHugepagesPodsSkipFn(env)
+    shouldSkip, msg := skipFn()
+    if shouldSkip {
+        fmt.Println("Skipping test:", msg)
+    } else {
+        fmt.Println("Proceeding with test")
+    }
+}
+```
+
+---
+
+### GetNoIstioSkipFn
+
+**GetNoIstioSkipFn** - Returns a closure that determines if tests requiring Istio should be skipped because no Istio service mesh was detected.
+
+The function produces a test‑skipping predicate that checks whether an Istio service mesh is present in the supplied test environment.
+ +--- + +#### Signature (Go) + +```go +func GetNoIstioSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that determines if tests requiring Istio should be skipped because no Istio service mesh was detected. | +| **Parameters** | `env` – *provider.TestEnvironment* – the test environment configuration to inspect. | +| **Return value** | A function returning `(bool, string)`: `true` indicates the test should skip, and the accompanying message explains why. | +| **Key dependencies** | *provider.TestEnvironment* (used for the `IstioServiceMeshFound` field). | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a reusable predicate for conditionally skipping tests when Istio is absent, used by other test helper utilities. | + +--- + +#### Internal workflow + +```mermaid +flowchart TD + A["GetNoIstioSkipFn"] --> B["Return closure"] + B --> C{"Check env.IstioServiceMeshFound"} + C -- false --> D["return true, no istio service mesh found"] + C -- true --> E["return false,"] +``` + +--- + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Functions calling `GetNoIstioSkipFn` + +None – this function is currently not referenced elsewhere in the package. 
+ +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoIstioSkipFn +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + env := &provider.TestEnvironment{IstioServiceMeshFound: false} + skipFn := testhelper.GetNoIstioSkipFn(env) + + if skip, reason := skipFn(); skip { + fmt.Println("Skipping test:", reason) + } else { + fmt.Println("Running test") + } +} +``` + +--- + +### GetNoNamespacesSkipFn + +**GetNoNamespacesSkipFn** - Returns a function that evaluates the current `TestEnvironment`. If the environment contains no namespaces, the returned function signals to skip the test with an explanatory message. + +#### Signature (Go) + +```go +func GetNoNamespacesSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a function that evaluates the current `TestEnvironment`. If the environment contains no namespaces, the returned function signals to skip the test with an explanatory message. | +| **Parameters** | `env *provider.TestEnvironment` – The test environment whose namespace list is inspected. | +| **Return value** | A closure of type `func() (bool, string)` that, when invoked, returns:
• `true` and a skip reason if no namespaces are present.
• `false` and an empty message otherwise. | +| **Key dependencies** | • Calls the built‑in `len` function on `env.Namespaces`. | +| **Side effects** | None; purely functional, no state mutation or I/O. | +| **How it fits the package** | Part of the `testhelper` utilities that provide conditional test execution logic based on environment configuration. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph "Closure Creation" + A["Return closure"] --> B{"Check namespace count"} + end + B -- len == 0 --> C["Return true, skip message"] + B -- otherwise --> D["Return false, empty string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoNamespacesSkipFn --> func_len +``` + +#### Functions calling `GetNoNamespacesSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoNamespacesSkipFn +env := &provider.TestEnvironment{Namespaces: []string{}} +skipFn := GetNoNamespacesSkipFn(env) + +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} else { + // Proceed with the test +} +``` + +--- + +### GetNoNodesWithRealtimeKernelSkipFn + +**GetNoNodesWithRealtimeKernelSkipFn** - Returns a closure that evaluates whether the test environment contains any node with a realtime kernel. If none are found, the closure returns `true` and an explanatory message to skip tests that require such nodes. + +Retrieves a skip function that indicates whether any node in the test environment uses a realtime kernel; if none do, the returned function signals a skip. + +--- + +#### Signature (Go) + +```go +func GetNoNodesWithRealtimeKernelSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that evaluates whether the test environment contains any node with a realtime kernel. 
If none are found, the closure returns `true` and an explanatory message to skip tests that require such nodes. | +| **Parameters** | `env *provider.TestEnvironment` – the test environment whose nodes are inspected. | +| **Return value** | `func() (bool, string)` – a function that when called yields `(skip bool, reason string)`. | +| **Key dependencies** | • Calls `node.IsRTKernel()` on each node.
• Relies on the `provider.TestEnvironment` type to expose `Nodes`. |
+| **Side effects** | None; purely functional. No state mutation or I/O occurs. |
+| **How it fits the package** | In the `testhelper` package, this helper provides a reusable skip predicate for tests that require realtime kernel nodes, allowing those tests to be conditionally omitted when the environment lacks such nodes. |
+
+---
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    GetNoNodesWithRealtimeKernelSkipFn --> forEachNode["Iterate over env.Nodes"]
+    forEachNode --> checkRT["Check node.IsRTKernel()"]
+    checkRT -- true --> returnFalse["Return false, empty string"]
+    checkRT -- false --> continue["Continue loop"]
+    forEachNode -- end of loop --> returnTrue["Return true, no nodes with realtime kernel type found"]
+```
+
+---
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    GetNoNodesWithRealtimeKernelSkipFn --> IsRTKernel["func IsRTKernel() bool"]
+```
+
+---
+
+#### Functions calling `GetNoNodesWithRealtimeKernelSkipFn` (Mermaid)
+
+None – this function is currently not referenced elsewhere in the package.
+
+---
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking GetNoNodesWithRealtimeKernelSkipFn
+
+env := provider.NewTestEnvironment() // Assume this creates an environment with nodes.
+skipFn := testhelper.GetNoNodesWithRealtimeKernelSkipFn(env)
+
+// Later, within a test:
+if skip, reason := skipFn(); skip {
+    t.Skip(reason) // Skip the test if no realtime kernel node is present.
+}
+```
+
+---
+
+### GetNoOperatorCrdsSkipFn
+
+**GetNoOperatorCrdsSkipFn** - Returns a closure that decides if a test must be skipped because no Operator Custom Resource Definitions (CRDs) are present in the provided test environment.
+ +#### Signature (Go) + +```go +func GetNoOperatorCrdsSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that decides if a test must be skipped because no Operator Custom Resource Definitions (CRDs) are present in the provided test environment. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing a slice of CRDs (`Crds`). | +| **Return value** | A function returning `(bool, string)` where the boolean indicates whether to skip and the string provides an explanatory message. | +| **Key dependencies** | Calls the built‑in `len` function to inspect the length of `env.Crds`. | +| **Side effects** | None; purely functional with no state mutation or I/O. | +| **How it fits the package** | Part of the *testhelper* utilities, used to gate tests that require Operator CRDs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetNoOperatorCrdsSkipFn"] --> B["Return closure"] + B --> C{"len(env.Crds) == 0"} + C -- Yes --> D["Return true, no operator crds found"] + C -- No --> E["Return false,"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoOperatorCrdsSkipFn --> len +``` + +#### Functions calling `GetNoOperatorCrdsSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoOperatorCrdsSkipFn +env := &provider.TestEnvironment{Crds: []string{}} +skipFn := GetNoOperatorCrdsSkipFn(env) + +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} else { + fmt.Println("Running test") +} +``` + +--- + +### GetNoOperatorPodsSkipFn + +**GetNoOperatorPodsSkipFn** - Returns a closure that checks if the test environment contains any operator pods; if none are present, it signals that tests should be skipped with an explanatory message. 
+ +#### Signature (Go) + +```go +func GetNoOperatorPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that checks if the test environment contains any operator pods; if none are present, it signals that tests should be skipped with an explanatory message. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment holding pod information. | +| **Return value** | A function returning `(bool, string)` where `true` indicates the test should skip and the string provides a reason. | +| **Key dependencies** | • Calls built‑in `len` to count entries in `env.CSVToPodListMap`. | +| **Side effects** | None – purely functional; no mutation or I/O. | +| **How it fits the package** | Used by test helpers to conditionally skip tests that require operator pods when none are deployed. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"env.CSVToPodListMap empty?"} + B -- Yes --> C["Return true, no operator pods found"] + B -- No --> D["Return false,"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoOperatorPodsSkipFn --> len +``` + +#### Functions calling `GetNoOperatorPodsSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoOperatorPodsSkipFn +env := &provider.TestEnvironment{ + CSVToPodListMap: map[string][]string{}, // empty map simulates no operator pods +} +skipFn := GetNoOperatorPodsSkipFn(env) +shouldSkip, reason := skipFn() +fmt.Printf("Should skip: %v; Reason: %s\n", shouldSkip, reason) +// Output: Should skip: true; Reason: no operator pods found +``` + +--- + +### GetNoOperatorsSkipFn + +**GetNoOperatorsSkipFn** - Creates a predicate function that checks if `env.Operators` is empty and signals whether tests should be skipped with an explanatory message. 
+
+Return a closure that determines whether to skip tests when no operators are present in the test environment.
+
+```go
+func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string)
+```
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Creates a predicate function that checks if `env.Operators` is empty and signals whether tests should be skipped with an explanatory message. |
+| **Parameters** | `env *provider.TestEnvironment` – the test environment containing operator information. |
+| **Return value** | A closure `func() (bool, string)` that returns `(true, "no operators found")` when no operators are present; otherwise `(false, "")`. |
+| **Key dependencies** | Calls the built‑in function `len`. |
+| **Side effects** | None – purely functional. |
+| **How it fits the package** | Provides a reusable skip condition for test suites that depend on operator presence within the `testhelper` utilities. |
+
+#### Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    subgraph "Closure Creation"
+        A["GetNoOperatorsSkipFn"] --> B{"Return closure"}
+    end
+    subgraph "When Closure Executes"
+        B --> C{"Check len(env.Operators)"}
+        C -->|">0"| D["return false, empty string"]
+        C -->|"=0"| E["return true, no operators found"]
+    end
+```
+
+#### Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_GetNoOperatorsSkipFn --> len
+```
+
+#### Functions calling `GetNoOperatorsSkipFn` (Mermaid)
+
+None – this function is currently not referenced elsewhere in the package.
+ +#### Usage example (Go) + +```go +// Minimal example invoking GetNoOperatorsSkipFn +env := &provider.TestEnvironment{ + Operators: []string{}, // no operators present +} +skipFn := GetNoOperatorsSkipFn(env) +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping tests:", reason) +} else { + fmt.Println("Running tests") +} +``` + +--- + +### GetNoPersistentVolumeClaimsSkipFn + +**GetNoPersistentVolumeClaimsSkipFn** - Returns a closure that signals whether to skip tests due to absence of Persistent Volume Claims in the provided environment. + +#### Signature (Go) + +```go +func GetNoPersistentVolumeClaimsSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that signals whether to skip tests due to absence of Persistent Volume Claims in the provided environment. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing PVC data. | +| **Return value** | `func() (bool, string)` – A function returning a boolean flag (`true` if skipping) and an explanatory message. | +| **Key dependencies** | Calls the built‑in `len` to inspect `env.PersistentVolumeClaims`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Utility in `testhelper` for conditionally bypassing tests that require PVCs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetNoPersistentVolumeClaimsSkipFn --> Closure + Closure --> Decision{"len(env.PersistentVolumeClaims)==0"} + Decision -- Yes --> Skip["Return true, no persistent volume claims found"] + Decision -- No --> Continue["Return false,"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoPersistentVolumeClaimsSkipFn --> len +``` + +#### Functions calling `GetNoPersistentVolumeClaimsSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking GetNoPersistentVolumeClaimsSkipFn +env := &provider.TestEnvironment{ + PersistentVolumeClaims: []v1.PersistentVolumeClaim{ /* ... */ }, +} +skipFn := testhelper.GetNoPersistentVolumeClaimsSkipFn(env) + +shouldSkip, msg := skipFn() +if shouldSkip { + fmt.Println("Skipping tests:", msg) +} else { + // proceed with tests +} +``` + +--- + +### GetNoPersistentVolumesSkipFn + +**GetNoPersistentVolumesSkipFn** - Returns a closure that checks if the `env` contains any persistent volumes. If none are found, the closure reports that the test should be skipped with an explanatory message. + +Creates a test‑skip function that signals whether any persistent volumes are available in the supplied environment. + +--- + +#### Signature (Go) + +```go +func GetNoPersistentVolumesSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that checks if the `env` contains any persistent volumes. If none are found, the closure reports that the test should be skipped with an explanatory message. | +| **Parameters** | `env *provider.TestEnvironment` – the test environment whose `PersistentVolumes` slice is inspected. | +| **Return value** | `func() (bool, string)` – a function returning a boolean (`true` to skip) and a string explaining the reason. | +| **Key dependencies** | • Calls the built‑in `len` function
• Accesses `env.PersistentVolumes` field | +| **Side effects** | No state mutations or I/O; purely read‑only inspection of `env`. | +| **How it fits the package** | Part of the test helper utilities, providing a reusable skip condition for tests that require persistent volumes. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"len(env.PersistentVolumes) == 0"} + B -- Yes --> C["Return true, no persistent volumes to check found"] + B -- No --> D["Return false,"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoPersistentVolumesSkipFn --> len +``` + +--- + +#### Functions calling `GetNoPersistentVolumesSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoPersistentVolumesSkipFn +env := &provider.TestEnvironment{PersistentVolumes: []string{}} +skipFn := GetNoPersistentVolumesSkipFn(env) +shouldSkip, reason := skipFn() +fmt.Printf("Should skip? %v – Reason: %s\n", shouldSkip, reason) +``` + +--- + +### GetNoPodsUnderTestSkipFn + +**GetNoPodsUnderTestSkipFn** - Generates a skip function that evaluates the presence of pods in the supplied test environment. If no pods exist, the test is skipped with an explanatory message. + +**Returns a closure that determines whether the current test should be skipped because no pods are available to inspect.** + +--- + +#### Signature (Go) + +```go +func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a skip function that evaluates the presence of pods in the supplied test environment. If no pods exist, the test is skipped with an explanatory message. | +| **Parameters** | `env *provider.TestEnvironment` – the test context containing a slice of pod objects to examine. 
| +| **Return value** | A closure `func() (bool, string)` that returns:
• `true` and a reason when `len(env.Pods) == 0`;
• `false` and an empty string otherwise. | +| **Key dependencies** | Calls the built‑in `len` function to count pods. | +| **Side effects** | None; purely functional, no mutation or I/O. | +| **How it fits the package** | Provides a reusable skip condition for tests that require at least one pod to be present, simplifying test setup in the `testhelper` package. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckPods + CheckPods -->|"len(env.Pods) == 0"| Skip + CheckPods -->|"otherwise"| Continue +``` + +--- + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Functions calling `GetNoPodsUnderTestSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoPodsUnderTestSkipFn + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +// Assume env is populated elsewhere. +var env *provider.TestEnvironment + +skipFn := testhelper.GetNoPodsUnderTestSkipFn(env) + +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} +``` + +--- + +### GetNoRolesSkipFn + +**GetNoRolesSkipFn** - Returns a closure that evaluates whether tests should be skipped due to an empty `Roles` slice in the supplied test environment. + +Creates a skip function that indicates whether test execution should be skipped when the environment has no roles defined. + +--- + +#### Signature (Go) + +```go +func GetNoRolesSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that evaluates whether tests should be skipped due to an empty `Roles` slice in the supplied test environment. 
| +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing a `Roles` field. | +| **Return value** | A function `func() (bool, string)` that returns `(true, “message”)` if no roles are present, otherwise `(false, “”)`. | +| **Key dependencies** | • Calls the built‑in `len` function.
• Relies on the `provider.TestEnvironment` type. | +| **Side effects** | No state mutation or I/O; purely functional evaluation. | +| **How it fits the package** | Used by test setup routines to conditionally skip role‑related tests when configuration is incomplete. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckRoles["Check if `len(env.Roles) == 0`"] + CheckRoles -- true --> Skip{"Skip?"} + CheckRoles -- false --> Continue{"Continue?"} + Skip --> ReturnTrue["Return `(true, \There are no roles to check. Please check config.\)`"] + Continue --> ReturnFalse["Return `(false, \\)`"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoRolesSkipFn --> func_len +``` + +--- + +#### Functions calling `GetNoRolesSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoRolesSkipFn +env := &provider.TestEnvironment{Roles: []string{}} +skipFn := testhelper.GetNoRolesSkipFn(env) + +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping tests:", reason) +} else { + fmt.Println("Proceeding with role‑dependent tests") +} +``` + +--- + +### GetNoSRIOVPodsSkipFn + +**GetNoSRIOVPodsSkipFn** - Returns a closure that decides if the test suite should be skipped when no SR‑IOV enabled pods are available. + +#### Signature (Go) + +```go +func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that decides if the test suite should be skipped when no SR‑IOV enabled pods are available. | +| **Parameters** | `env` (`*provider.TestEnvironment`) – Environment providing access to cluster state. | +| **Return value** | A function returning `(bool, string)`. The boolean indicates skip status; the string is an optional explanation. 
| +| **Key dependencies** | • Calls `GetPodsUsingSRIOV()` on the environment.
• Uses `fmt.Sprintf` for error messages.
• Uses built‑in `len` to count pods. | +| **Side effects** | None – purely functional; only reads from the provided environment. | +| **How it fits the package** | Part of the `testhelper` utilities, enabling conditional skipping of tests that require SR‑IOV functionality. | + +#### Internal workflow + +```mermaid +flowchart TD + GetNoSRIOVPodsSkipFn --> ReturnClosure + ReturnClosure --> CallGetPodsUsingSRIOV + CallGetPodsUsingSRIOV -->|"error"| ReturnTrueWithMsg + CallGetPodsUsingSRIOV -->|"no pods"| ReturnTrueWithMsg2 + CallGetPodsUsingSRIOV -->|"pods exist"| ReturnFalse +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetNoSRIOVPodsSkipFn --> func_GetPodsUsingSRIOV + func_GetNoSRIOVPodsSkipFn --> fmt_Sprintf + func_GetNoSRIOVPodsSkipFn --> len +``` + +#### Functions calling `GetNoSRIOVPodsSkipFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoSRIOVPodsSkipFn +env := &provider.TestEnvironment{ /* setup fields */ } + +skipFn := testhelper.GetNoSRIOVPodsSkipFn(env) +shouldSkip, reason := skipFn() +if shouldSkip { + fmt.Println("Skipping tests:", reason) +} else { + fmt.Println("Proceeding with SR‑IOV dependent tests") +} +``` + +--- + +### GetNoServicesUnderTestSkipFn + +**GetNoServicesUnderTestSkipFn** - Supplies a closure that determines whether a test should be skipped because the provided `TestEnvironment` contains no services. + +#### Signature (Go) + +```go +func GetNoServicesUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Supplies a closure that determines whether a test should be skipped because the provided `TestEnvironment` contains no services. | +| **Parameters** | `env *provider.TestEnvironment` – The environment whose services are inspected. | +| **Return value** | A function of type `func() (bool, string)` where:
• `true` indicates the test should be skipped.
• The accompanying string is a skip reason. | +| **Key dependencies** | • Calls built‑in `len` to count services. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by test helpers to conditionally skip tests that require at least one service in the environment. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckLen["Check len(env.Services)"] + CheckLen -- "== 0" --> Skip["return true, \no services to check found\"] + CheckLen -- "> 0" --> Continue["return false, \\"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoServicesUnderTestSkipFn --> len +``` + +#### Functions calling `GetNoServicesUnderTestSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoServicesUnderTestSkipFn +env := &provider.TestEnvironment{ /* initialize with or without services */ } +skipCheck := testhelper.GetNoServicesUnderTestSkipFn(env) + +if skip, reason := skipCheck(); skip { + fmt.Println("Skipping test:", reason) +} else { + // proceed with the test +} +``` + +--- + +### GetNoStatefulSetsUnderTestSkipFn + +**GetNoStatefulSetsUnderTestSkipFn** - Returns a function that signals to skip tests when the test environment contains no StatefulSet objects. + +Provides a test‑skipping predicate that determines whether any StatefulSet resources exist in the supplied test environment. + +#### Signature (Go) + +```go +func GetNoStatefulSetsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a function that signals to skip tests when the test environment contains no StatefulSet objects. | +| **Parameters** | `env` – pointer to `provider.TestEnvironment`, containing a slice of StatefulSets (`StatefulSets`). 
| +| **Return value** | A closure returning `(bool, string)`: *true* and an explanatory message if the slice is empty; otherwise *false* with an empty message. | +| **Key dependencies** | Calls built‑in `len` to inspect the length of `env.StatefulSets`. | +| **Side effects** | No state mutation or I/O; purely deterministic based on input. | +| **How it fits the package** | Used by test helpers to conditionally skip tests that require StatefulSet resources, improving test suite efficiency. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph Closure["Returned function"] + A1{"Check env.StatefulSets length"} + A2 -->|"==0"| B1["Return true, message"] + A2 -->|">0"| C1["Return false, \\"] + A1 --> A2 + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoStatefulSetsUnderTestSkipFn --> len +``` + +#### Functions calling `GetNoStatefulSetsUnderTestSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoStatefulSetsUnderTestSkipFn +env := &provider.TestEnvironment{ + StatefulSets: []appsv1.StatefulSet{}, // or populate as needed +} +skipFn := GetNoStatefulSetsUnderTestSkipFn(env) + +if skip, msg := skipFn(); skip { + fmt.Println("Skipping test:", msg) +} else { + fmt.Println("Proceeding with tests") +} +``` + +--- + +### GetNoStorageClassesSkipFn + +**GetNoStorageClassesSkipFn** - Generates a closure that determines if the test should be skipped due to the absence of storage classes. + +A factory that returns a test‑skipping function which evaluates whether the current test environment contains any storage classes. 
+ +--- + +#### Signature (Go) + +```go +func GetNoStorageClassesSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a closure that determines if the test should be skipped due to the absence of storage classes. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing a list of storage classes. | +| **Return value** | A function returning `(bool, string)` where the boolean indicates skip status and the string is an optional reason. | +| **Key dependencies** | • Calls the built‑in `len` to inspect `env.StorageClassList`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a reusable skip condition for tests that require storage classes, simplifying test setup in `pkg/testhelper`. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"len(env.StorageClassList) == 0"} + B -- Yes --> C["Return true, no storage classes found"] + B -- No --> D["Return false,"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNoStorageClassesSkipFn --> len +``` + +--- + +#### Functions calling `GetNoStorageClassesSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetNoStorageClassesSkipFn +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + env := &provider.TestEnvironment{ + StorageClassList: []string{}, // or populate with actual storage classes + } + skipFn := testhelper.GetNoStorageClassesSkipFn(env) + if skip, reason := skipFn(); skip { + fmt.Println("Skipping test:", reason) + } else { + fmt.Println("Proceeding with test") + } +} +``` + +--- + +### GetNonOCPClusterSkipFn + +**GetNonOCPClusterSkipFn** - Provides a function that returns `true` and a message when the environment is *not* an OCP cluster, allowing tests to be skipped in non‑OCP contexts. + +Retrieves a test‑skipping callback that signals whether the current cluster is not an OpenShift (OCP) cluster. + +```go +func GetNonOCPClusterSkipFn() func() (bool, string) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Provides a function that returns `true` and a message when the environment is *not* an OCP cluster, allowing tests to be skipped in non‑OCP contexts. | +| **Parameters** | None | +| **Return value** | A closure of type `func() (bool, string)`; on invocation it returns:
• `true` and `"non-OCP cluster detected"` if the cluster is not OCP,
• `false` and an empty string otherwise. | +| **Key dependencies** | * Calls `provider.IsOCPCluster()` to determine cluster type. | +| **Side effects** | None; purely functional and read‑only. | +| **How it fits the package** | Used by test suites in the `testhelper` package to conditionally skip tests that require an OpenShift environment. | + +```mermaid +flowchart TD + subgraph GetNonOCPClusterSkipFn + A["Return closure"] --> B["Closure calls provider.IsOCPCluster()"] + B --> C{"Is OCP?"} + C -- No --> D["Return true, non-OCP cluster detected"] + C -- Yes --> E["Return false,"] + end +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetNonOCPClusterSkipFn --> func_IsOCPCluster["provider.IsOCPCluster()"] +``` + +#### Functions calling `GetNonOCPClusterSkipFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNonOCPClusterSkipFn +skip := testhelper.GetNonOCPClusterSkipFn() +shouldSkip, reason := skip() +if shouldSkip { + fmt.Println("Skipping test:", reason) +} +``` + +--- + +### GetNotEnoughWorkersSkipFn + +**GetNotEnoughWorkersSkipFn** - Generates a closure that determines whether to skip a test because the cluster contains fewer worker nodes than `minWorkerNodes`. + +A helper that returns a skip‑function used in tests to bypass checks when the cluster has fewer worker nodes than required. + +#### Signature (Go) + +```go +func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a closure that determines whether to skip a test because the cluster contains fewer worker nodes than `minWorkerNodes`. | +| **Parameters** | *`env`* – `*provider.TestEnvironment`: access to environment state.
*`minWorkerNodes`* – `int`: minimum required worker count. | +| **Return value** | A function returning `(bool, string)`. The boolean indicates skip status; the string is an optional message. | +| **Key dependencies** | Calls `env.GetWorkerCount()` to obtain current node count. | +| **Side effects** | None – purely functional and read‑only on the environment. | +| **How it fits the package** | Provides a reusable condition for test suites that depend on a minimum number of worker nodes, ensuring graceful degradation when prerequisites are unmet. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetNotEnoughWorkersSkipFn --> GetWorkerCount +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNotEnoughWorkersSkipFn --> func_GetWorkerCount +``` + +#### Functions calling `GetNotEnoughWorkersSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNotEnoughWorkersSkipFn +env := provider.NewTestEnvironment() +minNodes := 3 + +skipCheck := testhelper.GetNotEnoughWorkersSkipFn(env, minNodes) +if skip, msg := skipCheck(); skip { + fmt.Println("Skipping test:", msg) +} else { + // proceed with the test +} +``` + +--- + +### GetNotIntrusiveSkipFn + +**GetNotIntrusiveSkipFn** - Creates a closure that indicates whether the current test should be skipped because it is not meant to run in an intrusive environment. + +#### Signature (Go) + +```go +func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a closure that indicates whether the current test should be skipped because it is not meant to run in an intrusive environment. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment context providing access to configuration flags. 
| +| **Return value** | A function returning `(bool, string)` where `true` signals a skip and the string explains the reason. | +| **Key dependencies** | Calls `env.IsIntrusive()` from the `provider` package to check the intrusive flag. | +| **Side effects** | None; purely functional closure. | +| **How it fits the package** | Utility for test helpers, enabling conditional skipping of tests based on environment capabilities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetNotIntrusiveSkipFn --> Closure + Closure --> CheckEnv["IsIntrusive()"] + CheckEnv -- "Return true, not intrusive test" --> Skip + CheckEnv -- "Return false" --> Continue +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetNotIntrusiveSkipFn --> func_IsIntrusive +``` + +#### Functions calling `GetNotIntrusiveSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetNotIntrusiveSkipFn +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + env := provider.NewTestEnvironment() + skipFn := testhelper.GetNotIntrusiveSkipFn(env) + if skip, reason := skipFn(); skip { + println("Skipping test:", reason) + } else { + println("Running test") + } +} +``` + +--- + +### GetPodsWithoutAffinityRequiredLabelSkipFn + +**GetPodsWithoutAffinityRequiredLabelSkipFn** - Returns a closure that signals whether to skip a test when no pods are found without the required affinity label. + +Creates a skip function for test suites that determines whether to skip tests based on the presence of pods lacking the required affinity label. 
+ +#### Signature (Go) + +```go +func GetPodsWithoutAffinityRequiredLabelSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that signals whether to skip a test when no pods are found without the required affinity label. | +| **Parameters** | `env` (`*provider.TestEnvironment`) – Test environment containing cluster state and helper methods. | +| **Return value** | A function returning `(bool, string)` where the boolean indicates “skip” and the string provides an optional message. | +| **Key dependencies** | • `len` (builtin)
• `env.GetPodsWithoutAffinityRequiredLabel()` | +| **Side effects** | None – only reads state from `env`. | +| **How it fits the package** | Part of the test helper utilities; used to conditionally skip tests that require pods without a specific affinity label. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetPodsWithoutAffinityRequiredLabelSkipFn --> Closure + Closure --> CheckLen["Check length of env.GetPodsWithoutAffinityRequiredLabel()"] + CheckLen -- zero --> Skip["Return true, message"] + CheckLen -- non-zero --> Continue["Return false, empty string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetPodsWithoutAffinityRequiredLabelSkipFn --> len + func_GetPodsWithoutAffinityRequiredLabelSkipFn --> GetPodsWithoutAffinityRequiredLabel +``` + +#### Functions calling `GetPodsWithoutAffinityRequiredLabelSkipFn` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetPodsWithoutAffinityRequiredLabelSkipFn +env := provider.NewTestEnvironment() +skipFunc := testhelper.GetPodsWithoutAffinityRequiredLabelSkipFn(env) + +skip, msg := skipFunc() +if skip { + fmt.Println("Skipping test:", msg) +} +``` + +--- + +### GetSharedProcessNamespacePodsSkipFn + +**GetSharedProcessNamespacePodsSkipFn** - Returns a closure that checks if any shared‑process‑namespace pods are present in the given `TestEnvironment`. If none exist, the closure signals to skip the test with an explanatory message. + +Creates a test skip function that determines whether tests requiring shared process namespace pods should be skipped based on the current test environment. + +```go +func GetSharedProcessNamespacePodsSkipFn(env *provider.TestEnvironment) func() (bool, string) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a closure that checks if any shared‑process‑namespace pods are present in the given `TestEnvironment`. 
If none exist, the closure signals to skip the test with an explanatory message. | +| **Parameters** | `env *provider.TestEnvironment` – Environment object containing pod information. | +| **Return value** | `func() (bool, string)` – a function that returns a boolean indicating whether to skip (`true`) and an optional reason string. | +| **Key dependencies** | • Calls the built‑in `len` function.
• Invokes `env.GetShareProcessNamespacePods()` to obtain the slice of relevant pods. | +| **Side effects** | No state mutation or I/O; purely deterministic based on the provided environment. | +| **How it fits the package** | Used in test setup within the `testhelper` package to conditionally skip tests that require shared process namespace pods when none are available. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> CheckPodsLen + CheckPodsLen -->|"len==0"| SkipTest["Return true, \No shared process namespace pods found.\"] + CheckPodsLen -->|"len>0"| Continue["Return false, \\"] +``` + +#### Function dependencies + +```mermaid +graph TD + GetSharedProcessNamespacePodsSkipFn --> len + GetSharedProcessNamespacePodsSkipFn --> GetShareProcessNamespacePods +``` + +#### Functions calling `GetSharedProcessNamespacePodsSkipFn` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking GetSharedProcessNamespacePodsSkipFn +env := provider.NewTestEnvironment() +skipFn := testhelper.GetSharedProcessNamespacePodsSkipFn(env) + +if skip, reason := skipFn(); skip { + fmt.Println("Skipping test:", reason) +} +``` + +--- + +### NewCatalogSourceReportObject + +**NewCatalogSourceReportObject** - Builds a `*ReportObject` that records the status of a catalog source, including its namespace, name, compliance reason, and type. + +#### Signature (Go) + +```go +func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) *ReportObject +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `*ReportObject` that records the status of a catalog source, including its namespace, name, compliance reason, and type. | +| **Parameters** | `aNamespace string` – target namespace
`aCatalogSourceName string` – catalog‑source name
`aReason string` – justification for (non)compliance
`isCompliant bool` – compliance flag | +| **Return value** | A pointer to the constructed `ReportObject`. | +| **Key dependencies** | • Calls `NewReportObject(aReason, CatalogSourceType, isCompliant)`
• Invokes `AddField` twice on the returned object | +| **Side effects** | None beyond creating and returning a new in‑memory object. No I/O or state mutation outside of the return value. | +| **How it fits the package** | Part of the test helper utilities, this function simplifies generating catalog‑source compliance reports for unit tests and diagnostics. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> CreateReportObject["Create NewReportObject"] + CreateReportObject --> AddNamespace["AddField(Namespace)"] + AddNamespace --> AddName["AddField(Name)"] + AddName --> Return["Return ReportObject"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewCatalogSourceReportObject --> func_NewReportObject + func_NewCatalogSourceReportObject --> func_ReportObject_AddField +``` + +#### Functions calling `NewCatalogSourceReportObject` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewCatalogSourceReportObject +import "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" + +func main() { + report := testhelper.NewCatalogSourceReportObject( + "default", + "my-catalog-source", + "All images are signed", + true, + ) + // `report` now contains the catalog source details and compliance status +} +``` + +--- + +### NewCertifiedContainerReportObject + +**NewCertifiedContainerReportObject** - Builds a `ReportObject` that records compliance information for a container image identified by its digest, repository, tag, and registry. + +#### Signature (Go) + +```go +func NewCertifiedContainerReportObject(cii provider.ContainerImageIdentifier, aReason string, isCompliant bool) *ReportObject +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ReportObject` that records compliance information for a container image identified by its digest, repository, tag, and registry. 
| +| **Parameters** | `cii provider.ContainerImageIdentifier` – the image’s identity.
`aReason string` – explanation of compliance status.
`isCompliant bool` – whether the image meets certification requirements. | +| **Return value** | `*ReportObject` – pointer to the populated report object. | +| **Key dependencies** | • Calls `NewReportObject(aReason, ContainerImageType, isCompliant)`
• Invokes `AddField` on the returned `ReportObject` for each image attribute (`ImageDigest`, `ImageRepo`, `ImageTag`, `ImageRegistry`). | +| **Side effects** | None; all operations are pure and affect only the newly created object. | +| **How it fits the package** | In the `testhelper` package, this function supplies a convenient way to generate structured compliance reports for container images during testing or verification workflows. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create ReportObject"] --> B["Add Digest Field"] + B --> C["Add Repository Field"] + C --> D["Add Tag Field"] + D --> E["Add Registry Field"] + E --> F["Return ReportObject"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCertifiedContainerReportObject --> func_NewReportObject + func_NewCertifiedContainerReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewCertifiedContainerReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewCertifiedContainerReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + imageID := provider.ContainerImageIdentifier{ + Digest: "sha256:abcd1234", + Repository:"myrepo", + Tag: "v1.0", + Registry: "quay.io", + } + report := testhelper.NewCertifiedContainerReportObject(imageID, "All checks passed", true) + _ = report // use the report object as needed +} +``` + +--- + +### NewClusterOperatorReportObject + +**NewClusterOperatorReportObject** - Builds a `ReportObject` for a cluster operator, setting its type and compliance reason. + +Creates a report object that represents the compliance state of a specific cluster operator. 
+ +--- + +#### Signature (Go) + +```go +func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (*ReportObject) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ReportObject` for a cluster operator, setting its type and compliance reason. | +| **Parameters** | *`aClusterOperatorName`* (string): the operator’s name.
*`aReason`* (string): justification for compliance/non‑compliance.
*`isCompliant`* (bool): true if compliant. | +| **Return value** | `*ReportObject`: a populated report object. | +| **Key dependencies** | • Calls `NewReportObject(aReason, ClusterOperatorType, isCompliant)`
• Calls `(obj *ReportObject).AddField(Name, aClusterOperatorName)` | +| **Side effects** | None; purely functional construction of an object. | +| **How it fits the package** | Utility function in `testhelper` to simplify report creation for cluster operator checks. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create new ReportObject"] --> B["Set type to ClusterOperatorType"] + B --> C["Add compliance reason field"] + C --> D["Add Name field with aClusterOperatorName"] + D --> E["Return the object"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewClusterOperatorReportObject --> func_NewReportObject + func_NewClusterOperatorReportObject --> func_ReportObject_AddField +``` + +--- + +#### Functions calling `NewClusterOperatorReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking NewClusterOperatorReportObject +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewClusterOperatorReportObject("csi-driver", "Driver is compliant", true) + fmt.Printf("%+v\n", report) +} +``` + +--- + +### NewClusterVersionReportObject + +**NewClusterVersionReportObject** - Constructs a `ReportObject` for a cluster’s version and records whether the version meets compliance criteria. + +Creates a report object that records the version of an OpenShift cluster along with compliance information. + +```go +func NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs a `ReportObject` for a cluster’s version and records whether the version meets compliance criteria. | +| **Parameters** | `version string –` OpenShift release identifier.
`aReason string –` Explanation for compliance status.
`isCompliant bool –` Indicates if the cluster is compliant. | +| **Return value** | `*ReportObject` – a populated report object containing version, type and compliance fields. | +| **Key dependencies** | • Calls `NewReportObject(aReason, OCPClusterType, isCompliant)`
• Invokes `AddField(OCPClusterVersionType, version)` on the resulting object | +| **Side effects** | None; only returns a new value. | +| **How it fits the package** | Provides a convenience constructor used by test helpers to generate cluster‑version reports for compliance checks. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Create base report via NewReportObject"] + B --> C["Add version field with AddField"] + C --> D["Return ReportObject"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewClusterVersionReportObject --> func_NewReportObject + func_NewClusterVersionReportObject --> func_ReportObject_AddField +``` + +#### Functions calling `NewClusterVersionReportObject` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewClusterVersionReportObject +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewClusterVersionReportObject("4.12.0", "All patches applied", true) + fmt.Printf("%+v\n", report) +} +``` + +--- + +### NewContainerReportObject + +**NewContainerReportObject** - Builds a `ReportObject` representing the status of a specific container within a pod. It records namespace, pod name, container name, compliance reason, and whether the container complies with policy. + +#### Signature (Go) + +```go +func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ReportObject` representing the status of a specific container within a pod. It records namespace, pod name, container name, compliance reason, and whether the container complies with policy. | +| **Parameters** | `aNamespace string – Kubernetes namespace`
`aPodName string – Pod name`
`aContainerName string – Container name`
`aReason string – Reason for (non‑)compliance`
`isCompliant bool – Compliance flag` | +| **Return value** | `*ReportObject – populated report object ready for further processing or serialization.` | +| **Key dependencies** | • Calls `NewReportObject(aReason, ContainerType, isCompliant)` to create the base object.
• Invokes `AddField` three times to attach namespace, pod name, and container name. | +| **Side effects** | No global state changes or I/O; purely constructs and returns a new object. | +| **How it fits the package** | Part of the `testhelper` utilities for generating structured compliance reports used in tests and examples. It provides a convenient wrapper around generic report creation tailored to container entities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create base ReportObject via NewReportObject"] --> B["Add namespace field"] + B --> C["Add pod name field"] + C --> D["Add container name field"] + D --> E["Return populated object"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewContainerReportObject --> func_NewReportObject + func_NewContainerReportObject --> func_ReportObject_AddField +``` + +#### Functions calling `NewContainerReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewContainerReportObject +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewContainerReportObject( + "dev-namespace", // namespace + "example-pod", // pod name + "app-container", // container name + "image not signed",// reason + false, // compliance status + ) + + fmt.Printf("Report type: %s\n", report.ObjectType) + for i, key := range report.ObjectFieldsKeys { + fmt.Printf("%s: %s\n", key, report.ObjectFieldsValues[i]) + } +} +``` + +This snippet demonstrates how to generate a container‑specific report object and inspect its fields. + +--- + +### NewCrdReportObject + +**NewCrdReportObject** - Instantiates a `ReportObject` describing a Custom Resource Definition (CRD), embedding its name, version, compliance reason, and status. 
+ +#### Signature (Go) + +```go +func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `ReportObject` describing a Custom Resource Definition (CRD), embedding its name, version, compliance reason, and status. | +| **Parameters** | `aName string – CRD name`
`aVersion string – CRD version`
`aReason string – explanation for the compliance state`
`isCompliant bool – indicates if the CRD complies with policy` | +| **Return value** | `*ReportObject – fully populated report object` | +| **Key dependencies** | • Calls `NewReportObject` to create the base object.
• Invokes `AddField` twice to append name and version. | +| **Side effects** | None beyond constructing and returning a new struct; no external I/O or concurrency. | +| **How it fits the package** | Provides a helper for tests that need CRD‑specific compliance reports, simplifying test setup in `testhelper`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Create base ReportObject"} + B --> C["Call NewReportObject(aReason, CustomResourceDefinitionType, isCompliant)"] + C --> D["AddField(CustomResourceDefinitionName, aName)"] + D --> E["AddField(CustomResourceDefinitionVersion, aVersion)"] + E --> F["Return ReportObject"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewCrdReportObject --> func_NewReportObject +``` + +#### Functions calling `NewCrdReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewCrdReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewCrdReportObject( + "my-crd", // aName + "v1alpha1", // aVersion + "All checks passed", // aReason + true, // isCompliant + ) + _ = report // use the report as needed +} +``` + +--- + +### NewDeploymentReportObject + +**NewDeploymentReportObject** - Instantiates a `ReportObject` representing the status of a Kubernetes deployment, embedding namespace, name, compliance reason, and type. + +#### Signature (Go) + +```go +func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `ReportObject` representing the status of a Kubernetes deployment, embedding namespace, name, compliance reason, and type. | +| **Parameters** | `aNamespace string – Deployment’s namespace`
`aDeploymentName string – Deployment name`
`aReason string – Explanation for compliance/non‑compliance`
`isCompliant bool – Compliance flag` | +| **Return value** | Pointer to the newly created `ReportObject`. | +| **Key dependencies** | • Calls `NewReportObject(aReason, DeploymentType, isCompliant)`
• Invokes `AddField` twice on the resulting object | +| **Side effects** | No external I/O; mutates the returned `ReportObject` by adding fields. | +| **How it fits the package** | Provides a convenient constructor for test reports that need deployment context, centralizing field names and type handling within the `testhelper` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CreateObj["Create ReportObject via NewReportObject"] + CreateObj --> AddNS["AddField(Namespace, aNamespace)"] + AddNS --> AddName["AddField(DeploymentName, aDeploymentName)"] + AddName --> End["Return object"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewDeploymentReportObject --> func_NewReportObject + func_NewDeploymentReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewDeploymentReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewDeploymentReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewDeploymentReportObject( + "production", + "frontend-app", + "All pods are ready and replicas match desired count", + true, + ) + // report now contains a fully populated ReportObject +} +``` + +--- + +### NewHelmChartReportObject + +**NewHelmChartReportObject** - Constructs a `ReportObject` tailored for a Helm chart, embedding namespace, chart name, compliance reason, and status. + +#### Signature (Go) + +```go +func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs a `ReportObject` tailored for a Helm chart, embedding namespace, chart name, compliance reason, and status. 
| +| **Parameters** | `aNamespace string` – Kubernetes namespace; `aHelmChartName string` – name of the Helm chart; `aReason string` – justification text; `isCompliant bool` – compliance flag. | +| **Return value** | `*ReportObject` – the populated report instance. | +| **Key dependencies** | • Calls `NewReportObject(aReason, HelmType, isCompliant)`
• Invokes `AddField` twice to append namespace and chart name | +| **Side effects** | No external I/O; only creates and mutates a new struct instance in memory. | +| **How it fits the package** | Provides a convenient factory for generating test reports specific to Helm charts within the `testhelper` utilities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Create base ReportObject"} + B --> C["NewReportObject(aReason, HelmType, isCompliant)"] + C --> D["AddField(Namespace, aNamespace)"] + D --> E["AddField(Name, aHelmChartName)"] + E --> F["Return out"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewHelmChartReportObject --> func_NewReportObject + func_NewHelmChartReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewHelmChartReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewHelmChartReportObject +report := NewHelmChartReportObject("default", "my-chart", "All checks passed", true) +// report now contains namespace, name, reason, and compliance status fields. +``` + +--- + +### NewNamespacedNamedReportObject + +**NewNamespacedNamedReportObject** - Builds a `ReportObject` pre‑populated with the supplied reason, type and compliance flag, then adds namespace and name fields. + +Creates a namespaced, named report object with the specified reason, type, compliance status, namespace and name. + +#### Signature (Go) + +```go +func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ReportObject` pre‑populated with the supplied reason, type and compliance flag, then adds namespace and name fields. | +| **Parameters** | `aReason string` – justification for compliance or non‑compliance.
`aType string` – classification of the report.
`isCompliant bool` – true if compliant, false otherwise.
`aNamespace string` – Kubernetes namespace to associate with the report.
`aName string` – name of the resource being reported. | +| **Return value** | `*ReportObject` – pointer to the fully initialized report object. | +| **Key dependencies** | • `NewReportObject(aReason, aType, isCompliant)`
• `(*ReportObject).AddField(key, value string)` (used twice) | +| **Side effects** | None beyond returning a new in‑memory struct; no I/O or concurrency. | +| **How it fits the package** | In the `testhelper` package, this helper simplifies test assertions by providing a ready‑made report object that includes namespace and name context. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Create base ReportObject via NewReportObject"] --> B["Add Field: Namespace"] + B --> C["Add Field: Name"] + C --> D["Return ReportObject"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewNamespacedNamedReportObject --> func_NewReportObject + func_NewNamespacedNamedReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewNamespacedNamedReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewNamespacedNamedReportObject +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewNamespacedNamedReportObject( + "All checks passed", + "ComplianceCheck", + true, + "default", + "my-app", + ) + fmt.Printf("%+v\n", report) +} +``` + +--- + +### NewNamespacedReportObject + +**NewNamespacedReportObject** - Builds a `ReportObject` that records the reason for compliance/non‑compliance, its type, and associates it with a specific Kubernetes namespace. + +#### Signature (Go) + +```go +func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ReportObject` that records the reason for compliance/non‑compliance, its type, and associates it with a specific Kubernetes namespace. | +| **Parameters** | *`aReason string` – Explanation of the compliance state.
*`aType string` – Category or type of report.
*`isCompliant bool` – Flag indicating if the object is compliant.
*`aNamespace string` – Namespace to be attached as a field. | +| **Return value** | `*ReportObject` – The fully populated report object, ready for further use or serialization. | +| **Key dependencies** | • Calls `NewReportObject(aReason, aType, isCompliant)`
• Invokes the method `AddField(Namespace, aNamespace)` on the returned object | +| **Side effects** | None beyond constructing and returning the new object; no global state or I/O. | +| **How it fits the package** | In the `testhelper` package, this helper simplifies test data creation by bundling namespace information into a standard report structure. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Create base ReportObject"} + B --> C["Call NewReportObject"] + C --> D["Add namespace field via AddField"] + D --> E["Return populated ReportObject"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewNamespacedReportObject --> func_ReportObject.AddField + func_NewNamespacedReportObject --> func_NewReportObject +``` + +#### Functions calling `NewNamespacedReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewNamespacedReportObject +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewNamespacedReportObject( + "All checks passed", + "ComplianceCheck", + true, + "production", + ) + fmt.Printf("Created report: %+v\n", report) +} +``` + +--- + +### NewNodeReportObject + +**NewNodeReportObject** - Instantiates a `ReportObject` representing a Kubernetes node, populating it with the node’s name, compliance status and associated reason. + +#### Signature (Go) + +```go +func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `ReportObject` representing a Kubernetes node, populating it with the node’s name, compliance status and associated reason. | +| **Parameters** | `aNodeName string – the node’s identifier`
`aReason string – justification for the compliance state`
`isCompliant bool – true if the node complies with policy, false otherwise` | +| **Return value** | `*ReportObject – a pointer to the newly created report object` | +| **Key dependencies** | • Calls `NewReportObject(aReason, NodeType, isCompliant)`<br/>
• Invokes `out.AddField(Name, aNodeName)` | +| **Side effects** | None beyond creating and initializing the returned object. | +| **How it fits the package** | Provides a convenient constructor for node‑level compliance reports used in testing helpers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CreateReport["Create ReportObject via NewReportObject"] + CreateReport --> AddNameField["Add field: Name = aNodeName"] + AddNameField --> End["Return ReportObject"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewNodeReportObject --> func_NewReportObject + func_NewNodeReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewNodeReportObject` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewNodeReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + // Create a compliant node report + report := testhelper.NewNodeReportObject("node-01", "All checks passed", true) + _ = report // use the report as needed +} +``` + +--- + +### NewOperatorReportObject + +**NewOperatorReportObject** - Instantiates a `ReportObject` for an operator, populating it with namespace, name, and compliance reason. + +Creates a new `ReportObject` pre‑filled with operator metadata. + +--- + +#### Signature (Go) + +```go +func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (*ReportObject) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `ReportObject` for an operator, populating it with namespace, name, and compliance reason. | +| **Parameters** | `aNamespace string – the Kubernetes namespace`,
`aOperatorName string – the operator’s name`,
`aReason string – justification for compliance status`,
`isCompliant bool – true if compliant, false otherwise` | +| **Return value** | `*ReportObject – the constructed report object` | +| **Key dependencies** | • `NewReportObject(aReason, OperatorType, isCompliant)`
• `(*ReportObject).AddField(Namespace, aNamespace)`
• `(*ReportObject).AddField(Name, aOperatorName)` | +| **Side effects** | None – purely functional; no I/O or global state changes. | +| **How it fits the package** | Provides a convenient helper for tests to generate operator‑related reports within the `testhelper` package. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CreateReportObject["NewReportObject(aReason, OperatorType, isCompliant)"] + CreateReportObject --> AddNamespace["AddField(Namespace, aNamespace)"] + AddNamespace --> AddName["AddField(Name, aOperatorName)"] + AddName --> Return["return out"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewOperatorReportObject --> func_NewReportObject + func_NewOperatorReportObject --> func_ReportObject_AddField + func_NewOperatorReportObject --> func_ReportObject_AddField +``` + +--- + +#### Functions calling `NewOperatorReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking NewOperatorReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + // Create a compliant operator report for namespace "default" and name "my-operator". + report := testhelper.NewOperatorReportObject("default", "my-operator", "All checks passed", true) + // Use the report object as needed... + _ = report +} +``` + +--- + +### NewPodReportObject + +**NewPodReportObject** - Constructs a `ReportObject` that describes compliance for a specific pod. The object includes the namespace, pod name, and a reason indicating whether the pod complies with expected policies. 
+ +#### Signature (Go) + +```go +func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Constructs a `ReportObject` that describes compliance for a specific pod. The object includes the namespace, pod name, and a reason indicating whether the pod complies with expected policies. | +| **Parameters** | - `aNamespace string` – the Kubernetes namespace of the pod.
- `aPodName string` – the pod’s name.
- `aReason string` – textual explanation for compliance status.
- `isCompliant bool` – flag indicating if the pod meets policy requirements. | +| **Return value** | `*ReportObject` – a pointer to the newly created report object populated with relevant fields. | +| **Key dependencies** | - Calls `NewReportObject(aReason, PodType, isCompliant)` to create the base report.
- Invokes `AddField` on the resulting `ReportObject` twice to attach namespace and pod name. | +| **Side effects** | No external I/O or global state changes; only memory allocations for the new object and its fields. | +| **How it fits the package** | This helper centralizes the creation of pod‑level compliance reports, ensuring consistent field names (`Namespace`, `PodName`) across tests within the `testhelper` package. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> CreateBase["Create base ReportObject via NewReportObject"] + CreateBase --> AddNS["Add namespace field"] + AddNS --> AddPN["Add pod name field"] + AddPN --> Return["Return *ReportObject"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewPodReportObject --> func_NewReportObject + func_NewPodReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewPodReportObject` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewPodReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + // Create a report object for a pod that does not comply. + podReport := testhelper.NewPodReportObject( + "default", // namespace + "my-pod", // pod name + "Missing TLS cert", // reason + false, // isCompliant + ) + + // The returned object can now be inspected or serialized as needed. + _ = podReport +} +``` + +--- + +### NewReportObject + +**NewReportObject** - Instantiates a `ReportObject`, sets its type and attaches a reason field that indicates compliance or non‑compliance. + +#### Signature (Go) + +```go +func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `ReportObject`, sets its type and attaches a reason field that indicates compliance or non‑compliance. 
| +| **Parameters** | `aReason string` – the justification text.
`aType string` – object category (e.g., `"Container"`).
`isCompliant bool` – flag determining which reason key to use. | +| **Return value** | `*ReportObject` – pointer to the newly created report object. | +| **Key dependencies** | • Calls `(*ReportObject).AddField` twice.
• Uses constants `ReasonForCompliance`, `ReasonForNonCompliance`. | +| **Side effects** | Allocates a new `ReportObject`; mutates its internal key/value slices via `AddField`. No I/O or concurrency. | +| **How it fits the package** | Serves as the foundational constructor used by all other “New…ReportObject” helpers in the `testhelper` package, ensuring consistent initialization across report types. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"isCompliant?"} + B -- true --> C["AddField(ReasonForCompliance, aReason)"] + B -- false --> D["AddField(ReasonForNonCompliance, aReason)"] + C & D --> E["Set ObjectType = aType"] + E --> F["Return *ReportObject"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewReportObject` (Mermaid) + +```mermaid +graph TD + func_NewCatalogSourceReportObject --> func_NewReportObject + func_NewCertifiedContainerReportObject --> func_NewReportObject + func_NewClusterOperatorReportObject --> func_NewReportObject + func_NewClusterVersionReportObject --> func_NewReportObject + func_NewContainerReportObject --> func_NewReportObject + func_NewCrdReportObject --> func_NewReportObject + func_NewDeploymentReportObject --> func_NewReportObject + func_NewHelmChartReportObject --> func_NewReportObject + func_NewNamespacedNamedReportObject --> func_NewReportObject + func_NewNamespacedReportObject --> func_NewReportObject + func_NewNodeReportObject --> func_NewReportObject + func_NewOperatorReportObject --> func_NewReportObject + func_NewPodReportObject --> func_NewReportObject + func_NewStatefulSetReportObject --> func_NewReportObject + func_NewTaintReportObject --> func_NewReportObject +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + // Create a 
compliant container report object + report := testhelper.NewReportObject( + "Container image meets policy", // reason + testhelper.ContainerImageType, // type + true, // isCompliant + ) + _ = report // use the report as needed +} +``` + +--- + +### NewStatefulSetReportObject + +**NewStatefulSetReportObject** - Builds a `ReportObject` representing a StatefulSet, attaching its namespace and name. + +Creates a new `ReportObject` tailored for a StatefulSet, embedding namespace and name metadata. + +```go +func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ReportObject` representing a StatefulSet, attaching its namespace and name. | +| **Parameters** | `aNamespace string – the Kubernetes namespace`
`aStatefulSetName string – the StatefulSet’s name`
`aReason string – compliance explanation`
`isCompliant bool – compliance status` | +| **Return value** | `*ReportObject – fully populated report instance` | +| **Key dependencies** | • Calls `NewReportObject(aReason, StatefulSetType, isCompliant)`
• Invokes `AddField` twice to store namespace and name | +| **Side effects** | No external I/O; purely constructs an in‑memory object. | +| **How it fits the package** | Provides a convenience constructor for test helpers that generate compliance reports for StatefulSets. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> NewReportObject + NewReportObject --> AddField_Namespace + AddField_Namespace --> AddField_StatefulSetName + AddField_StatefulSetName --> Return +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewStatefulSetReportObject --> func_NewReportObject + func_NewStatefulSetReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewStatefulSetReportObject` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example + +```go +// Minimal example invoking NewStatefulSetReportObject +obj := NewStatefulSetReportObject("prod", "db-cluster", "All pods running", true) +fmt.Printf("%+v\n", obj) +``` + +--- + +### NewTaintReportObject + +**NewTaintReportObject** - Instantiates a `ReportObject` that represents the taint status of a node, attaching the node name, the specific taint bit, and an explanation for compliance or non‑compliance. + +Creates a new `ReportObject` prepopulated with taint‑related fields. + +#### Signature (Go) + +```go +func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) *ReportObject +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `ReportObject` that represents the taint status of a node, attaching the node name, the specific taint bit, and an explanation for compliance or non‑compliance. | +| **Parameters** | *`taintBit` (string) – The taint identifier.
* `nodeName` (string) – Name of the node being evaluated.
* `aReason` (string) – Reason explaining the compliance status.<br/>
* `isCompliant` (bool) – Flag indicating whether the node satisfies the taint requirement. | +| **Return value** | A pointer to the newly created `ReportObject`. | +| **Key dependencies** | • Calls `NewReportObject(aReason, TaintType, isCompliant)`
• Invokes `AddField(NodeType, nodeName)`
• Invokes `AddField(TaintBit, taintBit)` | +| **Side effects** | None beyond constructing and returning the object. No external I/O or state mutation outside of the returned instance. | +| **How it fits the package** | Provides a convenient constructor for tests that need to generate standardized taint reports within the `testhelper` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Call NewReportObject(aReason, TaintType, isCompliant)"] + B --> C["AddField(NodeType, nodeName)"] + C --> D["AddField(TaintBit, taintBit)"] + D --> E["Return ReportObject"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewTaintReportObject --> func_NewReportObject + func_NewTaintReportObject --> func_ReportObject.AddField +``` + +#### Functions calling `NewTaintReportObject` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NewTaintReportObject +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + report := testhelper.NewTaintReportObject( + "node.kubernetes.io/disk-pressure", + "worker-01", + "The node is compliant with disk pressure taint.", + true, + ) + // `report` now contains the populated ReportObject +} +``` + +--- + +### ReportObject.AddField + +**AddField** - Appends the supplied key and value to the `ObjectFieldsKeys` and `ObjectFieldsValues` slices of the receiver, then returns the modified object for chaining. + +#### Signature (Go) + +```go +func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Appends the supplied key and value to the `ObjectFieldsKeys` and `ObjectFieldsValues` slices of the receiver, then returns the modified object for chaining. 
| +| **Parameters** | `aKey string –` field name; `aValue string –` corresponding field value | +| **Return value** | The same `*ReportObject` instance (after mutation), enabling method chaining. | +| **Key dependencies** | • Built‑in `append` function for slices.
• None other than standard library. | +| **Side effects** | Mutates the receiver’s internal slices; no external I/O or concurrency. | +| **How it fits the package** | Provides a fluent interface used by constructors (`NewReportObject`, `NewCatalogSourceReportObject`, etc.) to populate report metadata before returning the object. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Append key"} + B --> C["obj.ObjectFieldsKeys = append(...)"] + B --> D{"Append value"} + D --> E["obj.ObjectFieldsValues = append(...)"] + E --> F["Return obj"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_ReportObject.AddField --> append +``` + +#### Functions calling `ReportObject.AddField` + +```mermaid +graph TD + func_NewCatalogSourceReportObject --> func_ReportObject.AddField + func_NewCertifiedContainerReportObject --> func_ReportObject.AddField + func_NewClusterOperatorReportObject --> func_ReportObject.AddField + func_NewClusterVersionReportObject --> func_ReportObject.AddField + func_NewContainerReportObject --> func_ReportObject.AddField + func_NewCrdReportObject --> func_ReportObject.AddField + func_NewDeploymentReportObject --> func_ReportObject.AddField + func_NewHelmChartReportObject --> func_ReportObject.AddField + func_NewNamespacedNamedReportObject --> func_ReportObject.AddField + func_NewNamespacedReportObject --> func_ReportObject.AddField + func_NewNodeReportObject --> func_ReportObject.AddField + func_NewOperatorReportObject --> func_ReportObject.AddField + func_NewPodReportObject --> func_ReportObject.AddField + func_NewReportObject --> func_ReportObject.AddField + func_NewStatefulSetReportObject --> func_ReportObject.AddField + func_NewTaintReportObject --> func_ReportObject.AddField + func_SetContainerProcessValues --> func_ReportObject.AddField +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ReportObject.AddField +obj := &ReportObject{} +obj.AddField("Status", "Success").AddField("Detail", "All 
checks passed") +``` + +--- + +### ReportObject.SetContainerProcessValues + +**SetContainerProcessValues** - Adds the scheduling policy, priority, and command line of a container process to the report object and marks its type as `ContainerProcessType`. + +#### Signature (Go) + +```go +func (obj *ReportObject) SetContainerProcessValues(aPolicy, aPriority, aCommandLine string) *ReportObject +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Adds the scheduling policy, priority, and command line of a container process to the report object and marks its type as `ContainerProcessType`. | +| **Parameters** | `aPolicy string` – scheduling policy name.
`aPriority string` – scheduling priority value.
`aCommandLine string` – full command line of the process. | +| **Return value** | The modified `*ReportObject`, enabling method chaining. | +| **Key dependencies** | Calls `ReportObject.AddField` three times to append key/value pairs. | +| **Side effects** | Mutates the receiver’s `ObjectFieldsKeys`, `ObjectFieldsValues`, and `ObjectType`. No external I/O or concurrency is involved. | +| **How it fits the package** | Provides a convenient builder‑style interface for populating container process information within the test helper reporting system. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + obj --> obj.AddField["AddField(ProcessCommandLine, aCommandLine)"] + obj --> obj.AddField["AddField(SchedulingPolicy, aPolicy)"] + obj --> obj.AddField["AddField(SchedulingPriority, aPriority)"] + obj --> obj.ObjectType["Set to ContainerProcessType"] + obj --> return["obj"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ReportObject.SetContainerProcessValues --> func_ReportObject.AddField +``` + +#### Functions calling `ReportObject.SetContainerProcessValues` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ReportObject.SetContainerProcessValues +report := &testhelper.ReportObject{} +report.SetContainerProcessValues("FIFO", "10", "/usr/bin/bash -c 'echo hello'") +``` + +--- + +### ReportObject.SetType + +**SetType** - Assigns a new value to the `ObjectType` field of the receiver and returns the updated object. + +#### Signature (Go) + +```go +func (obj *ReportObject) SetType(aType string) (*ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Assigns a new value to the `ObjectType` field of the receiver and returns the updated object. | +| **Parameters** | `aType string` – The type identifier to be stored in `ObjectType`. 
| +| **Return value** | `*ReportObject` – A pointer to the modified report object, enabling method chaining. | +| **Key dependencies** | None – the function only manipulates a field of its receiver. | +| **Side effects** | Mutates the `ObjectType` field of the receiver; no external I/O or concurrency concerns. | +| **How it fits the package** | Provides a fluent interface for configuring report objects within the test helper utilities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + SetType --> Update_ObjectType +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `ReportObject.SetType` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ReportObject.SetType +obj := &testhelper.ReportObject{} +updated := obj.SetType("example-type") +// updated.ObjectType == "example-type" +``` + +--- + +### ReportObjectTestString + +**ReportObjectTestString** - Builds a single string that lists each `ReportObject` in the provided slice, using Go’s `%#v` format for readability. The output is wrapped like `[]testhelper.ReportObject{...}`. + +Creates a testable string representation of a slice of `*ReportObject`. + +#### Signature (Go) + +```go +func ReportObjectTestString(p []*ReportObject) (out string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a single string that lists each `ReportObject` in the provided slice, using Go’s `%#v` format for readability. The output is wrapped like `[]testhelper.ReportObject{...}`. | +| **Parameters** | `p []*ReportObject – the slice to stringify` | +| **Return value** | `out string – formatted representation of the input slice` | +| **Key dependencies** | • `fmt.Sprintf` for formatting each element | +| **Side effects** | None; purely functional, returns a new string. 
| +| **How it fits the package** | Utility helper used in tests to compare expected versus actual values involving `ReportObject`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> InitString["out = '[]testhelper.ReportObject{'"] + InitString --> Loop["for each element in p"] + Loop --> Format["out += fmt.Sprintf('%#v,', *p)"] + Format --> EndLoop["after loop ends"] + EndLoop --> Close["out += '}'"] + Close --> Return["return out"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ReportObjectTestString --> func_Sprintf["fmt.Sprintf"] +``` + +#### Functions calling `ReportObjectTestString` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ReportObjectTestString +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + objs := []*testhelper.ReportObject{ + {ID: 1, Name: "alpha"}, + {ID: 2, Name: "beta"}, + } + s := testhelper.ReportObjectTestString(objs) + println(s) // prints, e.g.: []testhelper.ReportObject{testhelper.ReportObject{…},testhelper.ReportObject{…},} +} +``` + +--- + +### ReportObjectTestStringPointer + +**ReportObjectTestStringPointer** - Produces a formatted string that lists the dereferenced values of each `*ReportObject` in a slice. + +Generates a human‑readable string representation of a slice of pointers to `ReportObject`. + +--- + +#### Signature (Go) + +```go +func([]*ReportObject)(string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a formatted string that lists the dereferenced values of each `*ReportObject` in a slice. | +| **Parameters** | `p []*ReportObject` – slice of pointers to `ReportObject`. | +| **Return value** | `string` – e.g. `"[]*testhelper.ReportObject{&{...}, &{...}}"`. | +| **Key dependencies** | • `fmt.Sprintf` (from the standard library) | +| **Side effects** | None; purely functional. 
| +| **How it fits the package** | Utility helper used by test helpers to serialize objects for error messages and debugging. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Init + Init --> Loop + Loop --> Format + Format --> EndString + EndString --> Return + + subgraph Steps + Start["Start"] + Init["InitString: out = '[]*testhelper.ReportObject{'"] + Loop["Loop over each element in p"] + Format["Format: out += fmt.Sprintf('&%#v,', *p)"] + EndString["EndString: out += '}'"] + Return["Return: return out"] + end +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ReportObjectTestStringPointer --> fmt.Sprintf +``` + +--- + +#### Functions calling `ReportObjectTestStringPointer` (Mermaid) + +```mermaid +graph TD + func_FailureReasonOutTestString --> func_ReportObjectTestStringPointer +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking ReportObjectTestStringPointer +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + objs := []*testhelper.ReportObject{ + { /* populate fields */ }, + { /* populate fields */ }, + } + fmt.Println(testhelper.ReportObjectTestStringPointer(objs)) +} +``` + +--- + +### ResultObjectsToString + +**ResultObjectsToString** - Serialises compliant and non‑compliant report objects into a JSON representation of `FailureReasonOut`. + +#### Signature (Go) + +```go +func ResultObjectsToString(compliantObject, nonCompliantObject []*ReportObject) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Serialises compliant and non‑compliant report objects into a JSON representation of `FailureReasonOut`. | +| **Parameters** | `compliantObject` – slice of pointers to `ReportObject` that passed validation.
`nonCompliantObject` – slice of pointers to `ReportObject` that failed validation. | +| **Return value** | A JSON string containing both slices, or an error if marshaling fails. | +| **Key dependencies** | • `encoding/json.Marshal`
• `fmt.Errorf` | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by test helpers to produce a deterministic string representation of report objects for comparison or logging. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Create FailureReasonOut{CompliantObjectsOut, NonCompliantObjectsOut}"] + B --> C["Marshal into JSON"] + C --> D{{Error?}} + D -- yes --> E["Return empty string & fmt.Errorf"] + D -- no --> F["Convert bytes to string"] + F --> G["Return string, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_ResultObjectsToString --> func_Marshal + func_ResultObjectsToString --> func_Errorf +``` + +#### Functions calling `ResultObjectsToString` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ResultObjectsToString +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + // Example report objects (details omitted) + compliant := []*testhelper.ReportObject{{/* fields */}} + nonCompliant := []*testhelper.ReportObject{{/* fields */}} + + jsonStr, err := testhelper.ResultObjectsToString(compliant, nonCompliant) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + fmt.Println(jsonStr) // JSON representation of the two slices +} +``` + +--- + +### ResultToString + +**ResultToString** - Translates predefined integer result codes (`SUCCESS`, `FAILURE`, `ERROR`) into human‑readable strings. Unrecognized codes yield an empty string. + +#### Signature (Go) + +```go +func ResultToString(result int) (str string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Translates predefined integer result codes (`SUCCESS`, `FAILURE`, `ERROR`) into human‑readable strings. Unrecognized codes yield an empty string. | +| **Parameters** | `result int` – the numeric code to convert. 
| +| **Return value** | `str string` – textual representation of the input code, or an empty string if unknown. | +| **Key dependencies** | *None* | +| **Side effects** | None (pure function). | +| **How it fits the package** | Utility helper used by tests to log or display result statuses in a readable form. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"result"} + B -- SUCCESS --> C["Return 'SUCCESS'"] + B -- FAILURE --> D["Return 'FAILURE'"] + B -- ERROR --> E["Return 'ERROR'"] + B -- Other --> F["Return empty string"] +``` + +#### Function dependencies + +None – this function does not call any other function in the package. + +```mermaid +graph TD + Note["No external dependencies"] +``` + +#### Functions calling `ResultToString` + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + Note["No callers found"] +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ResultToString +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + fmt.Println(testhelper.ResultToString(testhelper.SUCCESS)) // Output: SUCCESS +} +``` + +--- diff --git a/docs/pkg/versions/versions.md b/docs/pkg/versions/versions.md new file mode 100644 index 000000000..afbc2cc83 --- /dev/null +++ b/docs/pkg/versions/versions.md @@ -0,0 +1,253 @@ +# Package versions + +**Path**: `pkg/versions` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [GitVersion](#gitversion) + - [IsValidK8sVersion](#isvalidk8sversion) + - [IsValidSemanticVersion](#isvalidsemanticversion) + +## Overview + +Provides utilities for handling version information in CertSuite, including formatting build Git metadata and validating semantic versions used by Kubernetes CRDs. 
+ +### Key Features + +- Formats the current build’s Git commit, release tags, and previous releases into a human‑readable string via GitVersion + +### Design Notes + +- Assumes Git metadata is injected at build time; if missing, falls back to "unreleased" placeholders +- Relies on Masterminds/semver for strict semantic version parsing, which may reject pre‑release identifiers not matching CRD conventions +- Best practice: call GitVersion early in application startup to display build info and validate versions before use + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GitVersion() string](#gitversion) | Returns a formatted string that describes the Git version of the current build. If no release tag is present, it indicates an unreleased post‑build and shows the previous release. | +| [func IsValidK8sVersion(version string) bool](#isvalidk8sversion) | Checks whether the supplied string conforms to the accepted Kubernetes semantic‑versioning format used by CRDs (`v[(alpha\|beta)]{0,2}`). | +| [func IsValidSemanticVersion(version string) bool](#isvalidsemanticversion) | Determines if `version` conforms to semantic‑versioning rules. | + +## Exported Functions + +### GitVersion + +**GitVersion** - Returns a formatted string that describes the Git version of the current build. If no release tag is present, it indicates an unreleased post‑build and shows the previous release. + +Retrieves a human‑readable representation of the current build’s Git state, combining the release tag (or a fallback message) with the commit hash. + +#### Signature (Go) + +```go +func GitVersion() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a formatted string that describes the Git version of the current build. If no release tag is present, it indicates an unreleased post‑build and shows the previous release. 
| +| **Parameters** | None | +| **Return value** | `string` – `<release> (<commit>)`, where `<release>` is either the release tag or a fallback message. | +| **Key dependencies** | • Reads global variables: `GitRelease`, `GitPreviousRelease`, `GitDisplayRelease`, `GitCommit`.
• No external function calls. | +| **Side effects** | Mutates the global variable `GitDisplayRelease` to reflect the current release string. | +| **How it fits the package** | Provides a single source of truth for reporting the build’s Git state throughout the certsuite application (e.g., in CLI output and logs). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"GitRelease is empty?"} + B -- Yes --> C["Set GitDisplayRelease = 'Unreleased build post ' + GitPreviousRelease"] + B -- No --> D["Set GitDisplayRelease = GitRelease"] + C --> E["Return GitDisplayRelease + ' (' + GitCommit + ')'"] + D --> E +``` + +#### Function dependencies + +None – this function does not call any other function in the package. + +#### Functions calling `GitVersion` + +```mermaid +graph TD + func_showVersion --> func_GitVersion + func_Startup --> func_GitVersion +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GitVersion +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions" +) + +func main() { + fmt.Println("Current build:", versions.GitVersion()) +} +``` + +--- + +### IsValidK8sVersion + +**IsValidK8sVersion** - Checks whether the supplied string conforms to the accepted Kubernetes semantic‑versioning format used by CRDs (`v[(alpha|beta)]{0,2}`). + +#### Signature (Go) + +```go +func IsValidK8sVersion(version string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the supplied string conforms to the accepted Kubernetes semantic‑versioning format used by CRDs (`v[(alpha\|beta)]{0,2}`). | +| **Parameters** | `version` string – candidate version text. | +| **Return value** | `bool` – `true` if the string matches the pattern; otherwise `false`. | +| **Key dependencies** | • `regexp.MustCompile` from package `regexp`
• `MatchString` method on the compiled regex | +| **Side effects** | None – pure function, no state mutation or I/O. | +| **How it fits the package** | Provides a reusable validator for Kubernetes version strings used throughout the test suite to enforce CRD versioning best practices. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Input: version string"] --> B["Compile regex r"] + B --> C{"r.MatchString(version)"} + C -->|"true"| D["Return true"] + C -->|"false"| E["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_IsValidK8sVersion --> func_MustCompile + func_IsValidK8sVersion --> func_MatchString +``` + +#### Functions calling `IsValidK8sVersion` + +```mermaid +graph TD + func_testOperatorCrdVersioning --> func_IsValidK8sVersion +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsValidK8sVersion +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions" +) + +func main() { + v := "v1beta2" + if versions.IsValidK8sVersion(v) { + fmt.Printf("%q is a valid Kubernetes version string\n", v) + } else { + fmt.Printf("%q is NOT a valid Kubernetes version string\n", v) + } +} +``` + +--- + +### IsValidSemanticVersion + +**IsValidSemanticVersion** - Determines if `version` conforms to semantic‑versioning rules. + +Checks whether a string is a valid [semantic version](https://semver.org/). + +--- + +#### Signature (Go) + +```go +func IsValidSemanticVersion(version string) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if `version` conforms to semantic‑versioning rules. | +| **Parameters** | `version` string – the version string to validate. | +| **Return value** | `bool` – `true` when parsing succeeds, otherwise `false`. | +| **Key dependencies** | Calls `semver.NewVersion` from the `github.com/Masterminds/semver/v3` package. | +| **Side effects** | None; purely functional. 
| +| **How it fits the package** | Provides a helper for other modules to verify operator or component versions before further processing. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Parse["Parse with semver.NewVersion"] + Parse -- success --> ReturnTrue["return true"] + Parse -- failure --> ReturnFalse["return false"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsValidSemanticVersion --> func_NewVersion +``` + +--- + +#### Functions calling `IsValidSemanticVersion` (Mermaid) + +```mermaid +graph TD + testOperatorSemanticVersioning --> func_IsValidSemanticVersion +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking IsValidSemanticVersion +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions" +) + +func main() { + v := "1.2.3" + if versions.IsValidSemanticVersion(v) { + fmt.Printf("The version %q is valid.\n", v) + } else { + fmt.Printf("The version %q is invalid.\n", v) + } +} +``` + +--- + +--- diff --git a/docs/tests/accesscontrol/accesscontrol.md b/docs/tests/accesscontrol/accesscontrol.md new file mode 100644 index 000000000..e005a3175 --- /dev/null +++ b/docs/tests/accesscontrol/accesscontrol.md @@ -0,0 +1,2820 @@ +# Package accesscontrol + +**Path**: `tests/accesscontrol` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local Functions](#local-functions) + - [checkForbiddenCapability](#checkforbiddencapability) + - [getNbOfProcessesInPidNamespace](#getnbofprocessesinpidnamespace) + - [isCSVAndClusterWide](#iscsvandclusterwide) + - [isContainerCapabilitySet](#iscontainercapabilityset) + - [isInstallModeMultiNamespace](#isinstallmodemultinamespace) + - [ownedByClusterWideOperator](#ownedbyclusterwideoperator) + - [test1337UIDs](#test1337uids) + - [testAutomountServiceToken](#testautomountservicetoken) + - 
[testBpfCapability](#testbpfcapability) + - [testContainerHostPort](#testcontainerhostport) + - [testContainerSCC](#testcontainerscc) + - [testCrdRoles](#testcrdroles) + - [testIpcLockCapability](#testipclockcapability) + - [testNamespace](#testnamespace) + - [testNamespaceResourceQuota](#testnamespaceresourcequota) + - [testNetAdminCapability](#testnetadmincapability) + - [testNetRawCapability](#testnetrawcapability) + - [testNoSSHDaemonsAllowed](#testnosshdaemonsallowed) + - [testNodePort](#testnodeport) + - [testOneProcessPerContainer](#testoneprocesspercontainer) + - [testPodClusterRoleBindings](#testpodclusterrolebindings) + - [testPodHostIPC](#testpodhostipc) + - [testPodHostNetwork](#testpodhostnetwork) + - [testPodHostPID](#testpodhostpid) + - [testPodHostPath](#testpodhostpath) + - [testPodRequests](#testpodrequests) + - [testPodRoleBindings](#testpodrolebindings) + - [testPodServiceAccount](#testpodserviceaccount) + - [testSYSNiceRealtimeCapability](#testsysnicerealtimecapability) + - [testSecConPrivilegeEscalation](#testsecconprivilegeescalation) + - [testSecConReadOnlyFilesystem](#testsecconreadonlyfilesystem) + - [testSecConRunAsNonRoot](#testsecconrunasnonroot) + - [testSysAdminCapability](#testsysadmincapability) + - [testSysPtraceCapability](#testsysptracecapability) + +## Overview + +The access‑control test suite registers a collection of checks that validate Kubernetes workloads against security best practices, such as forbidden capabilities, insecure host settings, and improper resource limits. 
+ +### Key Features + +- Loads a check group named "Access‑Control" and attaches pre‑test setup callbacks +- Provides individual checks for container capabilities (e.g., BPF, SYS_ADMIN), host networking flags, pod service accounts, and namespace constraints +- Collects results into structured report objects that record compliance or violations + +### Design Notes + +- Checks are registered via LoadChecks; callers must invoke the returned function to perform registration +- Non‑exported helper functions such as isContainerCapabilitySet encapsulate capability logic to keep checks concise +- Test data (e.g., env, beforeEachFn) is injected through package globals for test orchestration + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Creates the “Access‑Control” check group, attaches pre‑test setup and registers individual checks that verify security best practices on Kubernetes resources. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject)](#checkforbiddencapability) | Determines whether each container in a list uses a disallowed Linux capability. It returns separate slices of report objects for compliant and non‑compliant containers. | +| [func(clientsholder.Context, int, clientsholder.Command) (int, error)](#getnbofprocessesinpidnamespace) | Executes `lsns -p -t pid -n` inside a container to count how many processes share the same PID namespace as the target process. | +| [func isCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) bool](#iscsvandclusterwide) | Determines if the CSV referenced by `aNamespace` and `name` is created by a cluster‑wide operator. 
| +| [func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool](#iscontainercapabilityset) | Determines if a container has the specified capability enabled (or if `"ALL"` is set). | +| [func([]v1alpha1.InstallMode)(bool)](#isinstallmodemultinamespace) | Determines if any `InstallMode` in the slice has type `AllNamespaces`. | +| [func ownedByClusterWideOperator(topOwners map[string]podhelper.TopOwner, env *provider.TestEnvironment) (aNamespace, name string, found bool)](#ownedbyclusterwideoperator) | Checks whether any of the provided `topOwners` is a Cluster Service Version (CSV) installed by a cluster‑wide operator. Returns the CSV’s namespace and name if one matches. | +| [func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment)](#test1337uids) | Verifies each pod in the test environment does not run with `securityContext.runAsUser` set to 1337. Sets compliance results accordingly. | +| [func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment)](#testautomountservicetoken) | Inspects each pod in the test environment to ensure it does not use the default service account and that its `automountServiceAccountToken` setting is explicitly set to `false`. It records compliant or non‑compliant findings. | +| [func testBpfCapability(check *checksdb.Check, env *provider.TestEnvironment)](#testbpfcapability) | Determines whether any container in the test environment requests the forbidden `BPF` capability and records compliance status. | +| [func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainerhostport) | Checks each container in the test environment for configured host‑port mappings and records compliance status. 
| +| [func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainerscc) | Scans all pods in the supplied environment, determines each container’s Security Context Constraint (SCC) category via `securitycontextcontainer.CheckPod`, and records compliance results. Containers outside the least‑privileged categories fail the test. | +| [func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment)](#testcrdroles) | Determines which role rules target Custom Resource Definitions (CRDs) included in the current test environment and records compliance results. | +| [func testIpcLockCapability(check *checksdb.Check, env *provider.TestEnvironment)](#testipclockcapability) | Ensures no container in the test environment declares the `IPC_LOCK` capability, which is considered a security risk. | +| [func testNamespace(check *checksdb.Check, env *provider.TestEnvironment)](#testnamespace) | Validates each namespace supplied by the test environment. It checks for disallowed prefixes and verifies that custom resources (CRs) are only deployed in configured namespaces. Results are recorded as compliant or non‑compliant objects. | +| [func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnvironment)](#testnamespaceresourcequota) | Verifies that every Pod in the environment runs inside a namespace that has an applied ResourceQuota. Sets compliance results accordingly. | +| [func testNetAdminCapability(check *checksdb.Check, env *provider.TestEnvironment)](#testnetadmincapability) | Checks each container in the test environment for the presence of the `NET_ADMIN` capability and records compliance results. | +| [func testNetRawCapability(check *checksdb.Check, env *provider.TestEnvironment)](#testnetrawcapability) | Detects and reports any container that requests the `NET_RAW` Linux capability, which is considered forbidden for most workloads. 
| +| [func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment)](#testnosshdaemonsallowed) | Determines whether any pod in the environment exposes an SSH daemon and records compliant/non‑compliant results. | +| [func testNodePort(check *checksdb.Check, env *provider.TestEnvironment)](#testnodeport) | Validates each Kubernetes Service in the test environment; reports services that are of type `NodePort` as non‑compliant and those that are not as compliant. | +| [func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) {}](#testoneprocesspercontainer) | Ensures that each non‑Istio‑proxy container runs only one process. Sets the check result with compliant and non‑compliant containers. | +| [func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment)](#testpodclusterrolebindings) | Checks each pod in the environment to ensure it is not bound to a cluster‑role, unless the pod is owned by a cluster‑wide operator. | +| [func testPodHostIPC(check *checksdb.Check, env *provider.TestEnvironment)](#testpodhostipc) | Confirms each pod in the test environment does **not** set `spec.hostIPC` to `true`. | +| [func testPodHostNetwork(check *checksdb.Check, env *provider.TestEnvironment)](#testpodhostnetwork) | Ensures that `spec.hostNetwork` is not set to `true` for any pod under test. A pod using host networking can expose the node’s network stack to the container, which is a security risk. | +| [func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment)](#testpodhostpid) | Ensures each Pod in the environment does **not** have `spec.hostPID` set to true. A compliant pod passes; a non‑compliant pod is reported. | +| [func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment)](#testpodhostpath) | Ensures every pod’s volumes either lack a `hostPath` or have an empty path. If any host path is present, the pod is flagged as non‑compliant; otherwise it is compliant. 
| +| [func testPodRequests(*checksdb.Check, *provider.TestEnvironment)](#testpodrequests) | Verifies each container in the environment has CPU and memory resource requests set; records compliant and non‑compliant containers. | +| [func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment)](#testpodrolebindings) | Ensures that a pod’s service account does not reference role bindings outside of the allowed CNF namespaces. | +| [func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment)](#testpodserviceaccount) | Determines whether each Pod in the test environment uses a non‑default ServiceAccount. Logs findings and records compliant or non‑compliant objects. | +| [func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvironment)](#testsysnicerealtimecapability) | Determines compliance of each container with respect to the `SYS_NICE` capability when running on a node whose kernel is realtime enabled. | +| [func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvironment)](#testsecconprivilegeescalation) | Confirms each container’s `SecurityContext.AllowPrivilegeEscalation` is not set to `true`. Containers violating this rule are reported as non‑compliant. | +| [func testSecConReadOnlyFilesystem(check *checksdb.Check, env *provider.TestEnvironment)](#testsecconreadonlyfilesystem) | Determines whether every container in the supplied environment has a read‑only root filesystem and records compliance results. | +| [func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment)](#testsecconrunasnonroot) | Confirms each pod’s containers either have `RunAsNonRoot=true` or a non‑zero user ID. Non‑compliant pods and containers are recorded for reporting. 
| +| [func testSysAdminCapability(check *checksdb.Check, env *provider.TestEnvironment)](#testsysadmincapability) | Determines whether any container uses the `SYS_ADMIN` Linux capability, which is disallowed for security reasons. | +| [func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironment)](#testsysptracecapability) | Determines whether each pod that shares a process namespace also grants at least one container the `SYS_PTRACE` capability. The check records compliant and non‑compliant pods. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Creates the “Access‑Control” check group, attaches pre‑test setup and registers individual checks that verify security best practices on Kubernetes resources. + +#### Signature (Go) + +```go +func LoadChecks() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates the “Access‑Control” check group, attaches pre‑test setup and registers individual checks that verify security best practices on Kubernetes resources. | +| **Parameters** | None | +| **Return value** | None (side‑effect: populates the internal checks database) | +| **Key dependencies** |
  • `log.Debug` – logs the start of the suite
  • `WithBeforeEachFn` – attaches a setup routine to the group
  • `checksdb.NewChecksGroup` – creates a new check group
  • `checksdb.Add`, `NewCheck`, `WithSkipCheckFn`, `WithCheckFn` – construct and configure individual checks
| +| **Side effects** | Modifies the global checks registry, logs debug information, registers skip logic and test functions for each check. | +| **How it fits the package** | It is invoked by `certsuite.LoadInternalChecksDB()` to load all access‑control related tests into the shared test framework. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["log.Debug(\Loading %s suite checks\, common.AccessControlTestKey)"] + B --> C["checksGroup := checksdb.NewChecksGroup(common.AccessControlTestKey)"] + C --> D["WithBeforeEachFn(beforeEachFn)"] + D --> E{"Add individual checks"} + E --> F["NewCheck(identifiers.GetTestIDAndLabels(...))"] + F --> G["WithSkipCheckFn(...)"] + G --> H["WithCheckFn(func(c *checksdb.Check) error { testX(c, &env); return nil })"] + H --> I["Add to checksGroup"] + E --> J["Repeat for each check type"] + J --> K["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> log_Debug + func_LoadChecks --> WithBeforeEachFn + func_LoadChecks --> checksdb_NewChecksGroup + func_LoadChecks --> checksdb_Add + func_LoadChecks --> identifiers_GetTestIDAndLabels + func_LoadChecks --> testhelper_GetNoContainersUnderTestSkipFn + func_LoadChecks --> testhelper_GetNoPodsUnderTestSkipFn + func_LoadChecks --> testhelper_GetNoNamespacesSkipFn + func_LoadChecks --> testhelper_GetDaemonSetFailedToSpawnSkipFn + func_LoadChecks --> testhelper_GetSharedProcessNamespacePodsSkipFn + func_LoadChecks --> testhelper_GetNoNodesWithRealtimeKernelSkipFn + func_LoadChecks --> testContainerSCC + func_LoadChecks --> testSysAdminCapability + func_LoadChecks --> testNetAdminCapability + func_LoadChecks --> testNetRawCapability + func_LoadChecks --> testIpcLockCapability + func_LoadChecks --> testBpfCapability + func_LoadChecks --> testSecConRunAsNonRoot + func_LoadChecks --> testSecConPrivilegeEscalation + func_LoadChecks --> testSecConReadOnlyFilesystem + func_LoadChecks --> testContainerHostPort + 
func_LoadChecks --> testPodHostNetwork + func_LoadChecks --> testPodHostPath + func_LoadChecks --> testPodHostIPC + func_LoadChecks --> testPodHostPID + func_LoadChecks --> testNamespace + func_LoadChecks --> testPodServiceAccount + func_LoadChecks --> testPodRoleBindings + func_LoadChecks --> testPodClusterRoleBindings + func_LoadChecks --> testAutomountServiceToken + func_LoadChecks --> testOneProcessPerContainer + func_LoadChecks --> testSYSNiceRealtimeCapability + func_LoadChecks --> testSysPtraceCapability + func_LoadChecks --> testNamespaceResourceQuota + func_LoadChecks --> testNoSSHDaemonsAllowed + func_LoadChecks --> testPodRequests + func_LoadChecks --> test1337UIDs + func_LoadChecks --> testNodePort + func_LoadChecks --> testCrdRoles +``` + +#### Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + certsuite_LoadInternalChecksDB --> func_LoadChecks +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LoadChecks to register the suite checks. +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" +) + +func main() { + accesscontrol.LoadChecks() +} +``` + +--- + +## Local Functions + +### checkForbiddenCapability + +**checkForbiddenCapability** - Determines whether each container in a list uses a disallowed Linux capability. It returns separate slices of report objects for compliant and non‑compliant containers. + +#### Signature (Go) + +```go +func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether each container in a list uses a disallowed Linux capability. It returns separate slices of report objects for compliant and non‑compliant containers. | +| **Parameters** | `containers []*provider.Container` – slice of container metadata.
`capability string` – name of the capability to forbid.
`logger *log.Logger` – logger used for informational and error messages. | +| **Return value** | Two slices:
• `compliantObjects []*testhelper.ReportObject` – report objects for containers that do **not** use the forbidden capability.
• `nonCompliantObjects []*testhelper.ReportObject` – report objects for containers that **do** use it. | +| **Key dependencies** | • `logger.Info`, `logger.Error`
• `isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool` (internal helper)
• `append` (slice operation)
• `testhelper.NewContainerReportObject(...)`
• `AddField` on the report object | +| **Side effects** | Emits log messages; no state mutation beyond slice construction. No I/O or concurrency side‑effects. | +| **How it fits the package** | Used by multiple capability‑checking tests (e.g., `testBpfCapability`, `testNetAdminCapability`) to centralise logic for detecting forbidden capabilities across containers in a pod environment. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over containers"} + B --> C["Log “Testing Container …”"] + C --> D{"Check SecurityContext"} + D -->|"nil"| E["Skip capability check"] + D -->|"has caps & set"| F["Set compliant = false"] + D -->|"else"| G["Keep compliant=true"] + F --> H["Non‑compliant path"] + G --> I["Compliant path"] + H --> J["Log error, create non‑compliant report"] + I --> K["Log info, create compliant report"] + J & K --> L["Append to respective slice"] + L --> M{"Next container?"} + M -->|"yes"| B + M -->|"no"| N["Return slices"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_checkForbiddenCapability --> func_Info + func_checkForbiddenCapability --> func_isContainerCapabilitySet + func_checkForbiddenCapability --> func_append + func_checkForbiddenCapability --> func_NewContainerReportObject + func_checkForbiddenCapability --> func_Error + func_checkForbiddenCapability --> func_AddField +``` + +#### Functions calling `checkForbiddenCapability` + +```mermaid +graph TD + func_testBpfCapability --> func_checkForbiddenCapability + func_testIpcLockCapability --> func_checkForbiddenCapability + func_testNetAdminCapability --> func_checkForbiddenCapability + func_testNetRawCapability --> func_checkForbiddenCapability + func_testSysAdminCapability --> func_checkForbiddenCapability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking checkForbiddenCapability +containers := []*provider.Container{ /* …populate list… */ } +logger := log.New(os.Stdout, "accesscontrol: ", log.LstdFlags) + +compliant, 
nonCompliant := checkForbiddenCapability(containers, "NET_ADMIN", logger) + +// Process results (e.g., report to a test framework) +for _, obj := range compliant { + fmt.Println("✅", obj.Reason) +} +for _, obj := range nonCompliant { + fmt.Println("❌", obj.Reason) +} +``` + +--- + +### getNbOfProcessesInPidNamespace + +**getNbOfProcessesInPidNamespace** - Executes `lsns -p -t pid -n` inside a container to count how many processes share the same PID namespace as the target process. + +#### Signature (Go) + +```go +func(clientsholder.Context, int, clientsholder.Command) (int, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes `lsns -p -t pid -n` inside a container to count how many processes share the same PID namespace as the target process. | +| **Parameters** | `ctx clientsholder.Context –` execution context (namespace, pod, container).
`targetPid int –` PID of the reference process.
`ch clientsholder.Command –` command executor that runs shell commands in a container. | +| **Return value** | ` –` number of processes found; `` – any failure during command execution or parsing. | +| **Key dependencies** | `strconv.Itoa`, `ExecCommandContainer`, `fmt.Errorf`, `strings.Fields`, `strconv.Atoi`. | +| **Side effects** | Performs I/O by running a shell command inside the target container; does not modify program state. | +| **How it fits the package** | Used in access‑control tests to enforce that each container runs only one process, aiding isolation verification. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Build command string"] --> B["Execute lsns via ExecCommandContainer"] + B --> C{"Error from exec?"} + C -- Yes --> D["Return fmt.Errorf"] + C -- No --> E["Check stderr non‑empty"] + E -- Yes --> F["Return fmt.Errorf"] + E -- No --> G["Split output with strings.Fields"] + G --> H{"Output too short?"} + H -- Yes --> I["Return fmt.Errorf"] + H -- No --> J["Parse count via strconv.Atoi"] + J --> K["Return count, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + getNbOfProcessesInPidNamespace --> strconv_Itoa + getNbOfProcessesInPidNamespace --> ExecCommandContainer + getNbOfProcessesInPidNamespace --> fmt_Errorf + getNbOfProcessesInPidNamespace --> strings_Fields + getNbOfProcessesInPidNamespace --> strconv_Atoi +``` + +#### Functions calling `getNbOfProcessesInPidNamespace` (Mermaid) + +```mermaid +graph TD + testOneProcessPerContainer --> getNbOfProcessesInPidNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getNbOfProcessesInPidNamespace +ctx := clientsholder.NewContext("default", "mypod", "mycontainer") +pid := 1234 // PID of a process inside the container +nb, err := getNbOfProcessesInPidNamespace(ctx, pid, clientsholder.GetClientsHolder()) +if err != nil { + log.Fatalf("Failed to count processes: %v", err) +} +fmt.Printf("Number of processes in the namespace: 
%d\n", nb) +``` + +--- + +--- + +### isCSVAndClusterWide + +**isCSVAndClusterWide** - Determines if the CSV referenced by `aNamespace` and `name` is created by a cluster‑wide operator. + +Checks whether a CSV identified by namespace and name belongs to a cluster‑wide operator. + +--- + +#### Signature (Go) + +```go +func isCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the CSV referenced by `aNamespace` and `name` is created by a cluster‑wide operator. | +| **Parameters** | `aNamespace string – namespace of the object`
`name string – name of the object`
`env *provider.TestEnvironment – environment containing known operators` | +| **Return value** | `bool – true if the CSV belongs to a cluster‑wide operator, otherwise false` | +| **Key dependencies** | • `isInstallModeMultiNamespace(installModes []v1alpha1.InstallMode) bool`
• Iteration over `env.Operators` and access of their fields (`Csv`, `IsClusterWide`) | +| **Side effects** | None – purely read‑only logic. | +| **How it fits the package** | Used by higher‑level ownership checks to determine if an operator is cluster‑wide within the access‑control test suite. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Operators"} + B -->|"CSV exists & matches namespace/name"| C{"Check scope"} + C --> D{"IsClusterWide OR multi‑namespace install mode?"} + D -->|"Yes"| E["Return true"] + D -->|"No"| F["Continue loop"] + F --> B + B --> G["End – return false"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isCSVAndClusterWide --> func_isInstallModeMultiNamespace +``` + +--- + +#### Functions calling `isCSVAndClusterWide` (Mermaid) + +```mermaid +graph TD + func_ownedByClusterWideOperator --> func_isCSVAndClusterWide +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking isCSVAndClusterWide +env := &provider.TestEnvironment{ + Operators: []*operator.Operator{ + { + Csv: &v1alpha1.ClusterServiceVersion{Namespace: "openshift-operators", Name: "my-op"}, + IsClusterWide: true, + }, + }, +} +ok := isCSVAndClusterWide("openshift-operators", "my-op", env) +// ok == true +``` + +--- + +### isContainerCapabilitySet + +**isContainerCapabilitySet** - Determines if a container has the specified capability enabled (or if `"ALL"` is set). + +Checks whether a specific capability is explicitly granted to a container via the `securityContext.capabilities.add` list, treating `"ALL"` as an implicit match for any capability. + +--- + +#### Signature (Go) + +```go +func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a container has the specified capability enabled (or if `"ALL"` is set). 
| +| **Parameters** | `containerCapabilities` – pointer to `corev1.Capabilities`; `capability` – string name of the capability to check. | +| **Return value** | `bool`: `true` if the capability or `"ALL"` appears in the add list; otherwise `false`. | +| **Key dependencies** | • `len` (built‑in)
• `stringhelper.StringInSlice` (generic helper)
• `corev1.Capability` type conversion | +| **Side effects** | None – pure function. | +| **How it fits the package** | Utility used by tests to validate that containers do not or do use forbidden capabilities, and to enforce rules around realtime kernel usage. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"containerCapabilities nil?"} + B -- Yes --> C["Return false"] + B -- No --> D{"len(Add) == 0?"} + D -- Yes --> C + D -- No --> E{"StringInSlice(Add, ALL, true)?"} + E -- Yes --> F["Return true"] + E -- No --> G{"StringInSlice(Add, capability, true)?"} + G -- Yes --> F + G -- No --> H["Return false"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isContainerCapabilitySet --> builtin_len + func_isContainerCapabilitySet --> stringhelper_StringInSlice + func_isContainerCapabilitySet --> corev1_Capability +``` + +--- + +#### Functions calling `isContainerCapabilitySet` (Mermaid) + +```mermaid +graph TD + checkForbiddenCapability --> isContainerCapabilitySet + testSYSNiceRealtimeCapability --> isContainerCapabilitySet +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking isContainerCapabilitySet +package main + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" +) + +func main() { + caps := &corev1.Capabilities{ + Add: []corev1.Capability{"NET_ADMIN", "SYS_NICE"}, + } + hasSysNice := accesscontrol.IsContainerCapabilitySet(caps, "SYS_NICE") // package export is not available; this is a conceptual example. + fmt.Printf("Has SYS_NICE? %t\n", hasSysNice) +} +``` + +*Note:* `isContainerCapabilitySet` is unexported. In practice the test suite calls it directly within the same package. + +--- + +### isInstallModeMultiNamespace + +**isInstallModeMultiNamespace** - Determines if any `InstallMode` in the slice has type `AllNamespaces`. 
+ +Checks whether a CSV’s install modes include **AllNamespaces** (indicating multi‑namespace or cluster‑wide support). + +```go +func([]v1alpha1.InstallMode)(bool) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if any `InstallMode` in the slice has type `AllNamespaces`. | +| **Parameters** | `installModes []v1alpha1.InstallMode` – list of install modes to examine. | +| **Return value** | `bool` – `true` if at least one mode is `AllNamespaces`; otherwise `false`. | +| **Key dependencies** | • `len(installModes)` – to iterate over the slice.
• `v1alpha1.InstallModeTypeAllNamespaces` constant. | +| **Side effects** | None (pure function). | +| **How it fits the package** | Used by higher‑level checks (e.g., `isCSVAndClusterWide`) to decide if a CSV can be installed cluster‑wide. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over installModes"} + B -->|"i < len(installModes)"| C["Check mode type"] + C -->|"type == AllNamespaces"| D["Return true"] + C -->|"else"| E["i++"] + E --> B + B -->|"i >= len(installModes)"| F["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_isInstallModeMultiNamespace --> len +``` + +#### Functions calling `isInstallModeMultiNamespace` + +```mermaid +graph TD + func_isCSVAndClusterWide --> func_isInstallModeMultiNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isInstallModeMultiNamespace +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + modes := []v1alpha1.InstallMode{ + {Type: v1alpha1.InstallModeTypeSingleNamespace}, + {Type: v1alpha1.InstallModeTypeAllNamespaces}, // triggers true + } + clusterWide := accesscontrol.isInstallModeMultiNamespace(modes) + fmt.Println("Cluster‑wide support:", clusterWide) // prints: Cluster-wide support: true +} +``` + +--- + +### ownedByClusterWideOperator + +**ownedByClusterWideOperator** - Checks whether any of the provided `topOwners` is a Cluster Service Version (CSV) installed by a cluster‑wide operator. Returns the CSV’s namespace and name if one matches. 
+ +#### Signature (Go) + +```go +func ownedByClusterWideOperator(topOwners map[string]podhelper.TopOwner, env *provider.TestEnvironment) (aNamespace, name string, found bool) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether any of the provided `topOwners` is a Cluster Service Version (CSV) installed by a cluster‑wide operator. Returns the CSV’s namespace and name if one matches. | +| **Parameters** | `topOwners map[string]podhelper.TopOwner` – mapping of owner identifiers to their details.
`env *provider.TestEnvironment` – environment containing operator metadata. | +| **Return value** | `(aNamespace string, name string, found bool)` – namespace and name of the matching CSV, and a boolean indicating success. | +| **Key dependencies** | • `isCSVAndClusterWide` – helper that validates CSV ownership and cluster‑wide status.<br/>
• `env.Operators` – list of operator descriptors in the test environment. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by pod access‑control checks to exempt pods owned by cluster‑wide operators from certain RBAC restrictions. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over topOwners"] --> B{"Is CSV and cluster‑wide?"} + B -- Yes --> C["Return namespace, name, true"] + B -- No --> D["Continue loop"] + D -->|"End loop"| E["Return , , false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ownedByClusterWideOperator --> func_isCSVAndClusterWide +``` + +#### Functions calling `ownedByClusterWideOperator` (Mermaid) + +```mermaid +graph TD + func_testPodClusterRoleBindings --> func_ownedByClusterWideOperator +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ownedByClusterWideOperator + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/podhelper" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func example(env *provider.TestEnvironment, podOwners map[string]podhelper.TopOwner) { + ns, name, ok := accesscontrol.ownedByClusterWideOperator(podOwners, env) + if ok { + fmt.Printf("Pod is owned by cluster‑wide CSV %s/%s\n", ns, name) + } else { + fmt.Println("No matching cluster‑wide CSV found") + } +} +``` + +--- + +### test1337UIDs + +**test1337UIDs** - Verifies each pod in the test environment does not run with `securityContext.runAsUser` set to 1337. Sets compliance results accordingly. + +#### Signature (Go) + +```go +func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies each pod in the test environment does not run with `securityContext.runAsUser` set to 1337. Sets compliance results accordingly. 
| +| **Parameters** | `check *checksdb.Check` – context for logging and result storage.
`env *provider.TestEnvironment` – collection of pods to evaluate. | +| **Return value** | None (side‑effect: sets check result). | +| **Key dependencies** | • `check.LogInfo`
• `check.LogError`
• `put.IsRunAsUserID(leetNum)`
• `testhelper.NewPodReportObject`
• `append` (builtin)
• `check.SetResult` | +| **Side effects** | • Logs informational and error messages.
• Constructs compliance/non‑compliance report objects.
• Stores results via `SetResult`. No external I/O or concurrency. | +| **How it fits the package** | Implements the “Test1337UID” check registered in `LoadChecks`; part of the extended access‑control test suite. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B -->|"For each pod"| C["Log “Testing Pod …”"] + C --> D{"IsRunAsUserID(1337)?"} + D -- Yes --> E["Log error"] + E --> F["Append non‑compliant report"] + D -- No --> G["Log info"] + G --> H["Append compliant report"] + B --> I["End loop"] + I --> J["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_test1337UIDs --> func_LogInfo + func_test1337UIDs --> func_IsRunAsUserID + func_test1337UIDs --> func_LogError + func_test1337UIDs --> builtin_append + func_test1337UIDs --> func_NewPodReportObject + func_test1337UIDs --> func_SetResult +``` + +#### Functions calling `test1337UIDs` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_test1337UIDs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking test1337UIDs +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + check := checksdb.NewCheck("example-check") + env := &provider.TestEnvironment{Pods: []*provider.Pod{/* … */}} + accesscontrol.test1337UIDs(check, env) +} +``` + +--- + +### testAutomountServiceToken + +**testAutomountServiceToken** - Inspects each pod in the test environment to ensure it does not use the default service account and that its `automountServiceAccountToken` setting is explicitly set to `false`. It records compliant or non‑compliant findings. 
+ +#### Signature (Go) + +```go +func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Inspects each pod in the test environment to ensure it does not use the default service account and that its `automountServiceAccountToken` setting is explicitly set to `false`. It records compliant or non‑compliant findings. | +| **Parameters** | *check* (`*checksdb.Check`) – the current check context.
*env* (`*provider.TestEnvironment`) – environment containing all pods to evaluate. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | - `check.LogInfo`
- `check.LogError`
- `clientsholder.GetClientsHolder()`
- `rbac.EvaluateAutomountTokens(client.CoreV1(), pod)`
- `testhelper.NewPodReportObject(...)`
- `append` (built‑in)
- `check.SetResult` | +| **Side effects** | Logs information and errors; creates report objects; updates the check’s result state. No external I/O beyond logging. | +| **How it fits the package** | Part of the *accesscontrol* test suite, specifically implementing the “Pod automount service account” compliance rule. It is invoked by `LoadChecks` when registering this particular check. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["for each pod in env.Pods"] --> B["LogInfo: testing pod"] + B --> C{"pod.Spec.ServiceAccountName == defaultServiceAccount"} + C -- yes --> D["LogError: uses default SA"] + D --> E["append nonCompliantObjects"] + C -- no --> F["GetClientsHolder() -> client"] + F --> G["rbac.EvaluateAutomountTokens(client.CoreV1(), pod)"] + G --> H{"podPassed"} + H -- false --> I["LogError: newMsg"] + I --> J["append nonCompliantObjects"] + H -- true --> K["LogInfo: no automount tokens set to true"] + K --> L["append compliantObjects"] + A --> M["check.SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + accesscontrol.testAutomountServiceToken --> checksdb.Check.LogInfo + accesscontrol.testAutomountServiceToken --> checksdb.Check.LogError + accesscontrol.testAutomountServiceToken --> clientsholder.GetClientsHolder + accesscontrol.testAutomountServiceToken --> rbac.EvaluateAutomountTokens + accesscontrol.testAutomountServiceToken --> testhelper.NewPodReportObject + accesscontrol.testAutomountServiceToken --> checksdb.Check.SetResult +``` + +#### Functions calling `testAutomountServiceToken` (Mermaid) + +```mermaid +graph TD + accesscontrol.LoadChecks --> accesscontrol.testAutomountServiceToken +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testAutomountServiceToken +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + 
"github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func main() { + // Assume env is populated with pods to test + var env provider.TestEnvironment + + // Create a dummy check context + chk := checksdb.NewCheck(nil) + + // Run the automount token test + accesscontrol.testAutomountServiceToken(chk, &env) + + // Results are now available via chk.Result() +} +``` + +--- + +### testBpfCapability + +**testBpfCapability** - Determines whether any container in the test environment requests the forbidden `BPF` capability and records compliance status. + +#### Signature (Go) + +```go +func testBpfCapability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether any container in the test environment requests the forbidden `BPF` capability and records compliance status. | +| **Parameters** | `check *checksdb.Check` – context for reporting; `env *provider.TestEnvironment` – contains the list of containers to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `checkForbiddenCapability(env.Containers, "BPF", check.GetLogger())`
• `check.SetResult(compliantObjects, nonCompliantObjects)` | +| **Side effects** | Generates compliance reports and logs through the provided logger; no external I/O beyond logging. | +| **How it fits the package** | This function is registered as a test case for the *access control* suite (`TestBpfIdentifier`). It enforces best‑practice security by ensuring that containers do not request the potentially dangerous BPF capability. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Call checkForbiddenCapability with BPF"] + B --> C["Receive compliantObjects, nonCompliantObjects"] + C --> D["Invoke check.SetResult(compliantObjects, nonCompliantObjects)"] + D --> E["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testBpfCapability --> func_checkForbiddenCapability + func_testBpfCapability --> func_GetLogger + func_testBpfCapability --> func_SetResult +``` + +#### Functions calling `testBpfCapability` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testBpfCapability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testBpfCapability +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func main() { + // Assume env is already populated with containers to test + var env provider.TestEnvironment + + // Create a Check object for reporting + check := checksdb.NewCheck("TestBpfIdentifier") + + // Run the BPF capability test + accesscontrol.testBpfCapability(check, &env) + + // Inspect results (e.g., via check.GetResult() or similar API) +} +``` + +--- + +### testContainerHostPort + +**testContainerHostPort** - Checks each container in the test environment for configured host‑port mappings and records compliance status. 
+ +#### Signature (Go) + +```go +func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks each container in the test environment for configured host‑port mappings and records compliance status. | +| **Parameters** | `check` – *checksdb.Check* (context for logging & result reporting)
`env` – *provider.TestEnvironment* (contains the list of containers to evaluate) | +| **Return value** | None; results are set via `check.SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `strconv.Itoa`
• `testhelper.NewContainerReportObject`
• `reportObject.SetType`, `.AddField`
• `append` (slice manipulation)
• `check.SetResult` | +| **Side effects** | Emits log messages; mutates the check’s result state by adding compliant/non‑compliant report objects. No external I/O beyond logging. | +| **How it fits the package** | Implements one of the access‑control test cases registered in `LoadChecks`; ensures containers avoid exposing host ports, a security best practice. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> ForEachContainer["Iterate over env.Containers"] + ForEachContainer --> CheckPorts["Check each port for HostPort != 0"] + CheckPorts -->|"HostPort found"| RecordNonCompliant["Create non‑compliant report object"] + CheckPorts -->|"No HostPort"| RecordCompliant["Create compliant report object"] + RecordNonCompliant --> NextContainer + RecordCompliant --> NextContainer + NextContainer --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testContainerHostPort --> LogInfo + testContainerHostPort --> LogError + testContainerHostPort --> append + testContainerHostPort --> NewContainerReportObject + testContainerHostPort --> SetType + testContainerHostPort --> AddField + testContainerHostPort --> strconv_Itoa + testContainerHostPort --> SetResult +``` + +#### Functions calling `testContainerHostPort` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testContainerHostPort +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainerHostPort +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + check := checksdb.NewCheck("example-id") + env := &provider.TestEnvironment{ + Containers: []*provider.Container{ /* populate with test data */ }, + } + accesscontrol.testContainerHostPort(check, env) + // Inspect check results... 
+} +``` + +--- + +### testContainerSCC + +**testContainerSCC** - Scans all pods in the supplied environment, determines each container’s Security Context Constraint (SCC) category via `securitycontextcontainer.CheckPod`, and records compliance results. Containers outside the least‑privileged categories fail the test. + +#### Signature (Go) + +```go +func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Scans all pods in the supplied environment, determines each container’s Security Context Constraint (SCC) category via `securitycontextcontainer.CheckPod`, and records compliance results. Containers outside the least‑privileged categories fail the test. | +| **Parameters** | `check` – *checksdb.Check*: object to log messages and store results.
`env` – *provider.TestEnvironment*: holds all pods to evaluate. | +| **Return value** | None; the function writes its findings directly into the `check` result set. | +| **Key dependencies** | - `securitycontextcontainer.CheckPod` (categorization logic)
- `testhelper.NewContainerReportObject`, `NewReportObject` (report construction)
- `SetType`, `AddField`, `LogInfo`, `LogError`, `SetResult` (logging and result handling) | +| **Side effects** | Mutates the `check` object by adding report objects for compliant/non‑compliant containers and setting the overall result. No external I/O beyond logging. | +| **How it fits the package** | This function implements the “Container Security Context” test in the Access Control suite, ensuring that CNFs (Container Network Functions) run with the minimal required privileges. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Iterate over env.Pods"] + B --> C{"Check pod name"} + C -->|"True"| D["LogInfo: Testing Pod"] + D --> E["Call CheckPod(pod)"] + E --> F["Loop through returned categories"] + F --> G{"Category > NoUID0?"} + G -->|"Yes"| H["Create non‑compliant report object"] + H --> I["Add to nonCompliantObjects"] + G -->|"No"| J["Create compliant report object"] + J --> K["Add to compliantObjects"] + F --> L{"Category > current highLevelCat?"} + L -->|"Yes"| M["Update highLevelCat"] + M --> N["Continue loop"] + N --> O["End of pod loop"] + O --> P["Create overall CNF report object with highLevelCat"] + P --> Q["Append to compliantObjects"] + Q --> R["SetResult on check"] + R --> S["Finish"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testContainerSCC --> func_LogInfo + func_testContainerSCC --> func_CheckPod + func_testContainerSCC --> func_NewContainerReportObject + func_testContainerSCC --> func_SetType + func_testContainerSCC --> func_AddField + func_testContainerSCC --> func_LogError + func_testContainerSCC --> func_NewReportObject + func_testContainerSCC --> func_SetResult +``` + +#### Functions calling `testContainerSCC` + +```mermaid +graph TD + func_LoadChecks --> func_testContainerSCC +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainerSCC +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func main() { + // Create a dummy check and environment + chk := checksdb.NewCheck(nil) + env := &provider.TestEnvironment{Pods: []*provider.Pod{}} + + // Run the SCC test + accesscontrol.testContainerSCC(chk, env) + + // Inspect results (placeholder) + fmt.Printf("Compliant objects: %v\n", chk.CompliantObjects()) +} +``` + +--- + +### testCrdRoles + +**testCrdRoles** - Determines which role rules target Custom Resource Definitions (CRDs) included in the current test environment and records compliance results. + +#### Signature (Go) + +```go +func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines which role rules target Custom Resource Definitions (CRDs) included in the current test environment and records compliance results. | +| **Parameters** | `check` – *checksdb.Check*: object used to log information, errors, and set final results.
`env` – *provider.TestEnvironment*: contains CRD definitions, roles, namespaces, etc., relevant for the check. | +| **Return value** | None (the function records its outcome via `check.SetResult`). | +| **Key dependencies** | • `rbac.GetCrdResources`\n• `rbac.GetAllRules`\n• `rbac.FilterRulesNonMatchingResources`\n• `testhelper.NewNamespacedReportObject` / `NewNamespacedNamedReportObject`\n• `check.LogInfo`, `LogError`, `SetResult` | +| **Side effects** | • Logs informational and error messages to the check.
• Builds slices of compliant and non‑compliant report objects that are stored in the check result. | +| **How it fits the package** | Used by the *TestCrdRoleIdentifier* suite to validate role‑rule alignment with the CRDs under test, ensuring no unintended permissions are granted. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get CRD resources"] --> B["Iterate over roles"] + B --> C{"Role namespace in env.Namespaces?"} + C -- No --> D["Skip role"] + C -- Yes --> E["Retrieve all rules for role"] + E --> F["Filter matching/non‑matching rules"] + F --> G{"Any matching rules?"} + G -- No --> H["Continue to next role"] + G -- Yes --> I["Process each matching rule"] + I --> J["Create compliant report object"] + I --> K["Log info"] + F --> L{"Any non‑matching rules?"} + L -- No --> M["Role only matches CRDs – create compliant named object"] + L -- Yes --> N["Create non‑compliant named object & log error"] + M & N --> O["Append to result slices"] + O --> P["Set check result with compliant/non‑compliant objects"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testCrdRoles --> rbac.GetCrdResources + func_testCrdRoles --> rbac.GetAllRules + func_testCrdRoles --> rbac.FilterRulesNonMatchingResources + func_testCrdRoles --> testhelper.NewNamespacedReportObject + func_testCrdRoles --> testhelper.NewNamespacedNamedReportObject +``` + +#### Functions calling `testCrdRoles` (Mermaid) + +```mermaid +graph TD + LoadChecks --> func_testCrdRoles +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testCrdRoles +func runTest() { + // Assume check and env are already constructed elsewhere. + var check *checksdb.Check + var env *provider.TestEnvironment + + // Execute the role‑CRD compliance logic. + testCrdRoles(check, env) + + // After execution, results can be inspected via check.GetResult(). 
+} +``` + +--- + +### testIpcLockCapability + +**testIpcLockCapability** - Ensures no container in the test environment declares the `IPC_LOCK` capability, which is considered a security risk. + +#### Signature (Go) + +```go +func testIpcLockCapability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures no container in the test environment declares the `IPC_LOCK` capability, which is considered a security risk. | +| **Parameters** | `check *checksdb.Check` – check context containing logger and result setter.
`env *provider.TestEnvironment` – runtime information with the list of containers to evaluate. | +| **Return value** | None (results are recorded via `SetResult`). | +| **Key dependencies** | • `checkForbiddenCapability(env.Containers, "IPC_LOCK", check.GetLogger())`
• `check.SetResult(compliantObjects, nonCompliantObjects)` | +| **Side effects** | Produces logs through the supplied logger and updates the check result state; no external I/O or concurrency beyond that. | +| **How it fits the package** | One of several security‑control checks registered in `LoadChecks`. It specifically targets forbidden capabilities within container specifications. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + testIpcLockCapability --> checkForbiddenCapability + testIpcLockCapability --> SetResult +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testIpcLockCapability --> func_checkForbiddenCapability + func_testIpcLockCapability --> func_SetResult +``` + +#### Functions calling `testIpcLockCapability` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testIpcLockCapability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testIpcLockCapability +env := &provider.TestEnvironment{ + Containers: []*provider.Container{ /* containers to check */ }, +} +check := checksdb.NewCheck("test-ipc-lock") +testIpcLockCapability(check, env) +// Results are now stored in `check` via SetResult. +``` + +--- + +### testNamespace + +**testNamespace** - Validates each namespace supplied by the test environment. It checks for disallowed prefixes and verifies that custom resources (CRs) are only deployed in configured namespaces. Results are recorded as compliant or non‑compliant objects. + +#### Signature (Go) + +```go +func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates each namespace supplied by the test environment. It checks for disallowed prefixes and verifies that custom resources (CRs) are only deployed in configured namespaces. Results are recorded as compliant or non‑compliant objects. | +| **Parameters** | `check *checksdb.Check` – The current check context.
`env *provider.TestEnvironment` – Test environment containing namespaces, CRDs, etc. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | `check.LogInfo`, `check.LogError`, `strings.HasPrefix`, `testhelper.NewNamespacedReportObject`, `namespace.TestCrsNamespaces`, `namespace.GetInvalidCRsNum`, `check.SetResult` | +| **Side effects** | Logs information/errors; mutates the check’s result state. No external I/O beyond logging. | +| **How it fits the package** | Implements the “Namespace Best Practices” test within the Access‑Control suite, ensuring namespace naming and CR placement conform to policy. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate namespaces"} + B -->|"valid prefix"| C["Mark compliant"] + B -->|"invalid prefix"| D["Log error & mark non‑compliant"] + C --> E["Check next namespace"] + D --> E + E --> F{"Any non‑compliant?"} + F -->|"yes"| G["Set result with non‑compliant objects"] + F -->|"no"| H["Continue"] + H --> I{"Test CR namespaces"} + I --> J{"Error?"} + J -->|"yes"| K["Log error & exit"] + J -->|"no"| L["Get invalid CR count"] + L --> M{"CRs valid?"} + M -->|"yes"| N["Mark compliant"] + M -->|"no"| O["Add non‑compliant object"] + O --> P["Set final result"] + N --> P +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNamespace --> check.LogInfo + func_testNamespace --> strings.HasPrefix + func_testNamespace --> check.LogError + func_testNamespace --> testhelper.NewNamespacedReportObject + func_testNamespace --> namespace.TestCrsNamespaces + func_testNamespace --> namespace.GetInvalidCRsNum + func_testNamespace --> check.SetResult +``` + +#### Functions calling `testNamespace` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testNamespace +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNamespace +func runExample() { + env := &provider.TestEnvironment{ + Namespaces: []string{"dev", 
"prod"}, + Crds: []*apiextv1.CustomResourceDefinition{}, // populated elsewhere + } + check := checksdb.NewCheck("namespace-best-practices") + testNamespace(check, env) +} +``` + +--- + +### testNamespaceResourceQuota + +**testNamespaceResourceQuota** - Verifies that every Pod in the environment runs inside a namespace that has an applied ResourceQuota. Sets compliance results accordingly. + +#### Signature (Go) + +```go +func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that every Pod in the environment runs inside a namespace that has an applied ResourceQuota. Sets compliance results accordingly. | +| **Parameters** | `check` – *checksdb.Check*, used for logging and result storage.
`env` – *provider.TestEnvironment*, provides lists of Pods and ResourceQuotas. | +| **Return value** | None (void). Results are stored via `check.SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `testhelper.NewPodReportObject`
• `check.SetResult` | +| **Side effects** | Logs informational or error messages; creates and stores compliance report objects in the check. No external I/O beyond logging. | +| **How it fits the package** | Implements one of the Access Control suite checks, specifically the *TestNamespaceResourceQuota* test registered in `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over Pods"} + B -->|"for each pod"| C["Log info"] + C --> D{"Find matching ResourceQuota"} + D -->|"found"| E["Mark compliant, log info"] + D -->|"not found"| F["Mark non‑compliant, log error"] + E --> G["Append to compliant list"] + F --> H["Append to non‑compliant list"] + G --> I{"Next pod?"} + H --> I + I -->|"yes"| B + I -->|"no"| J["Set check result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNamespaceResourceQuota --> func_LogInfo + func_testNamespaceResourceQuota --> func_LogError + func_testNamespaceResourceQuota --> func_NewPodReportObject + func_testNamespaceResourceQuota --> func_SetResult +``` + +#### Functions calling `testNamespaceResourceQuota` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testNamespaceResourceQuota +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNamespaceResourceQuota +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env is populated with Pods and ResourceQuotas + var env provider.TestEnvironment + // ... populate env ... 
+ + check := checksdb.NewCheck("example-id") + accesscontrol.testNamespaceResourceQuota(check, &env) +} +``` + +--- + +### testNetAdminCapability + +**testNetAdminCapability** - Checks each container in the test environment for the presence of the `NET_ADMIN` capability and records compliance results. + +#### Signature (Go) + +```go +func testNetAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks each container in the test environment for the presence of the `NET_ADMIN` capability and records compliance results. | +| **Parameters** | `check *checksdb.Check` – The current check context.
`env *provider.TestEnvironment` – Test data including containers to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `checkForbiddenCapability(env.Containers, "NET_ADMIN", check.GetLogger())`
• `check.SetResult(compliantObjects, nonCompliantObjects)` | +| **Side effects** | Mutates the `Check` result state; logs informational and error messages through the supplied logger. No external I/O beyond logging. | +| **How it fits the package** | Implements the “TestNetAdminCapability” check used by the Access Control test suite to enforce that containers avoid privileged network capabilities, contributing to overall cluster security compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call checkForbiddenCapability"} + B --> C["Return compliantObjects, nonCompliantObjects"] + C --> D["SetResult on check"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNetAdminCapability --> func_checkForbiddenCapability + func_testNetAdminCapability --> func_GetLogger + func_testNetAdminCapability --> func_SetResult +``` + +#### Functions calling `testNetAdminCapability` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testNetAdminCapability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNetAdminCapability +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func main() { + check := checksdb.NewCheck("example") + env := &provider.TestEnvironment{ + Containers: []*provider.Container{/* ... */}, + } + accesscontrol.testNetAdminCapability(check, env) +} +``` + +--- + +### testNetRawCapability + +**testNetRawCapability** - Detects and reports any container that requests the `NET_RAW` Linux capability, which is considered forbidden for most workloads. 
+ +#### Signature (Go) + +```go +func testNetRawCapability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Detects and reports any container that requests the `NET_RAW` Linux capability, which is considered forbidden for most workloads. | +| **Parameters** | `check *checksdb.Check` – test context used to record results.
`env *provider.TestEnvironment` – execution environment providing a list of containers under test. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `checkForbiddenCapability(env.Containers, "NET_RAW", check.GetLogger())`
• `check.SetResult(compliantObjects, nonCompliantObjects)` | +| **Side effects** | Generates compliance and non‑compliance report objects; no external I/O beyond logging. | +| **How it fits the package** | Part of the Access Control test suite; invoked from `LoadChecks` to enforce container security best practices. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Call checkForbiddenCapability"] + B --> C["Return compliantObjects, nonCompliantObjects"] + C --> D["Store results via SetResult"] + D --> E["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNetRawCapability --> func_checkForbiddenCapability + func_testNetRawCapability --> func_GetLogger + func_testNetRawCapability --> func_SetResult +``` + +#### Functions calling `testNetRawCapability` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testNetRawCapability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNetRawCapability +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func main() { + check := checksdb.NewCheck("test-net-raw") + env := &provider.TestEnvironment{ + Containers: []*provider.Container{}, // populate with test containers + } + accesscontrol.testNetRawCapability(check, env) +} +``` + +--- + +### testNoSSHDaemonsAllowed + +**testNoSSHDaemonsAllowed** - Determines whether any pod in the environment exposes an SSH daemon and records compliant/non‑compliant results. + +A compliance check that verifies each pod does **not** run an SSH daemon by inspecting listening ports. 
+ +#### Signature (Go) + +```go +func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether any pod in the environment exposes an SSH daemon and records compliant/non‑compliant results. | +| **Parameters** | `check *checksdb.Check` – the check context for logging and result storage.
`env *provider.TestEnvironment` – holds pods to evaluate. | +| **Return value** | None (side effect: calls `SetResult`). | +| **Key dependencies** | - `netutil.GetSSHDaemonPort`
- `strconv.ParseInt`
- `netutil.GetListeningPorts`
- `testhelper.NewPodReportObject`
- `check.LogInfo`, `LogError`, `SetResult` | +| **Side effects** | Emits log messages, creates report objects, and stores results via `check.SetResult`. No external I/O beyond container command execution. | +| **How it fits the package** | Part of the Access‑Control suite; added as a check by `LoadChecks` for the *No SSH Daemons Allowed* test case. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over env.Pods"] --> B["Log pod name"] + B --> C["Select first container (cut)"] + C --> D["Get SSH port via GetSSHDaemonPort"] + D -->|"error"| E["Log error, record non‑compliant"] + D -->|"empty port"| F["Log compliant, continue"] + D --> G["Parse port to int"] + G -->|"error"| H["Log error, record non‑compliant"] + G --> I["Build sshPortInfo"] + I --> J["Get listening ports via GetListeningPorts"] + J -->|"error"| K["Log error, record non‑compliant"] + J --> L{"sshPortInfo present?"} + L -- Yes --> M["Log non‑compliant, record pod running SSH"] + L -- No --> N["Log compliant, record pod not running SSH"] + M & N --> O["Loop to next pod"] + O --> P["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNoSSHDaemonsAllowed --> netutil_GetSSHDaemonPort + func_testNoSSHDaemonsAllowed --> strconv_ParseInt + func_testNoSSHDaemonsAllowed --> netutil_GetListeningPorts + func_testNoSSHDaemonsAllowed --> testhelper_NewPodReportObject + func_testNoSSHDaemonsAllowed --> check_LogInfo + func_testNoSSHDaemonsAllowed --> check_LogError + func_testNoSSHDaemonsAllowed --> check_SetResult +``` + +#### Functions calling `testNoSSHDaemonsAllowed` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testNoSSHDaemonsAllowed +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNoSSHDaemonsAllowed +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func example() { + env := &provider.TestEnvironment{ /* populate Pods */ } + check := checksdb.NewCheck("No SSH Daemons Allowed") + testNoSSHDaemonsAllowed(check, env) + // results are available via check.Result() +} +``` + +--- + +### testNodePort + +**testNodePort** - Validates each Kubernetes Service in the test environment; reports services that are of type `NodePort` as non‑compliant and those that are not as compliant. + +#### Signature (Go) + +```go +func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates each Kubernetes Service in the test environment; reports services that are of type `NodePort` as non‑compliant and those that are not as compliant. | +| **Parameters** | `check *checksdb.Check` – check context for logging and result setting.
`env *provider.TestEnvironment` – contains the list of Services to evaluate. | +| **Return value** | None (side‑effecting). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `services.ToString(s)`
• `testhelper.NewReportObject` and its chained `AddField` calls
• `append` to slice of report objects
• `check.SetResult` | +| **Side effects** | Logs messages, creates report objects, updates the check result. No external I/O beyond logging. | +| **How it fits the package** | Implements the “ServicesDoNotUseNodeports” test within the access‑control suite; invoked by `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Services"} + B -->|"Service s"| C["LogInfo Testing %q"] + C --> D{"s.Spec.Type == nodePort"} + D -- Yes --> E["LogError about NodePort"] + E --> F["Create non‑compliant ReportObject"] + F --> G["Append to nonCompliantObjects"] + D -- No --> H["LogInfo that type is not NodePort"] + H --> I["Create compliant ReportObject"] + I --> J["Append to compliantObjects"] + B --> K{"End of loop"} + K --> L["SetResult(compliant, nonCompliant)"] + L --> M["Finish"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNodePort --> func_LogInfo + func_testNodePort --> services.ToString + func_testNodePort --> func_LogError + func_testNodePort --> append + func_testNodePort --> testhelper.NewReportObject + func_testNodePort --> func_AddField + func_testNodePort --> func_SetResult +``` + +#### Functions calling `testNodePort` (Mermaid) + +```mermaid +graph TD + LoadChecks --> func_testNodePort +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNodePort +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/checksdb" +) + +func main() { + // Assume env is populated with Services + var env provider.TestEnvironment + + // Create a Check instance (normally done by the framework) + check := checksdb.NewCheck("TestID") + + // Run the test function directly + accesscontrol.testNodePort(check, &env) + + // Results are now available via check.GetResult() +} +``` + +--- + +### testOneProcessPerContainer + 
+**testOneProcessPerContainer** - Ensures that each non‑Istio‑proxy container runs only one process. Sets the check result with compliant and non‑compliant containers. + +#### Signature (Go) + +```go +func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that each non‑Istio‑proxy container runs only one process. Sets the check result with compliant and non‑compliant containers. | +| **Parameters** | `check` – *checksdb.Check* (test context)
`env` – *provider.TestEnvironment* (environment containing containers, probe pods, etc.) | +| **Return value** | None (side effects only) | +| **Key dependencies** | • `logInfo`, `logError`
• `IsIstioProxy`
• `clientsholder.NewContext`
• `crclient.GetPidFromContainer`
• `getNbOfProcessesInPidNamespace`
• `testhelper.NewContainerReportObject`
• `check.SetResult` | +| **Side effects** | • Logs information and errors.
• Builds slices of compliant/non‑compliant report objects.
• Calls `SetResult` on the check to record findings. | +| **How it fits the package** | It is one of many compliance checks loaded in `LoadChecks`. The function contributes to the Access Control test suite by validating container isolation at the process level. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over env.Containers"] --> B{"Is Istio proxy?"} + B -- Yes --> C["Skip container"] + B -- No --> D["Find probe pod for node"] + D -- Missing --> E["Log error & return"] + D -- Found --> F["Create ocpContext"] + F --> G["Get PID of container"] + G -- Error --> H["Record non‑compliant, continue"] + G -- Success --> I["Count processes in PID namespace"] + I -- Error --> J["Record non‑compliant, continue"] + I -- >1 --> K["Log error & record non‑compliant"] + I -- <=1 --> L["Log info & record compliant"] + K & L --> M["End loop iteration"] + M --> N["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOneProcessPerContainer --> func_LogInfo + func_testOneProcessPerContainer --> func_IsIstioProxy + func_testOneProcessPerContainer --> func_NewContext + func_testOneProcessPerContainer --> func_GetPidFromContainer + func_testOneProcessPerContainer --> func_getNbOfProcessesInPidNamespace + func_testOneProcessPerContainer --> func_NewContainerReportObject + func_testOneProcessPerContainer --> func_LogError + func_testOneProcessPerContainer --> func_SetResult +``` + +#### Functions calling `testOneProcessPerContainer` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testOneProcessPerContainer +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOneProcessPerContainer +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + check := 
checksdb.NewCheck("one-process-per-container") + env := &provider.TestEnvironment{ + Containers: []*provider.Container{}, // populate with real containers + } + accesscontrol.testOneProcessPerContainer(check, env) + // Inspect check.Result for compliance information. +} +``` + +--- + +### testPodClusterRoleBindings + +**testPodClusterRoleBindings** - Checks each pod in the environment to ensure it is not bound to a cluster‑role, unless the pod is owned by a cluster‑wide operator. + +#### Signature (Go) + +```go +func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks each pod in the environment to ensure it is not bound to a cluster‑role, unless the pod is owned by a cluster‑wide operator. | +| **Parameters** | `check` – *checksdb.Check* (test context)
`env` – *provider.TestEnvironment* (Kubernetes objects) | +| **Return value** | none; results are stored via `check.SetResult`. | +| **Key dependencies** | - `IsUsingClusterRoleBinding` on pods
- `GetTopOwner` on pods
- `ownedByClusterWideOperator` helper
- `NewPodReportObject` from testhelper
- Logging functions (`LogInfo`, `LogError`) | +| **Side effects** | Creates report objects, logs information/errors, updates the check result. No external I/O beyond logging. | +| **How it fits the package** | Part of the access‑control test suite; invoked by `LoadChecks` for the *TestPodClusterRoleBindingsBestPractices* check. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over env.Pods"] --> B{"Check if pod uses cluster role"} + B -- error --> C["LogError, create non‑compliant report"] + B -- true --> D{"Owned by cluster‑wide operator?"} + D -- yes --> E["LogInfo, create compliant report"] + D -- no --> F["LogError, create non‑compliant report"] + B -- false --> G["LogInfo, create compliant report"] + G --> H["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testPodClusterRoleBindings --> func_IsUsingClusterRoleBinding + func_testPodClusterRoleBindings --> func_GetTopOwner + func_testPodClusterRoleBindings --> func_ownedByClusterWideOperator + func_testPodClusterRoleBindings --> func_NewPodReportObject +``` + +#### Functions calling `testPodClusterRoleBindings` + +```mermaid +graph TD + func_LoadChecks --> func_testPodClusterRoleBindings +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodClusterRoleBindings +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume check and env are already populated + var check *checksdb.Check + var env *provider.TestEnvironment + + accesscontrol.testPodClusterRoleBindings(check, env) +} +``` + +--- + +### testPodHostIPC + +**testPodHostIPC** - Confirms each pod in the test environment does **not** set `spec.hostIPC` to `true`. 
+ +#### Signature (Go) + +```go +func testPodHostIPC(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Confirms each pod in the test environment does **not** set `spec.hostIPC` to `true`. | +| **Parameters** | `check *checksdb.Check` – current check context.
`env *provider.TestEnvironment` – contains the list of pods to evaluate. | +| **Return value** | None (side‑effects only). | +| **Key dependencies** | • `log.LogInfo`, `log.LogError` for diagnostics.
• `testhelper.NewPodReportObject` for report creation.
• `check.SetResult` to record compliant/non‑compliant pods. | +| **Side effects** | *Mutates the check result via `SetResult`.
* Generates log entries and report objects; no external I/O beyond logging. | +| **How it fits the package** | One of several pod‑level security checks in the access‑control test suite, ensuring Pods do not share IPC namespaces with the host. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over env.Pods"] --> B["Log pod name"] + B --> C{"put.Spec.HostIPC"} + C -- true --> D["Log error"] + D --> E["Append non‑compliant report object"] + C -- false --> F["Log info"] + F --> G["Append compliant report object"] + G & E --> H["check.SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodHostIPC --> log.LogInfo + func_testPodHostIPC --> log.LogError + func_testPodHostIPC --> testhelper.NewPodReportObject + func_testPodHostIPC --> check.SetResult +``` + +#### Functions calling `testPodHostIPC` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testPodHostIPC +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodHostIPC +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/provider" +) + +func main() { + // Assume check and env are properly initialized elsewhere. + var check *checksdb.Check + var env *provider.TestEnvironment + + testPodHostIPC(check, env) +} +``` + +--- + +### testPodHostNetwork + +**testPodHostNetwork** - Ensures that `spec.hostNetwork` is not set to `true` for any pod under test. A pod using host networking can expose the node’s network stack to the container, which is a security risk. + +#### 1) Signature (Go) + +```go +func testPodHostNetwork(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that `spec.hostNetwork` is not set to `true` for any pod under test. 
A pod using host networking can expose the node’s network stack to the container, which is a security risk. | +| **Parameters** | `check *checksdb.Check` – the current check context.
`env *provider.TestEnvironment` – environment containing all pods to evaluate. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError` (logging)
• `testhelper.NewPodReportObject` (report creation)
• `append` (slice manipulation)
• `check.SetResult` (final result aggregation) | +| **Side effects** | Logs messages for each pod. Builds two slices of `*testhelper.ReportObject`: compliant and non‑compliant, then records them with the check. No external I/O or concurrency is involved. | +| **How it fits the package** | Part of the *accesscontrol* test suite; invoked by `LoadChecks` to run a pod‑level security rule during the overall test execution. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B -->|"for each pod"| C["Check put.Spec.HostNetwork"] + C -- true --> D["Log error & add to nonCompliantObjects"] + C -- false --> E["Log info & add to compliantObjects"] + D --> F["Next pod"] + E --> F + F --> G{"All pods processed"} + G --> H["check.SetResult(compliant, nonCompliant)"] + H --> I["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodHostNetwork --> check.LogInfo + func_testPodHostNetwork --> check.LogError + func_testPodHostNetwork --> testhelper.NewPodReportObject + func_testPodHostNetwork --> check.SetResult +``` + +#### 5) Functions calling `testPodHostNetwork` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testPodHostNetwork +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testPodHostNetwork +env := &provider.TestEnvironment{ + Pods: []*v1.Pod{ /* pods to test */ }, +} +check := checksdb.NewCheck("example-test") +testPodHostNetwork(check, env) +// Results are now available via check.Results() +``` + +--- + +### testPodHostPID + +**testPodHostPID** - Ensures each Pod in the environment does **not** have `spec.hostPID` set to true. A compliant pod passes; a non‑compliant pod is reported. 
+ +#### Signature (Go) + +```go +func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each Pod in the environment does **not** have `spec.hostPID` set to true. A compliant pod passes; a non‑compliant pod is reported. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result aggregation.
`env *provider.TestEnvironment` – runtime information containing the list of Pods to evaluate. | +| **Return value** | None (the function records results via `check.SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError` – for audit logs.
• `testhelper.NewPodReportObject` – creates report objects.
• `append` – collects compliant/non‑compliant objects.
• `check.SetResult` – finalizes the test outcome. | +| **Side effects** | Generates log entries and populates the test result set; no external I/O beyond these actions. | +| **How it fits the package** | Part of the Access Control check suite, invoked by `LoadChecks` as one of several Pod‑level security tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Pods"} + B -->|"for each pod"| C["LogInfo Testing Pod ..."] + C --> D{"pod.Spec.HostPID"} + D -- true --> E["LogError HostPid is set..."] + D -- true --> F["append nonCompliantObjects"] + D -- false --> G["LogInfo HostPid not set..."] + D -- false --> H["append compliantObjects"] + E --> I["Create report object (false)"] + F --> I + G --> J["Create report object (true)"] + H --> J + I & J --> K{"Next pod?"} + K -- yes --> B + K -- no --> L["check.SetResult(compliant, nonCompliant)"] + L --> M["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodHostPID --> LogInfo + func_testPodHostPID --> LogError + func_testPodHostPID --> append + func_testPodHostPID --> testhelper.NewPodReportObject + func_testPodHostPID --> SetResult +``` + +#### Functions calling `testPodHostPID` (Mermaid) + +```mermaid +graph TD + LoadChecks --> func_testPodHostPID +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodHostPID +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env and check are already populated elsewhere + var env *provider.TestEnvironment + var check *checksdb.Check + + accesscontrol.testPodHostPID(check, env) +} +``` + +--- + +### testPodHostPath + +**testPodHostPath** - Ensures every pod’s volumes either lack a `hostPath` or have an empty path. 
If any host path is present, the pod is flagged as non‑compliant; otherwise it is compliant. + +#### 1) Signature (Go) + +```go +func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures every pod’s volumes either lack a `hostPath` or have an empty path. If any host path is present, the pod is flagged as non‑compliant; otherwise it is compliant. | +| **Parameters** | `check *checksdb.Check` – current check context for logging and result reporting.
`env *provider.TestEnvironment` – test environment containing the list of pods to evaluate. | +| **Return value** | None (side effects only). | +| **Key dependencies** | • `LogInfo`, `LogError` on `check`
• `testhelper.NewPodReportObject`, `SetType`, `AddField` for report objects
• `check.SetResult` to store results | +| **Side effects** | Generates log entries, builds lists of compliant/non‑compliant pod reports, and assigns them via `SetResult`. | +| **How it fits the package** | Part of the access‑control test suite; called by `LoadChecks` when registering the *PodHostPath* check. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B -->|"For each pod"| C["Log “Testing Pod”"] + C --> D{"Check each volume in pod.Spec.Volumes"} + D -->|"volume with hostPath and non‑empty Path"| E["Log error, create non‑compliant report"] + D -->|"otherwise"| F["continue"] + E --> G["Mark pod as non‑compliant"] + F --> H["After all volumes checked"] + H -->|"podIsCompliant==true"| I["Create compliant report"] + I --> J["Add to compliant list"] + G --> K["Continue loop"] + J --> K + K --> L{"All pods processed?"} + L -- Yes --> M["check.SetResult(compliant, noncompliant)"] + L -- No --> B +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + testPodHostPath --> LogInfo + testPodHostPath --> LogError + testPodHostPath --> append + testPodHostPath --> AddField + testPodHostPath --> SetType + testPodHostPath --> NewPodReportObject + testPodHostPath --> SetResult +``` + +#### 5) Functions calling `testPodHostPath` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testPodHostPath +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testPodHostPath +check := checksdb.NewCheck("TestPodHostPath") +env := &provider.TestEnvironment{ + Pods: []v1.Pod{ /* populate with test pods */ }, +} +testPodHostPath(check, env) +``` + +--- + +--- + +### testPodRequests + +**testPodRequests** - Verifies each container in the environment has CPU and memory resource requests set; records compliant and non‑compliant containers. 
+ +#### Signature (Go) + +```go +func testPodRequests(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies each container in the environment has CPU and memory resource requests set; records compliant and non‑compliant containers. | +| **Parameters** | `check` – *checksdb.Check: test context for logging and result storage.<br>
`env` – *provider.TestEnvironment: contains the list of containers to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `resources.HasRequestsSet(cut, logger)` – checks request presence.<br>
• `testhelper.NewContainerReportObject` – creates report entries.
• `check.LogInfo`, `check.LogError`, `check.GetLogger`, `check.SetResult`. | +| **Side effects** | Logs information/errors; updates the check result with two slices of report objects. No external I/O or concurrency. | +| **How it fits the package** | Implements the “Pod resource requests” compliance test registered in `LoadChecks`; ensures workloads respect best‑practice resource limits. | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> ForEachContainer + ForEachContainer --> CheckRequests + CheckRequests -->|"Missing"| NonCompliant + CheckRequests -->|"Present"| Compliant + NonCompliant --> AppendNonCompliant + Compliant --> AppendCompliant + AppendNonCompliant --> NextIteration + AppendCompliant --> NextIteration + NextIteration -->|"End of list"| SetResult + SetResult --> End +``` + +#### Function dependencies + +```mermaid +graph TD + func_testPodRequests --> func_LogInfo + func_testPodRequests --> resources_HasRequestsSet + func_testPodRequests --> func_NewContainerReportObject + func_testPodRequests --> func_SetResult +``` + +#### Functions calling `testPodRequests` + +```mermaid +graph TD + LoadChecks --> func_testPodRequests +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodRequests +check := checksdb.NewCheck("TestPodRequests") +env := &provider.TestEnvironment{ + Containers: []*provider.Container{ /* populate with containers */ }, +} +testPodRequests(check, env) +``` + +--- + +### testPodRoleBindings + +**testPodRoleBindings** - Ensures that a pod’s service account does not reference role bindings outside of the allowed CNF namespaces. + +#### Signature (Go) + +```go +func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that a pod’s service account does not reference role bindings outside of the allowed CNF namespaces. 
| +| **Parameters** | `check` – check context; `env` – test environment containing pods, role‑bindings and namespace data. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | - `check.LogInfo`, `check.LogError`
- `testhelper.NewPodReportObject`
- `rbacv1.ServiceAccountKind`
- `stringhelper.StringInSlice` | +| **Side effects** | Logs informational or error messages; builds and stores compliant/non‑compliant report objects. | +| **How it fits the package** | Implements one of the AccessControl test checks, specifically for pod role‑binding best practices. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over Pods"} + B --> C["Check serviceAccountName"] + C -- empty/default --> D["Mark non‑compliant"] + C -- valid --> E["Loop RoleBindings"] + E --> F{"Same namespace?"} + F -- yes --> G["Skip this binding"] + F -- no --> H{"Subject matches pod SA?"} + H -- yes & CNF namespace --> I["Allowed, continue"] + H -- yes & same ns --> J["Non‑compliant, record"] + H -- no --> K["Continue loop"] + J --> L["Set non‑compliant flag"] + E --> M{"Found violation?"} + M -- yes --> N["Break outer loop"] + N --> O["End pod processing"] + O --> P{"Compliant?"} + P -- yes --> Q["Record compliant object"] + P -- no --> R["Skip"] + Q --> S["Set results"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodRoleBindings --> LogInfo + func_testPodRoleBindings --> LogError + func_testPodRoleBindings --> testhelper.NewPodReportObject + func_testPodRoleBindings --> rbacv1.ServiceAccountKind + func_testPodRoleBindings --> stringhelper.StringInSlice +``` + +#### Functions calling `testPodRoleBindings` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testPodRoleBindings +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodRoleBindings +env := &provider.TestEnvironment{ + Pods: []*v1.Pod{...}, + RoleBindings: []*rbacv1.RoleBinding{...}, + Namespaces: []string{"cnf-namespace-1", "cnf-namespace-2"}, +} +check := checksdb.NewCheck(...) + +testPodRoleBindings(check, env) +``` + +--- + +--- + +### testPodServiceAccount + +**testPodServiceAccount** - Determines whether each Pod in the test environment uses a non‑default ServiceAccount. 
Logs findings and records compliant or non‑compliant objects. + +#### Signature (Go) + +```go +func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether each Pod in the test environment uses a non‑default ServiceAccount. Logs findings and records compliant or non‑compliant objects. | +| **Parameters** | `check *checksdb.Check` – the current check context.
`env *provider.TestEnvironment` – holds all Pods to evaluate. | +| **Return value** | None (side effects via `check.SetResult`). | +| **Key dependencies** | • `log.Info` / `log.Error` (via `check.LogInfo`, `check.LogError`)
• `testhelper.NewPodReportObject` – creates report objects
• `check.SetResult` – finalizes the check outcome | +| **Side effects** | Writes log messages, appends to internal slices of compliant/non‑compliant report objects, and calls `SetResult`. No external I/O. | +| **How it fits the package** | It is one of several pod‑level checks registered in `LoadChecks` for the AccessControl test suite. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Pods"} + B --> C["LogInfo “Testing Pod …”"] + C --> D{"ServiceAccountName == defaultServiceAccount?"} + D -- Yes --> E["LogError “uses default SA”"] + E --> F["Append nonCompliantObjects"] + D -- No --> G["LogInfo “has valid SA”"] + G --> H["Append compliantObjects"] + B --> I["End loop"] + I --> J["SetResult(compliant, noncompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodServiceAccount --> func_LogInfo + func_testPodServiceAccount --> func_LogError + func_testPodServiceAccount --> testhelper.NewPodReportObject + func_testPodServiceAccount --> func_SetResult +``` + +#### Functions calling `testPodServiceAccount` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testPodServiceAccount +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodServiceAccount +func runExample() { + // Assume env is populated with Pods elsewhere. + var env provider.TestEnvironment + check := checksdb.NewCheck("example-test") + testPodServiceAccount(check, &env) + + // Results are available via check.GetResult() +} +``` + +--- + +### testSYSNiceRealtimeCapability + +**testSYSNiceRealtimeCapability** - Determines compliance of each container with respect to the `SYS_NICE` capability when running on a node whose kernel is realtime enabled. 
+ +#### Signature (Go) + +```go +func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines compliance of each container with respect to the `SYS_NICE` capability when running on a node whose kernel is realtime enabled. | +| **Parameters** | `check *checksdb.Check` – current check context.
`env *provider.TestEnvironment` – test environment containing containers and nodes. | +| **Return value** | None (the result is stored via `check.SetResult`). | +| **Key dependencies** | • `LogInfo`, `LogError` on the check
• `IsRTKernel()` method of a node
• `isContainerCapabilitySet` helper
• `testhelper.NewContainerReportObject`
• `check.SetResult` | +| **Side effects** | Emits log messages, builds slices of compliant/non‑compliant report objects, and assigns them to the check result. No external state is modified. | +| **How it fits the package** | Part of the Access Control test suite; specifically implements the *SYS_NICE Realtime Capability* compliance rule that is registered in `LoadChecks`. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Containers"} + B --> C["Log container name"] + C --> D{"Node has RT kernel?"} + D -- No --> E["Add compliant object (no RT)"] + D -- Yes --> F{"Has SYS_NICE capability?"} + F -- No --> G["Add non‑compliant object"] + F -- Yes --> H["Add compliant object"] + E --> I + G --> I + H --> I + I --> J{"Next container"} + J --> B + B --> K["Set check result (compliant, non‑compliant)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testSYSNiceRealtimeCapability --> LogInfo + func_testSYSNiceRealtimeCapability --> IsRTKernel + func_testSYSNiceRealtimeCapability --> isContainerCapabilitySet + func_testSYSNiceRealtimeCapability --> testhelper.NewContainerReportObject + func_testSYSNiceRealtimeCapability --> LogError + func_testSYSNiceRealtimeCapability --> SetResult +``` + +#### Functions calling `testSYSNiceRealtimeCapability` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testSYSNiceRealtimeCapability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testSYSNiceRealtimeCapability +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env is prepared with containers and nodes + var env *provider.TestEnvironment + + check := checksdb.NewCheck(nil) // create a dummy check context + accesscontrol.testSYSNiceRealtimeCapability(check, env) +} +``` + +--- + +--- + +### 
testSecConPrivilegeEscalation + +**testSecConPrivilegeEscalation** - Confirms each container’s `SecurityContext.AllowPrivilegeEscalation` is not set to `true`. Containers violating this rule are reported as non‑compliant. + +#### Signature (Go) + +```go +func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Confirms each container’s `SecurityContext.AllowPrivilegeEscalation` is not set to `true`. Containers violating this rule are reported as non‑compliant. | +| **Parameters** | `check *checksdb.Check` – context for logging and result storage.
`env *provider.TestEnvironment` – holds the list of containers under test. | +| **Return value** | None (the function reports results via `check.SetResult`). | +| **Key dependencies** | - `check.LogInfo`, `check.LogError`
- `testhelper.NewContainerReportObject`
- `check.SetResult` | +| **Side effects** | Emits log entries, creates report objects for compliant and non‑compliant containers, updates the check result. No external I/O beyond logging. | +| **How it fits the package** | One of many security‑context checks in the access‑control test suite; executed during `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Containers"} + B --> C["Log info about container"] + C --> D{"Check AllowPrivilegeEscalation"} + D -->|"true"| E["Log error, create non‑compliant report"] + D -->|"false"| F["Create compliant report"] + E --> G["Add to nonCompliantObjects"] + F --> H["Add to compliantObjects"] + B --> I{"All containers processed?"} + I --> J["SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testSecConPrivilegeEscalation --> func_LogInfo + func_testSecConPrivilegeEscalation --> func_LogError + func_testSecConPrivilegeEscalation --> func_NewContainerReportObject + func_testSecConPrivilegeEscalation --> func_SetResult +``` + +#### Functions calling `testSecConPrivilegeEscalation` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testSecConPrivilegeEscalation +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testSecConPrivilegeEscalation +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env is populated with containers to test + var env provider.TestEnvironment + // Prepare a check instance + check := checksdb.NewCheck(nil) + // Run the privilege‑escalation test + accesscontrol.testSecConPrivilegeEscalation(check, &env) + + // Results are now available via check.GetResult() +} +``` + +--- + +### testSecConReadOnlyFilesystem + 
+**testSecConReadOnlyFilesystem** - Determines whether every container in the supplied environment has a read‑only root filesystem and records compliance results. + +#### Signature (Go) + +```go +func testSecConReadOnlyFilesystem(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether every container in the supplied environment has a read‑only root filesystem and records compliance results. | +| **Parameters** | `check` – *checksdb.Check* (logging & result handling); `env` – *provider.TestEnvironment* (list of pods to inspect) | +| **Return value** | None | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `cut.IsReadOnlyRootFilesystem(check.GetLogger())`
• `testhelper.NewPodReportObject`
`check.SetResult` | +| **Side effects** | Generates log messages; builds slices of report objects; stores results via `SetResult`. No external I/O or state mutation beyond the check object. | +| **How it fits the package** | Used by the access‑control test suite to validate container security context configuration, specifically the `readOnlyRootFilesystem` field. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over Pods"} + B -->|"for each pod"| C{"Iterate over Containers"} + C --> D["Check IsReadOnlyRootFilesystem"] + D -- Yes --> E["Log success, add compliant object"] + D -- No --> F["Log error, add non‑compliant object"] + E & F --> G["Continue to next container"] + G -->|"All containers processed"| H["SetResult with slices"] + H --> I["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testSecConReadOnlyFilesystem --> func_LogInfo + func_testSecConReadOnlyFilesystem --> func_IsReadOnlyRootFilesystem + func_testSecConReadOnlyFilesystem --> func_GetLogger + func_testSecConReadOnlyFilesystem --> func_NewPodReportObject + func_testSecConReadOnlyFilesystem --> func_LogError + func_testSecConReadOnlyFilesystem --> func_SetResult +``` + +#### Functions calling `testSecConReadOnlyFilesystem` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testSecConReadOnlyFilesystem +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testSecConReadOnlyFilesystem +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env and check are obtained from the test harness + var env *provider.TestEnvironment + var check *checksdb.Check + + accesscontrol.testSecConReadOnlyFilesystem(check, env) // internal function call +} +``` + +(Note: In real usage the function is 
called indirectly via `LoadChecks` during test execution.) + +--- + +### testSecConRunAsNonRoot + +**testSecConRunAsNonRoot** - Confirms each pod’s containers either have `RunAsNonRoot=true` or a non‑zero user ID. Non‑compliant pods and containers are recorded for reporting. + +#### Signature (Go) + +```go +func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Confirms each pod’s containers either have `RunAsNonRoot=true` or a non‑zero user ID. Non‑compliant pods and containers are recorded for reporting. | +| **Parameters** | `check *checksdb.Check` – the test context; `env *provider.TestEnvironment` – environment holding the pods to evaluate | +| **Return value** | None (side effects on `check`) | +| **Key dependencies** | • `LogInfo`, `LogError` (logging via `check`)<br>
• `GetRunAsNonRootFalseContainers` (pod method)
• `NewPodReportObject`, `NewContainerReportObject` (report helpers)
• `SetResult` (finalizes test outcome) | +| **Side effects** | Logs progress and errors, builds compliance/non‑compliance report objects, sets the result on `check`. No external I/O beyond logging. | +| **How it fits the package** | One of many security context checks registered in `LoadChecks`; specifically addresses the “RunAsNonRoot” best practice for containers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B --> C["Log pod info"] + C --> D{"Get non‑compliant containers"} + D -- no containers --> E["Add compliant pod report"] + D -- some containers --> F["Add non‑compliant pod report"] + F --> G{"For each container"} + G --> H["Log error"] + G --> I["Add non‑compliant container report"] + I --> J + J --> K["SetResult on check"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testSecConRunAsNonRoot --> LogInfo + testSecConRunAsNonRoot --> GetRunAsNonRootFalseContainers + testSecConRunAsNonRoot --> NewPodReportObject + testSecConRunAsNonRoot --> NewContainerReportObject + testSecConRunAsNonRoot --> LogError + testSecConRunAsNonRoot --> SetResult +``` + +#### Functions calling `testSecConRunAsNonRoot` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testSecConRunAsNonRoot +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testSecConRunAsNonRoot +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func example() { + env := &provider.TestEnvironment{ /* populate with Pods */ } + check := checksdb.NewCheck(...) + accesscontrol.testSecConRunAsNonRoot(check, env) +} +``` + +--- + +### testSysAdminCapability + +**testSysAdminCapability** - Determines whether any container uses the `SYS_ADMIN` Linux capability, which is disallowed for security reasons. 
+ +#### Signature (Go) + +```go +func testSysAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether any container uses the `SYS_ADMIN` Linux capability, which is disallowed for security reasons. | +| **Parameters** | `check` – *checksdb.Check: context and result holder.
`env` – *provider.TestEnvironment: contains test data such as containers to evaluate. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | Calls:<br>
  • `checkForbiddenCapability(env.Containers, "SYS_ADMIN", check.GetLogger())`
  • `check.SetResult(compliantObjects, nonCompliantObjects)`
| +| **Side effects** | Logs findings through the provided logger; updates the `Check` object with compliant and non‑compliant container reports. No external I/O beyond logging. | +| **How it fits the package** | Part of the Access Control test suite; invoked by `LoadChecks` to enforce security best practices around Linux capabilities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve logger"} + B --> C["Call checkForbiddenCapability"] + C --> D["SetResult on check"] + D --> E["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testSysAdminCapability --> func_checkForbiddenCapability + func_testSysAdminCapability --> func_GetLogger + func_testSysAdminCapability --> func_SetResult +``` + +#### Functions calling `testSysAdminCapability` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testSysAdminCapability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testSysAdminCapability +func runExample() { + // Assume we have a populated Check and TestEnvironment. + var check *checksdb.Check + var env *provider.TestEnvironment + + // The function does not return a value; it records results in the check object. + testSysAdminCapability(check, env) +} +``` + +--- + +### testSysPtraceCapability + +**testSysPtraceCapability** - Determines whether each pod that shares a process namespace also grants at least one container the `SYS_PTRACE` capability. The check records compliant and non‑compliant pods. + +#### 1. Signature (Go) + +```go +func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2. Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether each pod that shares a process namespace also grants at least one container the `SYS_PTRACE` capability. The check records compliant and non‑compliant pods. 
| +| **Parameters** | `check *checksdb.Check` – test harness object for logging and result storage.
`env *provider.TestEnvironment` – provides the list of shared‑process‑namespace pods via `GetShareProcessNamespacePods()`. | +| **Return value** | None; results are stored in the `check` instance. | +| **Key dependencies** | - `env.GetShareProcessNamespacePods()`
- `stringhelper.StringInSlice`
- `testhelper.NewPodReportObject`
- `check.LogInfo`, `check.LogError`
- `check.SetResult` | +| **Side effects** | Emits log messages, builds report objects, and updates the check result. No external state is modified. | +| **How it fits the package** | Part of the access‑control test suite; invoked via a `Check` in `LoadChecks()` to enforce best practices around pod security contexts. | + +#### 3. Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate pods"} + B -->|"for each pod"| C{"Inspect containers"} + C -->|"has SYS_PTRACE?"| D["Mark compliant"] + C -->|"no SYS_PTRACE"| E["Mark non‑compliant"] + D --> F["Append to compliant list"] + E --> G["Append to non‑compliant list"] + F & G --> H["Set check result"] + H --> I["End"] +``` + +#### 4. Function dependencies (Mermaid) + +```mermaid +graph TD + func_testSysPtraceCapability --> env.GetShareProcessNamespacePods + func_testSysPtraceCapability --> stringhelper.StringInSlice + func_testSysPtraceCapability --> testhelper.NewPodReportObject + func_testSysPtraceCapability --> check.LogInfo + func_testSysPtraceCapability --> check.LogError + func_testSysPtraceCapability --> check.SetResult +``` + +#### 5. Functions calling `testSysPtraceCapability` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testSysPtraceCapability +``` + +#### 6. Usage example (Go) + +```go +// Minimal example invoking testSysPtraceCapability +func ExampleTest() { + env := provider.NewTestEnvironment(/* … */) + check := checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestSysPtraceCapabilityIdentifier)) + + // The function does not return a value; it records its outcome in the check. 
+ testSysPtraceCapability(check, env) + + fmt.Println("Result:", check.Result()) +} +``` + +--- + +--- diff --git a/docs/tests/accesscontrol/namespace/namespace.md b/docs/tests/accesscontrol/namespace/namespace.md new file mode 100644 index 000000000..569457707 --- /dev/null +++ b/docs/tests/accesscontrol/namespace/namespace.md @@ -0,0 +1,286 @@ +# Package namespace + +**Path**: `tests/accesscontrol/namespace` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [GetInvalidCRsNum](#getinvalidcrsnum) + - [TestCrsNamespaces](#testcrsnamespaces) +- [Local Functions](#local-functions) + - [getCrsPerNamespaces](#getcrspernamespaces) + +## Overview + +The namespace package provides utilities for validating that Custom Resources (CRs) are deployed only in allowed namespaces during access‑control testing. + +### Key Features + +- TestCrsNamespaces verifies each CRD’s instances against a list of permitted namespaces and returns a map of offending CRs +- GetInvalidCRsNum counts how many CRs violate the namespace rule +- getCrsPerNamespaces enumerates all CR objects for a given CRD, grouping them by namespace + +### Design Notes + +- Functions operate on apiextensions v1 CRDs and rely on an internal client holder for API access +- Error handling is performed via log.Logger to capture diagnostic information +- The package assumes tests run in a cluster context where the client set is available; otherwise operations will fail + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetInvalidCRsNum(invalidCrs map[string]map[string][]string, logger *log.Logger) int](#getinvalidcrsnum) | Determines how many custom resources (CRs) are not located in their expected namespaces. 
| +| [func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespaces []string, logger *log.Logger) (invalidCrs map[string]map[string][]string, err error)](#testcrsnamespaces) | Checks each CRD’s instances to ensure they are only deployed in namespaces supplied by `configNamespaces`. Returns a nested map of offending CRs. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (*apiextv1.CustomResourceDefinition)(map[string][]string, error)](#getcrspernamespaces) | Enumerates all Custom Resource (CR) objects created from the supplied `CustomResourceDefinition` (CRD), grouping them by namespace. | + +## Exported Functions + +### GetInvalidCRsNum + +**GetInvalidCRsNum** - Determines how many custom resources (CRs) are not located in their expected namespaces. + +#### Signature (Go) + +```go +func GetInvalidCRsNum(invalidCrs map[string]map[string][]string, logger *log.Logger) int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines how many custom resources (CRs) are not located in their expected namespaces. | +| **Parameters** | `invalidCrs` – a nested map where the first key is the CRD name, the second key is a namespace, and the value is a slice of CR names that were found outside the configured namespaces.
`logger` – a logger used to record each invalid CR encountered. | +| **Return value** | `int` – total count of all invalid CRs across all CRDs and namespaces. | +| **Key dependencies** | Calls `logger.Error` for every invalid CR. | +| **Side effects** | Emits an error log entry per invalid CR; does not modify any external state or the input map. | +| **How it fits the package** | Used by namespace tests to report violations where CRs exist in disallowed namespaces, feeding into compliance metrics. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> IterateCRDs + IterateCRDs --> IterateNamespaces + IterateNamespaces --> IterateCrNames + IterateCrNames --> LogError + LogError --> IncrementCounter + IncrementCounter --> NextCrName + NextCrName -- no more crs? --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetInvalidCRsNum --> func_Error +``` + +#### Functions calling `GetInvalidCRsNum` (Mermaid) + +```mermaid +graph TD + func_testNamespace --> func_GetInvalidCRsNum +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetInvalidCRsNum +package main + +import ( + "fmt" + "log" + "os" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/namespace" +) + +func main() { + invalidCrs := map[string]map[string][]string{ + "example.crd": { + "default": {"bad-cr-1", "bad-cr-2"}, + "prod": {"another-bad-cr"}, + }, + } + + logger := log.New(os.Stdout, "", log.LstdFlags) + count := namespace.GetInvalidCRsNum(invalidCrs, logger) + + fmt.Printf("Found %d invalid CR(s)\n", count) +} +``` + +--- + +--- + +### TestCrsNamespaces + +**TestCrsNamespaces** - Checks each CRD’s instances to ensure they are only deployed in namespaces supplied by `configNamespaces`. Returns a nested map of offending CRs.
+ +#### Signature (Go) + +```go +func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespaces []string, logger *log.Logger) (invalidCrs map[string]map[string][]string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks each CRD’s instances to ensure they are only deployed in namespaces supplied by `configNamespaces`. Returns a nested map of offending CRs. | +| **Parameters** | `crds []*apiextv1.CustomResourceDefinition` – list of CRDs to inspect.
`configNamespaces []string` – allowed namespace names.
`logger *log.Logger` – logger for reporting errors. | +| **Return value** | `invalidCrs map[string]map[string][]string` – mapping from CRD name → offending namespace → slice of CR names.
`err error` – failure during data retrieval or processing. | +| **Key dependencies** | • `make` (built‑in)
• `getCrsPerNamespaces`
• `fmt.Errorf`
• `stringhelper.StringInSlice`
• `logger.Error`
• `append` (built‑in) | +| **Side effects** | Logs errors for each CR found in a non‑configured namespace. No external I/O beyond logging; operates only on provided data structures. | +| **How it fits the package** | Part of the *namespace* test suite, enabling checks that all custom resources reside within permitted namespaces before other access control validations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + start["Start"] --> initMap["Initialize invalidCrs map"] + initMap --> loopCRDs["For each CRD in crds"] + loopCRDs --> getNamespaces["Call getCrsPerNamespaces(crd)"] + getNamespaces --> checkErr["If error → return"] + checkErr --> processNamespaces["Process namespaces and names"] + processNamespaces --> checkAllowed["Is namespace allowed?"] + checkAllowed -- No --> logError["logger.Error(... )"] + logError --> initCRDMap["Ensure map entry exists"] + initCRDMap --> addNames["Append CR names"] + addNames --> loopEnd["Next namespace / CRD"] + loopEnd --> finish["Return invalidCrs, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestCrsNamespaces --> make + func_TestCrsNamespaces --> getCrsPerNamespaces + func_TestCrsNamespaces --> fmt.Errorf + func_TestCrsNamespaces --> stringhelper.StringInSlice + func_TestCrsNamespaces --> logger.Error + func_TestCrsNamespaces --> append +``` + +#### Functions calling `TestCrsNamespaces` (Mermaid) + +```mermaid +graph TD + testNamespace --> func_TestCrsNamespaces +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestCrsNamespaces +import ( + "log" + + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +func main() { + // Assume crds and allowed namespaces are already populated. 
+ var crds []*apiextv1.CustomResourceDefinition + allowed := []string{"default", "prod"} + + logger := log.Default() + invalid, err := TestCrsNamespaces(crds, allowed, logger) + if err != nil { + logger.Fatalf("test failed: %v", err) + } + // Process `invalid` as needed. +} +``` + +--- + +## Local Functions + +### getCrsPerNamespaces + +**getCrsPerNamespaces** - Enumerates all Custom Resource (CR) objects created from the supplied `CustomResourceDefinition` (CRD), grouping them by namespace. + +#### Signature (Go) + +```go +func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces map[string][]string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Enumerates all Custom Resource (CR) objects created from the supplied `CustomResourceDefinition` (CRD), grouping them by namespace. | +| **Parameters** | `aCrd *apiextv1.CustomResourceDefinition` – The CRD to inspect. | +| **Return value** | `crdNamespaces map[string][]string` – A map where keys are namespace names and values are slices of CR names.
`err error` – Error encountered while listing resources, if any. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – retrieves a shared Kubernetes client set.
• `oc.DynamicClient.Resource(gvr).List(...)` – dynamic list call per API version.
• Logging via `log.Debug` and `log.Error`. | +| **Side effects** | No state mutation; only reads from the cluster. No I/O beyond API calls. | +| **How it fits the package** | Used by test helpers to verify that CRs are deployed in permitted namespaces. It provides the raw data for validation logic elsewhere. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over aCrd.Spec.Versions"} + B --> C["Build GVR: Group/Version/Plural"] + C --> D["Debug log: CRD name, version, group, plural"] + D --> E["List resources via DynamicClient"] + E -- error --> F["Error log & return err"] + E -- ok --> G{"Iterate over returned items"} + G --> H["Extract metadata.name and namespace"] + H --> I["Normalize nil values to empty string"] + I --> J["Append name to crdNamespaces"] + J --> G + G --> K["End of version loop"] + K --> L["Return crdNamespaces, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getCrsPerNamespaces --> func_GetClientsHolder + func_getCrsPerNamespaces --> func_Debug + func_getCrsPerNamespaces --> func_List + func_getCrsPerNamespaces --> func_Error + func_getCrsPerNamespaces --> func_Sprintf + func_getCrsPerNamespaces --> func_append +``` + +#### Functions calling `getCrsPerNamespaces` (Mermaid) + +```mermaid +graph TD + func_TestCrsNamespaces --> func_getCrsPerNamespaces +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getCrsPerNamespaces +import ( + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +func demo(crd *v1.CustomResourceDefinition) { + namespaces, err := getCrsPerNamespaces(crd) + if err != nil { + // handle error + } + for ns, names := range namespaces { + fmt.Printf("Namespace %q has CRs: %v\n", ns, names) + } +} +``` + +--- diff --git a/docs/tests/accesscontrol/resources/resources.md b/docs/tests/accesscontrol/resources/resources.md new file mode 100644 index 000000000..cf8d8f10f --- /dev/null +++ 
b/docs/tests/accesscontrol/resources/resources.md @@ -0,0 +1,210 @@ +# Package resources + +**Path**: `tests/accesscontrol/resources` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [HasExclusiveCPUsAssigned](#hasexclusivecpusassigned) + - [HasRequestsSet](#hasrequestsset) + +## Overview + +Utility helpers for verifying Kubernetes container resource specifications in tests. + +### Key Features + +- Checks if a container has both CPU and memory requests set +- Determines whether a container’s limits enable exclusive CPU pool usage +- Logs detailed errors when required fields are missing + +### Design Notes + +- Assumes integer‑valued CPU/memory limits to qualify for exclusive pools +- Only returns true when all relevant fields are present and non‑zero +- Best used in unit tests that validate resource configuration correctness + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func HasExclusiveCPUsAssigned(cut *provider.Container, logger *log.Logger) bool](#hasexclusivecpusassigned) | Checks whether a container’s CPU and memory limits are set in such a way that it will run on an exclusive CPU pool. It returns `true` only when both CPU and memory limits are present, integer‑valued, and match the corresponding requests. | +| [func (*provider.Container, *log.Logger) bool](#hasrequestsset) | Determines if a Kubernetes container has CPU and memory requests defined. It logs any missing fields. | + +## Exported Functions + +### HasExclusiveCPUsAssigned + +**HasExclusiveCPUsAssigned** - Checks whether a container’s CPU and memory limits are set in such a way that it will run on an exclusive CPU pool. It returns `true` only when both CPU and memory limits are present, integer‑valued, and match the corresponding requests. 
+ +#### 1️⃣ Signature (Go) + +```go +func HasExclusiveCPUsAssigned(cut *provider.Container, logger *log.Logger) bool +``` + +--- + +#### 2️⃣ Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether a container’s CPU and memory limits are set in such a way that it will run on an exclusive CPU pool. It returns `true` only when both CPU and memory limits are present, integer‑valued, and match the corresponding requests. | +| **Parameters** | `cut *provider.Container` – Kubernetes container definition.
`logger *log.Logger` – Logger for debugging messages. | +| **Return value** | `bool` – `true` if the container qualifies for an exclusive CPU pool; otherwise `false`. | +| **Key dependencies** | Calls resource quantity helpers (`Cpu`, `Memory`, `IsZero`, `AsInt64`) and uses `logger.Debug`. | +| **Side effects** | Emits debug logs when limits are missing, non‑integer, or mismatched. No state mutation outside the logger. | +| **How it fits the package** | Part of the *resources* subpackage that validates CPU‑pool assignment logic used by performance tests and policy checks. | + +--- + +#### 3️⃣ Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Check cpuLimits.IsZero() || memLimits.IsZero()"} + B -- true --> C["Log missing limits & return false"] + B -- false --> D{"cpuLimits.AsInt64() is integer?"} + D -- false --> E["Log non‑integer CPU limit & return false"] + D -- true --> F{"cpuLimitsVal == cpuRequestsVal && memLimitsVal == memRequestsVal"} + F -- true --> G["Return true (exclusive pool)"] + F -- false --> H["Log mismatched resources & return false"] +``` + +--- + +#### 4️⃣ Function dependencies (Mermaid) + +```mermaid +graph TD + func_HasExclusiveCPUsAssigned --> func_Cpu + func_HasExclusiveCPUsAssigned --> func_Memory + func_HasExclusiveCPUsAssigned --> func_IsZero + func_HasExclusiveCPUsAssigned --> func_AsInt64 + func_HasExclusiveCPUsAssigned --> func_Debug +``` + +--- + +#### 5️⃣ Functions calling `HasExclusiveCPUsAssigned` (Mermaid) + +```mermaid +graph TD + func_testExclusiveCPUPool --> func_HasExclusiveCPUsAssigned +``` + +--- + +#### 6️⃣ Usage example (Go) + +```go +// Minimal example invoking HasExclusiveCPUsAssigned +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/resources" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance/provider" +) + +func main() { + // Assume `c` is a *provider.Container obtained from a Pod spec + var c *provider.Container 
+ + logger := log.Default() + if resources.HasExclusiveCPUsAssigned(c, logger) { + logger.Println("Container uses an exclusive CPU pool") + } else { + logger.Println("Container does not use an exclusive CPU pool") + } +} +``` + +--- + +--- + +### HasRequestsSet + +**HasRequestsSet** - Determines if a Kubernetes container has CPU and memory requests defined. It logs any missing fields. + +#### Signature (Go) + +```go +func HasRequestsSet(cut *provider.Container, logger *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a Kubernetes container has CPU and memory requests defined. It logs any missing fields. | +| **Parameters** | `cut *provider.Container` – the container to inspect;
`logger *log.Logger` – logger used for error reporting | +| **Return value** | `bool` – `true` when both CPU and memory requests are set, otherwise `false` | +| **Key dependencies** | `len`, `logger.Error`, `cut.Resources.Requests.Cpu().IsZero()`, `cut.Resources.Requests.Memory().IsZero()` | +| **Side effects** | Emits log entries via the provided logger; no mutation of inputs. | +| **How it fits the package** | Utility for access‑control tests to validate pod resource request compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"len(cut.Resources.Requests) == 0"} + B -- Yes --> C["logger.Error(missing requests); passed = false"] + B -- No --> D{"cut.Resources.Requests.Cpu().IsZero()"} + D -- Yes --> E["logger.Error(missing CPU requests); passed = false"] + D -- No --> F{"cut.Resources.Requests.Memory().IsZero()"} + F -- Yes --> G["logger.Error(missing memory requests); passed = false"] + F -- No --> H["passed remains true"] + C --> I["Return passed"] + E --> I + G --> I + H --> I +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + HasRequestsSet --> len + HasRequestsSet --> loggerError + HasRequestsSet --> cpuIsZero + HasRequestsSet --> memoryIsZero +``` + +#### Functions calling `HasRequestsSet` (Mermaid) + +```mermaid +graph TD + testPodRequests --> HasRequestsSet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking HasRequestsSet +package main + +import ( + "fmt" + "log" + "os" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/resources" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/provider" +) + +func main() { + logger := log.New(os.Stdout, "", 0) + container := &provider.Container{ + // populate Resources.Requests with CPU and Memory as needed + } + if resources.HasRequestsSet(container, logger) { + fmt.Println("Container has resource requests") + } else { + fmt.Println("Container is missing resource requests") + } +} +``` + +--- diff --git
a/docs/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.md b/docs/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.md new file mode 100644 index 000000000..476c0b351 --- /dev/null +++ b/docs/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.md @@ -0,0 +1,905 @@ +# Package securitycontextcontainer + +**Path**: `tests/accesscontrol/securitycontextcontainer` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [ContainerSCC](#containerscc) + - [PodListCategory](#podlistcategory) +- [Exported Functions](#exported-functions) + - [AllVolumeAllowed](#allvolumeallowed) + - [CategoryID.String](#categoryid.string) + - [CheckPod](#checkpod) + - [GetContainerSCC](#getcontainerscc) + - [OkNok.String](#oknok.string) + - [PodListCategory.String](#podlistcategory.string) +- [Local Functions](#local-functions) + - [checkContainCategory](#checkcontaincategory) + - [checkContainerCategory](#checkcontainercategory) + - [compareCategory](#comparecategory) + - [updateCapabilitiesFromContainer](#updatecapabilitiesfromcontainer) + +## Overview + +The securitycontextcontainer package evaluates Kubernetes pod and container specifications against predefined security‑context categories, producing a compliance report for each container. 
+ +### Key Features + +- Categorises containers by comparing their ContainerSCC profile to four predefined SCC categories +- Provides utilities to check volume type constraints and capability sets +- Exposes human‑readable string representations for status enums and category results + +### Design Notes + +- ContainerSCC captures only the security context fields relevant to policy checks; missing fields default to OK or Undefined +- Comparison logic relies on strict equality of capability lists, which may miss equivalent but reordered entries +- Best practice: use CheckPod on a provider.Pod to obtain per‑container categories before applying higher‑level rules + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**ContainerSCC**](#containerscc) | Represents the security context compliance state of a container | +| [**PodListCategory**](#podlistcategory) | Struct definition | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func AllVolumeAllowed(volumes []corev1.Volume) (r1, r2 OkNok)](#allvolumeallowed) | Determines whether every volume in a slice satisfies allowed‑type constraints and reports if any host‑path volumes are present. | +| [func (category CategoryID) String() string](#categoryid.string) | Returns the textual form of a `CategoryID`. | +| [func CheckPod(pod *provider.Pod) []PodListCategory](#checkpod) | Builds a `ContainerSCC` reflecting pod‑level security settings, then categorizes each container in the pod by invoking `checkContainerCategory`. | +| [func GetContainerSCC(cut *provider.Container, containerSCC ContainerSCC) ContainerSCC](#getcontainerscc) | Builds and returns a `ContainerSCC` that reflects the security capabilities of the supplied container (`cut`). It flags host port usage, capability categories, privilege escalation, privileged mode, run‑as‑user, read‑only root filesystem, non‑root execution, and SELinux context presence. 
| +| [func (okNok OkNok) String() string](#oknok.string) | Provides a human‑readable string for an `OkNok` status (`OK`, `NOK`, or unknown). | +| [func (category PodListCategory) String() string](#podlistcategory.string) | Returns a human‑readable description containing the container name, pod name, namespace, and category of a `PodListCategory`. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAddCapabilities []string) bool](#checkcontaincategory) | Determines whether every element in `addCapability` exists within the slice `referenceCategoryAddCapabilities`. Returns `true` only if all elements match; otherwise returns `false`. | +| [func checkContainerCategory(containers []corev1.Container, containerSCC ContainerSCC, podName, nameSpace string) []PodListCategory](#checkcontainercategory) | For each container in a pod it builds a security‑context profile (`percontainerSCC`), compares that profile against predefined SCC categories, and returns a slice of `PodListCategory` structs indicating the assigned category. | +| [func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool](#comparecategory) | Determines whether `containerSCC` satisfies all constraints defined in `refCategory`. Returns `true` if the container matches the reference category; otherwise `false`. | +| [func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *ContainerSCC)](#updatecapabilitiesfromcontainer) | Populates `containerSCC` with capability‑related data derived from a Kubernetes container’s security context. | + +## Structs + +### ContainerSCC + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `HostDirVolumePluginPresent` | `OkNok` | Indicates if a host‑path volume is used (true/false). | +| `HostIPC` | `OkNok` | Whether the container shares the IPC namespace with the host. 
| +| `HostNetwork` | `OkNok` | Whether the container uses the host’s network stack. | +| `HostPID` | `OkNok` | Whether the container shares the PID namespace with the host. | +| `HostPorts` | `OkNok` | Whether any host ports are exposed by the container. | +| `PrivilegeEscalation` | `OkNok` | Whether privilege escalation is permitted. | +| `PrivilegedContainer` | `OkNok` | Whether the container runs in privileged mode. | +| `RunAsUserPresent` | `OkNok` | Whether a non‑nil `runAsUser` value is set. | +| `ReadOnlyRootFilesystem` | `OkNok` | Whether the root filesystem is mounted read‑only. | +| `RunAsNonRoot` | `OkNok` | Whether the container must run as a non‑root user. | +| `FsGroupPresent` | `OkNok` | Whether an FS group value is specified. | +| `SeLinuxContextPresent` | `OkNok` | Whether an SELinux context annotation or field is present. | +| `CapabilitiesCategory` | `CategoryID` | The lowest SCC category that matches the container’s added capabilities list. | +| `RequiredDropCapabilitiesPresent` | `OkNok` | Whether required drop capabilities (e.g., MKNOD, SETUID) are satisfied. | +| `AllVolumeAllowed` | `OkNok` | Whether all volumes referenced by the pod are allowed by the SCC. | + +#### Purpose + +`ContainerSCC` aggregates boolean flags (`OkNok`) and a capability category to describe how closely a container’s security context matches the requirements of a given Security Context Constraint (SCC). It is populated during validation, used for comparison against reference categories, and ultimately determines whether a pod can be admitted. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetContainerSCC` | Builds a `ContainerSCC` instance from a container’s spec by inspecting ports, security context fields, capabilities, and volume usage. | +| `compareCategory` | Compares the populated `ContainerSCC` against a reference category to determine if the container satisfies that SCC level. 
| +| `updateCapabilitiesFromContainer` | Updates capability‑related fields (`RequiredDropCapabilitiesPresent`, `CapabilitiesCategory`) based on the container’s security context capabilities list. | + +--- + +--- + +### PodListCategory + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `Containername` | `string` | Field documentation | +| `Podname` | `string` | Field documentation | +| `NameSpace` | `string` | Field documentation | +| `Category` | `CategoryID` | Field documentation | + +--- + +## Exported Functions + +### AllVolumeAllowed + +**AllVolumeAllowed** - Determines whether every volume in a slice satisfies allowed‑type constraints and reports if any host‑path volumes are present. + +#### 1. Signature (Go) + +```go +func AllVolumeAllowed(volumes []corev1.Volume) (r1, r2 OkNok) +``` + +#### 2. Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether every volume in a slice satisfies allowed‑type constraints and reports if any host‑path volumes are present. | +| **Parameters** | `volumes []corev1.Volume` – list of pod volumes to evaluate. | +| **Return value** | *r1* (`OkNok`) – OK if all volumes are of permitted types, otherwise NOK.
*r2* (`OkNok`) – OK if at least one host‑path volume exists, otherwise NOK. | +| **Key dependencies** | • `len` (built‑in) to obtain slice length.
• Core Kubernetes API type `corev1.Volume`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by `CheckPod` in the security‑context container tests to validate pod volume configuration before further checks. | + +#### 3. Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"For each volume"} + B --> C["If HostPath present → set value=OK"] + B --> D["If ConfigMap/DownwardAPI/EmptyDir/PVC/Projected/Secret present → increment countVolume"] + C & D --> E["End loop"] + E --> F{"countVolume == len(volumes)"} + F -- Yes --> G["Return OK, value"] + F -- No --> H["Return NOK, value"] +``` + +#### 4. Function dependencies (Mermaid) + +```mermaid +graph TD + func_AllVolumeAllowed --> len +``` + +#### 5. Functions calling `AllVolumeAllowed` (Mermaid) + +```mermaid +graph TD + func_CheckPod --> func_AllVolumeAllowed +``` + +#### 6. Usage example (Go) + +```go +// Minimal example invoking AllVolumeAllowed +package main + +import ( + "fmt" + corev1 "k8s.io/api/core/v1" +) + +func main() { + volumes := []corev1.Volume{ + {Name: "cfg", ConfigMap: &corev1.ConfigMapVolumeSource{}}, + {Name: "data", EmptyDir: &corev1.EmptyDirVolumeSource{}}, + } + r1, r2 := AllVolumeAllowed(volumes) + fmt.Printf("All allowed? %s, HostPath present? %s\n", r1, r2) +} +``` + +--- + +--- + +### CategoryID.String + +**String** - Returns the textual form of a `CategoryID`. + +#### Signature + +```go +func (category CategoryID) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the textual form of a `CategoryID`. | +| **Parameters** | *receiver* `category` – the `CategoryID` value to convert. | +| **Return value** | `string` – the human‑readable name corresponding to the category. | +| **Key dependencies** | None (uses only package constants). | +| **Side effects** | None. Pure function. 
| +| **How it fits the package** | Provides a convenient way for callers to display or log a category identifier. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"category"} + B -->|"CategoryID1"| C["Return CategoryID1String"] + B -->|"CategoryID1NoUID0"| D["Return CategoryID1NoUID0String"] + B -->|"CategoryID2"| E["Return CategoryID2String"] + B -->|"CategoryID3"| F["Return CategoryID3String"] + B -->|"CategoryID4"| G["Return CategoryID4String"] + B -->|"Undefined"| H["Return CategoryID4String"] + C --> I["End"] + D --> I + E --> I + F --> I + G --> I + H --> I +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + Note["None – this function is currently not referenced elsewhere in the package."] +``` + +#### Functions calling `CategoryID.String` + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + Note["None – this function is currently not referenced elsewhere in the package."] +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CategoryID.String +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer" +) + +func main() { + var cat securitycontextcontainer.CategoryID = securitycontextcontainer.CategoryID1 + fmt.Println(cat.String()) // prints the string representation of CategoryID1 +} +``` + +--- + +### CheckPod + +**CheckPod** - Builds a `ContainerSCC` reflecting pod‑level security settings, then categorizes each container in the pod by invoking `checkContainerCategory`. + +#### Signature + +```go +func CheckPod(pod *provider.Pod) []PodListCategory +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a `ContainerSCC` reflecting pod‑level security settings, then categorizes each container in the pod by invoking `checkContainerCategory`. 
| +| **Parameters** | `pod *provider.Pod – the Pod to analyze` | +| **Return value** | `[]PodListCategory – slice containing one entry per container with its category information` | +| **Key dependencies** | • `AllVolumeAllowed(pod.Spec.Volumes)`
• `checkContainerCategory(pod.Spec.Containers, containerSCC, pod.Name, pod.Namespace)` | +| **Side effects** | No external I/O; only local state mutation within the function’s scope. | +| **How it fits the package** | Core helper used by tests to determine compliance of Pods against security‑context rules. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Initialize ContainerSCC"} + B --> C["Set HostIPC/HostNetwork/HostPID flags"] + C --> D["Check SELinux presence"] + D --> E["Call AllVolumeAllowed for pod volumes"] + E --> F["Check RunAsUser and FSGroup presence"] + F --> G["Invoke checkContainerCategory"] + G --> H{"Return []PodListCategory"} +``` + +#### Function dependencies + +```mermaid +graph TD + func_CheckPod --> func_AllVolumeAllowed + func_CheckPod --> func_checkContainerCategory +``` + +#### Functions calling `CheckPod` + +```mermaid +graph TD + func_testContainerSCC --> func_CheckPod +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CheckPod +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func main() { + pod := &provider.Pod{ + Name: "example-pod", + Namespace: "default", + Spec: provider.PodSpec{ + HostIPC: true, + HostNetwork: false, + HostPID: true, + SecurityContext: &provider.SecurityContext{ + SELinuxOptions: &provider.SELinuxOptions{}, + RunAsUser: int64Ptr(1000), + FSGroup: int64Ptr(2000), + }, + Volumes: []corev1.Volume{ /* ... 
*/ }, + Containers: []corev1.Container{ + {Name: "app"}, + }, + }, + } + + categories := securitycontextcontainer.CheckPod(pod) + for _, cat := range categories { + fmt.Printf("Container %s in pod %s is category %s\n", + cat.Containername, cat.Podname, cat.Category.String()) + } +} + +func int64Ptr(v int64) *int64 { return &v } +``` + +--- + +### GetContainerSCC + +**GetContainerSCC** - Builds and returns a `ContainerSCC` that reflects the security capabilities of the supplied container (`cut`). It flags host port usage, capability categories, privilege escalation, privileged mode, run‑as‑user, read‑only root filesystem, non‑root execution, and SELinux context presence. + +#### Signature (Go) + +```go +func GetContainerSCC(cut *provider.Container, containerSCC ContainerSCC) ContainerSCC +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds and returns a `ContainerSCC` that reflects the security capabilities of the supplied container (`cut`). It flags host port usage, capability categories, privilege escalation, privileged mode, run‑as‑user, read‑only root filesystem, non‑root execution, and SELinux context presence. | +| **Parameters** | `cut *provider.Container` – the container to analyze.
`containerSCC ContainerSCC` – an initial SCC struct that will be mutated and returned. | +| **Return value** | Updated `ContainerSCC` reflecting the container’s security posture. | +| **Key dependencies** | Calls helper `updateCapabilitiesFromContainer`; accesses fields of `provider.Container`, its `Ports` slice, and nested `SecurityContext`. Uses constants `OK` / `NOK` to indicate status. | +| **Side effects** | Pure function: no external I/O or global state changes; only mutates the passed `containerSCC` value locally before returning it. | +| **How it fits the package** | Core routine for determining a container’s compliance category in the security‑context‑container test suite. It is invoked by higher‑level functions that classify containers into policy categories. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"cut.Ports loop"} + B -->|"HostPort != 0"| C["Set HostPorts = OK"] + B --> D["Continue loop"] + C --> E["Update capabilities via helper"] + D --> B + E --> F{"SecurityContext present?"} + F -->|"AllowPrivilegeEscalation"| G["Set PrivilegeEscalation = OK"] + F --> H{"Privileged flag set to true?"} + H -->|"true"| I["Set PrivilegedContainer = OK"] + F --> J{"RunAsUser present?"} + J -->|"true"| K["Set RunAsUserPresent = OK"] + F --> L{"ReadOnlyRootFilesystem true?"} + L -->|"true"| M["Set ReadOnlyRootFilesystem = OK"] + F --> N{"RunAsNonRoot true?"} + N -->|"true"| O["Set RunAsNonRoot = OK"] + F --> P{"SELinuxOptions present?"} + P -->|"true"| Q["Set SeLinuxContextPresent = OK"] + Q --> R["Return updated containerSCC"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetContainerSCC --> func_updateCapabilitiesFromContainer +``` + +#### Functions calling `GetContainerSCC` (Mermaid) + +```mermaid +graph TD + func_checkContainerCategory --> func_GetContainerSCC +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetContainerSCC +package main + +import ( + "fmt" + + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer"
+ "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider"
+)
+
+func main() {
+ // Example container (fields omitted for brevity)
+ cut := &provider.Container{
+ Ports: []provider.Port{{HostPort: 0}}, // no host port
+ SecurityContext: &provider.SecurityContext{
+ RunAsUser: nil,
+ ReadOnlyRootFilesystem: nil,
+ AllowPrivilegeEscalation: nil,
+ },
+ }
+
+ // Initial SCC with all flags set to NOK (use the package constants, not raw strings)
+ initSCC := securitycontextcontainer.ContainerSCC{
+ HostPorts: securitycontextcontainer.NOK,
+ RequiredDropCapabilitiesPresent: securitycontextcontainer.NOK,
+ CapabilitiesCategory: securitycontextcontainer.CategoryID4, // placeholder until evaluated
+ PrivilegeEscalation: securitycontextcontainer.NOK,
+ PrivilegedContainer: securitycontextcontainer.NOK,
+ RunAsUserPresent: securitycontextcontainer.NOK,
+ ReadOnlyRootFilesystem: securitycontextcontainer.NOK,
+ RunAsNonRoot: securitycontextcontainer.NOK,
+ SeLinuxContextPresent: securitycontextcontainer.NOK,
+ }
+
+ finalSCC := securitycontextcontainer.GetContainerSCC(cut, initSCC)
+ fmt.Printf("Updated SCC: %+v\n", finalSCC)
+}
+```
+
+*The example demonstrates how to call `GetContainerSCC` with a container and an initial SCC; the function returns an updated SCC reflecting the container’s security attributes.*
+
+---
+
+### OkNok.String
+
+**String** - Provides a human‑readable string for an `OkNok` status (`OK`, `NOK`, or unknown).
+
+The method returns a string representation of the `OkNok` value.
+
+```go
+func (okNok OkNok) String() string
+```
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Provides a human‑readable string for an `OkNok` status (`OK`, `NOK`, or unknown). |
+| **Parameters** | *receiver* `okNok` – the `OkNok` value to convert. |
+| **Return value** | A `string`: either `OKString`, `NOKString`, or `"false"` if no match. |
+| **Key dependencies** | • Constants `OK`, `NOK`, `OKString`, `NOKString`. |
+| **Side effects** | None – purely functional; no state changes or I/O. 
| +| **How it fits the package** | Implements the `fmt.Stringer` interface for `OkNok`, enabling string formatting in logs and tests within the `securitycontextcontainer` package. |
+
+#### Internal workflow
+
+```mermaid
+flowchart TD
+ okNok --> switch["Switch on value"]
+ switch -->|"OK"| OKString["Return OKString"]
+ switch -->|"NOK"| NOKString["Return NOKString"]
+ switch --> default["Return false"]
+```
+
+#### Function dependencies
+
+None – this function is currently not referenced elsewhere in the package.
+
+#### Functions calling `OkNok.String`
+
+None – this function is currently not referenced elsewhere in the package.
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking OkNok.String
+package main
+
+import (
+ "fmt"
+ "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer"
+)
+
+func main() {
+ var status securitycontextcontainer.OkNok = securitycontextcontainer.OK
+ fmt.Println(status.String()) // prints the value of the OKString constant
+}
+```
+
+---
+
+### PodListCategory.String
+
+**String** - Returns a human‑readable description containing the container name, pod name, namespace, and category of a `PodListCategory`.
+
+Prints the struct fields as a formatted string representation of a pod list category.
+
+---
+
+#### Signature (Go)
+
+```go
+func (category PodListCategory) String() string
+```
+
+---
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Returns a human‑readable description containing the container name, pod name, namespace, and category of a `PodListCategory`. |
+| **Parameters** | `category` – the receiver value holding the data to format. |
+| **Return value** | A string formatted with `fmt.Sprintf`. |
+| **Key dependencies** | • `fmt.Sprintf` from package `fmt` |
+| **Side effects** | None (pure function). 
| +| **How it fits the package** | Implements the `Stringer` interface for `PodListCategory`, enabling convenient logging and debugging within the *securitycontextcontainer* test suite. | + +--- + +#### Internal workflow + +```mermaid +flowchart TD + A["Start: receive PodListCategory"] --> B["Format string with fmt.Sprintf"] + B --> C["Return formatted string"] +``` + +--- + +#### Function dependencies + +```mermaid +graph TD + func_PodListCategory.String --> func_fmt.Sprintf +``` + +--- + +#### Functions calling `PodListCategory.String` + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking PodListCategory.String +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer" +) + +func main() { + category := securitycontextcontainer.PodListCategory{ + Containername: "nginx", + Podname: "web-pod", + NameSpace: "default", + Category: "frontend", + } + fmt.Println(category.String()) +} +``` + +--- + +## Local Functions + +### checkContainCategory + +**checkContainCategory** - Determines whether every element in `addCapability` exists within the slice `referenceCategoryAddCapabilities`. Returns `true` only if all elements match; otherwise returns `false`. + +#### Signature (Go) + +```go +func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAddCapabilities []string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether every element in `addCapability` exists within the slice `referenceCategoryAddCapabilities`. Returns `true` only if all elements match; otherwise returns `false`. | +| **Parameters** | - `addCapability []corev1.Capability`: Capabilities to be checked.
- `referenceCategoryAddCapabilities []string`: Allowed capability strings for a specific category. | +| **Return value** | `bool`: `true` when all capabilities are present in the reference slice, `false` otherwise. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper.StringInSlice`
• Standard string trimming and comparison logic | +| **Side effects** | None; purely functional with no state mutation or I/O. | +| **How it fits the package** | Used by higher‑level functions to classify container security contexts into capability categories (e.g., Category 2, 3, or 4). It acts as a helper for determining compliance against predefined capability sets. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over addCapability"} + B -->|"cap in referenceCategoryAddCapabilities"| C["Continue"] + B -->|"cap not found"| D["Return false"] + C --> E["All caps checked?"] + E -- Yes --> F["Return true"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_checkContainCategory --> func_StringInSlice +``` + +#### Functions calling `checkContainCategory` + +```mermaid +graph TD + func_updateCapabilitiesFromContainer --> func_checkContainCategory +``` + +#### Usage example (Go) + +```go +// Minimal example invoking checkContainCategory +package main + +import ( + "fmt" + corev1 "k8s.io/api/core/v1" + + // Assume the function is in package securitycontextcontainer + scc "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer" +) + +func main() { + addCaps := []corev1.Capability{"NET_ADMIN", "SYS_TIME"} + reference := []string{"NET_ADMIN", "SYS_TIME", "CHOWN"} + + ok := scc.CheckContainCategory(addCaps, reference) // Note: function is unexported; this call assumes same package or test harness + fmt.Println("All capabilities match:", ok) +} +``` + +> **Note:** `checkContainCategory` is an unexported helper; the example demonstrates usage within the same package context. + +--- + +### checkContainerCategory + +**checkContainerCategory** - For each container in a pod it builds a security‑context profile (`percontainerSCC`), compares that profile against predefined SCC categories, and returns a slice of `PodListCategory` structs indicating the assigned category. 
+ +#### Signature (Go) + +```go +func checkContainerCategory(containers []corev1.Container, containerSCC ContainerSCC, podName, nameSpace string) []PodListCategory +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For each container in a pod it builds a security‑context profile (`percontainerSCC`), compares that profile against predefined SCC categories, and returns a slice of `PodListCategory` structs indicating the assigned category. | +| **Parameters** | `containers []corev1.Container` – list of containers to analyse.
`containerSCC ContainerSCC` – base SCC information derived from the pod itself.
`podName string` – name of the pod.
`nameSpace string` – namespace of the pod. | +| **Return value** | `[]PodListCategory` – one entry per container, containing its name, pod name, namespace and the matched category ID (`CategoryID1`, `CategoryID1NoUID0`, `CategoryID2`, `CategoryID3`, or `CategoryID4`). | +| **Key dependencies** | *`GetContainerSCC(cut, containerSCC)` – enriches per‑container SCC.
* `compareCategory(ref, target, id)` – checks if a container matches a specific category. | +| **Side effects** | None; the function only reads input and constructs new values. | +| **How it fits the package** | Used by `CheckPod` to produce the final list of categorized containers for security‑context validation tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"for each container"} + B --> C["Create provider.Container cut"] + C --> D["GetContainerSCC(cut, containerSCC)"] + D --> E["Initialize PodListCategory info"] + E --> F{"compareCategory Category1?"} + F -- Yes --> G["Set CategoryID1"] + F -- No --> H{"compareCategory Category1NoUID0?"} + H -- Yes --> I["Set CategoryID1NoUID0"] + H -- No --> J{"compareCategory Category2?"} + J -- Yes --> K["Set CategoryID2"] + J -- No --> L{"compareCategory Category3?"} + L -- Yes --> M["Set CategoryID3"] + L -- No --> N["Set CategoryID4"] + G --> O["Append to list"] --> B + I --> O + K --> O + M --> O + N --> O + O --> P["End loop"] + P --> Q["Return ContainerList"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_checkContainerCategory --> func_GetContainerSCC + func_checkContainerCategory --> func_compareCategory +``` + +#### Functions calling `checkContainerCategory` (Mermaid) + +```mermaid +graph TD + func_CheckPod --> func_checkContainerCategory +``` + +#### Usage example (Go) + +```go +// Minimal example invoking checkContainerCategory +import ( + corev1 "k8s.io/api/core/v1" +) + +func example() { + containers := []corev1.Container{ + {Name: "app"}, + {Name: "sidecar"}, + } + baseSCC := ContainerSCC{ /* populate with pod‑level SCC */ } + result := checkContainerCategory(containers, baseSCC, "mypod", "default") + // result contains the category for each container +} +``` + +--- + +### compareCategory + +**compareCategory** - Determines whether `containerSCC` satisfies all constraints defined in `refCategory`. 
Returns `true` if the container matches the reference category; otherwise `false`. + +#### Signature (Go) + +```go +func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether `containerSCC` satisfies all constraints defined in `refCategory`. Returns `true` if the container matches the reference category; otherwise `false`. | +| **Parameters** | `refCategory *ContainerSCC` – the expected SCC definition.
`containerSCC *ContainerSCC` – the actual SCC derived from a pod/container.
`id CategoryID` – identifier used only for logging. | +| **Return value** | `bool` – `true` when all checks pass, `false` otherwise. | +| **Key dependencies** | • Logging via `log.Debug`
• Fields of `ContainerSCC`: `AllVolumeAllowed`, `RunAsUserPresent`, `RunAsNonRoot`, `FsGroupPresent`, `RequiredDropCapabilitiesPresent`, `HostDirVolumePluginPresent`, `HostIPC`, `HostNetwork`, `HostPID`, `HostPorts`, `PrivilegeEscalation`, `PrivilegedContainer`, `ReadOnlyRootFilesystem`, `SeLinuxContextPresent`, `CapabilitiesCategory` | +| **Side effects** | None – the function only reads inputs and writes debug logs. | +| **How it fits the package** | Used by `checkContainerCategory` to classify each container into one of several predefined SCC categories, enabling policy validation tests. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Compare fields"} + B -->|"All match"| C["Return true"] + B -->|"Any mismatch"| D["Set result=false & log details"] + D --> C +``` + +#### Function dependencies + +```mermaid +graph TD + func_compareCategory --> func_log.Debug +``` + +#### Functions calling `compareCategory` + +```mermaid +graph TD + func_checkContainerCategory --> func_compareCategory +``` + +#### Usage example (Go) + +```go +// Minimal example invoking compareCategory +ref := &ContainerSCC{AllVolumeAllowed: true, RunAsUserPresent: true} +actual := &ContainerSCC{AllVolumeAllowed: true, RunAsUserPresent: false} +id := CategoryID1 + +matches := compareCategory(ref, actual, id) +fmt.Println("Does the container match the category?", matches) +``` + +--- + +### updateCapabilitiesFromContainer + +**updateCapabilitiesFromContainer** - Populates `containerSCC` with capability‑related data derived from a Kubernetes container’s security context. + +#### Signature (Go) + +```go +func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *ContainerSCC) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Populates `containerSCC` with capability‑related data derived from a Kubernetes container’s security context. 
| +| **Parameters** | `cut *provider.Container` – the container whose capabilities are examined.
`containerSCC *ContainerSCC` – mutable SCC structure to receive capability results. | +| **Return value** | None (updates `containerSCC` in‑place). | +| **Key dependencies** | - `sort.Strings` (from stdlib)
- `stringhelper.SubSlice` (internal helper)
- `slices.Equal` (golangx/slices)
- `checkContainCategory` (local helper) | +| **Side effects** | Mutates fields of the supplied `ContainerSCC`. No external I/O. | +| **How it fits the package** | Called by `GetContainerSCC` to enrich the SCC with capability checks before returning a full security‑context summary for a container. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"SecurityContext present?"} + B -- No --> C["Set CapabilitiesCategory = ID1"] + B -- Yes --> D["Collect Drop capabilities into sliceDropCapabilities"] + D --> E["Sort sliceDropCapabilities & requiredDropCapabilities"] + E --> F{"Is drop subset or equal to dropAll?"} + F -- Yes --> G["Set RequiredDropCapabilitiesPresent = OK"] + F -- No --> H["Check Add capabilities category"] + H --> I{"len(Add) == 0 ?"} + I -- Yes --> J["Set CapabilitiesCategory = ID1"] + I -- No --> K{"Add in Category2?"} + K -- Yes --> L["Set CapabilitiesCategory = ID2"] + K -- No --> M{"Add in Category3?"} + M -- Yes --> N["Set CapabilitiesCategory = ID3"] + M -- No --> O["Set CapabilitiesCategory = ID4"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_updateCapabilitiesFromContainer --> sort.Strings + func_updateCapabilitiesFromContainer --> stringhelper.SubSlice + func_updateCapabilitiesFromContainer --> slices.Equal + func_updateCapabilitiesFromContainer --> checkContainCategory +``` + +#### Functions calling `updateCapabilitiesFromContainer` (Mermaid) + +```mermaid +graph TD + func_GetContainerSCC --> func_updateCapabilitiesFromContainer +``` + +#### Usage example (Go) + +```go +// Minimal example invoking updateCapabilitiesFromContainer +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/accesscontrol/securitycontextcontainer" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + cut := &provider.Container{ + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: 
[]corev1.Capability{"NET_RAW"}, + Add: []corev1.Capability{}, + }, + }, + } + containerSCC := &securitycontextcontainer.ContainerSCC{} + securitycontextcontainer.UpdateCapabilitiesFromContainer(cut, containerSCC) +} +``` + +--- diff --git a/docs/tests/certification/certification.md b/docs/tests/certification/certification.md new file mode 100644 index 000000000..79bada57a --- /dev/null +++ b/docs/tests/certification/certification.md @@ -0,0 +1,563 @@ +# Package certification + +**Path**: `tests/certification` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local Functions](#local-functions) + - [getContainersToQuery](#getcontainerstoquery) + - [testAllOperatorCertified](#testalloperatorcertified) + - [testContainerCertification](#testcontainercertification) + - [testContainerCertificationStatusByDigest](#testcontainercertificationstatusbydigest) + - [testHelmCertified](#testhelmcertified) + - [testHelmVersion](#testhelmversion) + +## Overview + +The certification test package registers and executes checks that verify whether operators, containers, and Helm charts in a Kubernetes cluster are certified according to the Red Hat certification database. It populates an internal checks database with these checks and provides hooks for reporting compliance. + +### Key Features + +- Registers operator‑level, container‑digest, and Helm chart certification checks; integrates with a certification status validator. +- Provides pre‑execution hooks that gather runtime data such as operator lists, pod listings, and Helm releases before running checks. +- Automatically skips checks when relevant resources (e.g., operators or Helm releases) are absent, improving test efficiency. + +### Design Notes + +- Assumes the presence of a certdb.CertificationStatusValidator to query certification status; missing validator results in unknown compliance. 
+- Handles absence of Helm v3 by detecting Tiller pods and marking all charts as compliant if none found. +- Uses global test environment and logger for contextual reporting; callers must set these globals before invoking LoadChecks. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Populates the internal checks database with all certification‑related checks, attaching pre‑execution hooks and skip conditions. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getContainersToQuery(env *provider.TestEnvironment) map[provider.ContainerImageIdentifier]bool](#getcontainerstoquery) | Creates a lookup table indicating which container images should be queried. Each key is a `ContainerImageIdentifier` from the test environment, and every value is set to `true`. | +| [func (*checksdb.Check, *provider.TestEnvironment, certdb.CertificationStatusValidator)()](#testalloperatorcertified) | Iterates over all operators listed in the test environment and verifies each operator’s certification status for the current OpenShift minor version. Records compliant and non‑compliant operators for reporting. | +| [func(testContainerCertification)(c provider.ContainerImageIdentifier, validator certdb.CertificationStatusValidator) bool { return validator.IsContainerCertified(c.Registry, c.Repository, c.Tag, c.Digest) }](#testcontainercertification) | Determines if a container image is certified by delegating to the `CertificationStatusValidator`. | +| [func testContainerCertificationStatusByDigest( check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator, )](#testcontainercertificationstatusbydigest) | Verifies that every container in the test environment has a valid image digest and that this digest is present in the certification database. 
Containers lacking digests or with unknown digests are marked non‑compliant; otherwise they are considered compliant. | +| [func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator)()](#testhelmcertified) | Iterates over all Helm chart releases in the test environment and records whether each release is certified according to the provided validator. | +| [func testHelmVersion(check *checksdb.Check)](#testhelmversion) | Determines if the cluster uses Helm v3 by searching for Tiller pods. If none are found, all installed Helm charts are marked compliant; otherwise each Tiller pod is flagged non‑compliant. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Populates the internal checks database with all certification‑related checks, attaching pre‑execution hooks and skip conditions. + +#### 1) Signature (Go) + +```go +func LoadChecks() +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Populates the internal checks database with all certification‑related checks, attaching pre‑execution hooks and skip conditions. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | • `log.Debug`
• `WithBeforeEachFn`
• `checksdb.NewChecksGroup`
• `checksdb.Add`
• `WithSkipCheckFn`
• `WithCheckFn`
• `identifiers.GetTestIDAndLabels`
• `testHelmVersion`, `skipIfNoHelmChartReleasesFn`
• `testAllOperatorCertified`, `skipIfNoOperatorsFn`
• `testHelmCertified`
• `testContainerCertificationStatusByDigest`
• `testhelper.GetNoContainersUnderTestSkipFn` | +| **Side effects** | • Emits a debug log.
• Creates a checks group named by `common.AffiliatedCertTestKey`.
• Adds four checks, each with its own skip logic and execution function. | +| **How it fits the package** | It is called by `pkg/certsuite.LoadInternalChecksDB` to register all certification checks that run during test execution. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["log.Debug(Loading ...)"] + B --> C["Create checksGroup := checksdb.NewChecksGroup(...)"] + C --> D["WithBeforeEachFn(beforeEachFn)"] + D --> E["Add Check HelmVersion"] + E --> F["Add Check OperatorCertified"] + F --> G["Add Check HelmIsCertified"] + G --> H["Add Check ContainerIsCertifiedDigest"] + H --> I["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + LoadChecks --> Logger.Debug + LoadChecks --> WithBeforeEachFn + LoadChecks --> checksdb.NewChecksGroup + LoadChecks --> checksdb.Add + LoadChecks --> WithSkipCheckFn + LoadChecks --> WithCheckFn + LoadChecks --> identifiers.GetTestIDAndLabels + LoadChecks --> testHelmVersion + LoadChecks --> skipIfNoHelmChartReleasesFn + LoadChecks --> testAllOperatorCertified + LoadChecks --> skipIfNoOperatorsFn + LoadChecks --> testHelmCertified + LoadChecks --> testContainerCertificationStatusByDigest + LoadChecks --> testhelper.GetNoContainersUnderTestSkipFn +``` + +#### 5) Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + LoadInternalChecksDB --> LoadChecks +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking LoadChecks +func main() { + // Load all certification checks into the database + certsuite.LoadChecks() +} +``` + +--- + +## Local Functions + +### getContainersToQuery + +**getContainersToQuery** - Creates a lookup table indicating which container images should be queried. Each key is a `ContainerImageIdentifier` from the test environment, and every value is set to `true`. 
+ +#### Signature (Go) + +```go +func getContainersToQuery(env *provider.TestEnvironment) map[provider.ContainerImageIdentifier]bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a lookup table indicating which container images should be queried. Each key is a `ContainerImageIdentifier` from the test environment, and every value is set to `true`. | +| **Parameters** | `env *provider.TestEnvironment` – The test environment containing a slice of containers (`env.Containers`). | +| **Return value** | `map[provider.ContainerImageIdentifier]bool` – A map where each container image identifier present in the environment maps to `true`. | +| **Key dependencies** | • Calls the built‑in `make` function to allocate the map.
• Accesses `env.Containers` and `cut.ContainerImageIdentifier`. | +| **Side effects** | No external I/O or state mutation; purely functional. | +| **How it fits the package** | Supplies a quick membership check for other parts of the certification suite that need to determine whether a container image should be processed. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Allocate["Allocate map with `make`"] + Allocate --> Loop{"Iterate over env.Containers"} + Loop -->|"for each cut"| Insert["Insert cut.ContainerImageIdentifier → true"] + Loop --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getContainersToQuery --> make +``` + +#### Functions calling `getContainersToQuery` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking getContainersToQuery +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/certification/provider" +) + +// Assume env is a pre‑populated *provider.TestEnvironment. +func main() { + env := &provider.TestEnvironment{ + Containers: []provider.Container{ /* … */ }, + } + containersToQuery := getContainersToQuery(env) + fmt.Println(containersToQuery) // map[ true] ... +} +``` + +--- + +### testAllOperatorCertified + +**testAllOperatorCertified** - Iterates over all operators listed in the test environment and verifies each operator’s certification status for the current OpenShift minor version. Records compliant and non‑compliant operators for reporting. + +#### Signature (Go) + +```go +func (*checksdb.Check, *provider.TestEnvironment, certdb.CertificationStatusValidator)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all operators listed in the test environment and verifies each operator’s certification status for the current OpenShift minor version. 
Records compliant and non‑compliant operators for reporting. | +| **Parameters** | `check *checksdb.Check` – check context; `<-` `env *provider.TestEnvironment` – runtime cluster information; `<-` `validator certdb.CertificationStatusValidator` – certification lookup service | +| **Return value** | None (side‑effect: sets the check result) | +| **Key dependencies** | • `provider.IsOCPCluster()`
• `strings.SplitN`
• `check.LogInfo`, `check.LogError`
• `validator.IsOperatorCertified`
• `testhelper.NewOperatorReportObject`, `AddField`
• `check.SetResult` | +| **Side effects** | Logs progress and errors, creates report objects, updates the check’s result state. No external I/O beyond logging. | +| **How it fits the package** | This function is a core test routine for the *certification* suite; it is invoked by `LoadChecks` to evaluate operator certification compliance across all operators discovered in the environment. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Is OCP cluster?"} + B -- Yes --> C["Extract minor version"] + B -- No --> D["Minor version ="] + C & D --> E["For each operator"] + E --> F{"Certified?"} + F -- Yes --> G["Create compliant report"] + F -- No --> H["Create non‑compliant report"] + G & H --> I["Append to respective slice"] + I --> J["Next operator"] + J --> K{"All operators processed?"} + K -- Yes --> L["Set check result"] + L --> M["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testAllOperatorCertified --> provider_IsOCPCluster + func_testAllOperatorCertified --> strings_SplitN + func_testAllOperatorCertified --> check_LogInfo + func_testAllOperatorCertified --> check_LogError + func_testAllOperatorCertified --> validator_IsOperatorCertified + func_testAllOperatorCertified --> testhelper_NewOperatorReportObject + func_testAllOperatorCertified --> testhelper_AddField + func_testAllOperatorCertified --> check_SetResult +``` + +#### Functions calling `testAllOperatorCertified` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testAllOperatorCertified +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testAllOperatorCertified +check := checksdb.NewCheck("operator-cert") +env := &provider.TestEnvironment{ + Operators: []provider.OperatorInfo{ + {Namespace: "openshift", Name: "my-operator", Channel: "stable"}, + }, + OpenshiftVersion: "4.12.0", +} +validator := certdb.NewMockValidator() // implements CertificationStatusValidator +testAllOperatorCertified(check, env, 
validator) +// check.Result now contains compliant and non‑compliant operator reports +``` + +--- + +### testContainerCertification + +**testContainerCertification** - Determines if a container image is certified by delegating to the `CertificationStatusValidator`. + +This helper checks whether a container image identified by its registry, repository, tag and digest is certified according to the supplied validator. + +```go +func(testContainerCertification)(c provider.ContainerImageIdentifier, validator certdb.CertificationStatusValidator) bool { + return validator.IsContainerCertified(c.Registry, c.Repository, c.Tag, c.Digest) +} +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a container image is certified by delegating to the `CertificationStatusValidator`. | +| **Parameters** | `c provider.ContainerImageIdentifier` – image metadata;
`validator certdb.CertificationStatusValidator` – validator interface. | +| **Return value** | `bool` – `true` if the image is certified, otherwise `false`. | +| **Key dependencies** | • Calls `certdb.CertificationStatusValidator.IsContainerCertified`
• Relies on fields of `provider.ContainerImageIdentifier`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by test helpers to validate container certification status during tests in the `certification` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive ContainerImageIdentifier"] --> B["Call validator.IsContainerCertified"] + B --> C["Return boolean result"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testContainerCertification --> func_IsContainerCertified +``` + +#### Functions calling `testContainerCertification` + +```mermaid +graph TD + func_testContainerCertificationStatusByDigest --> func_testContainerCertification +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainerCertification +func example() { + img := provider.ContainerImageIdentifier{ + Registry: "quay.io", + Repository:"myrepo/image", + Tag: "v1.0", + Digest: "sha256:abcd1234...", + } + validator := certdb.NewMockValidator() // assume a mock implementation + certified := testContainerCertification(img, validator) + fmt.Println("Certified:", certified) +} +``` + +--- + +### testContainerCertificationStatusByDigest + +**testContainerCertificationStatusByDigest** - Verifies that every container in the test environment has a valid image digest and that this digest is present in the certification database. Containers lacking digests or with unknown digests are marked non‑compliant; otherwise they are considered compliant. + +#### Signature (Go) + +```go +func testContainerCertificationStatusByDigest( + check *checksdb.Check, + env *provider.TestEnvironment, + validator certdb.CertificationStatusValidator, +) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that every container in the test environment has a valid image digest and that this digest is present in the certification database. 
Containers lacking digests or with unknown digests are marked non‑compliant; otherwise they are considered compliant. | +| **Parameters** | `check` – *checksdb.Check* (test context), `env` – *provider.TestEnvironment* (contains containers to test), `validator` – certdb.CertificationStatusValidator (provides certification lookup) | +| **Return value** | None (side‑effect: sets result on the check) | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `testhelper.NewContainerReportObject`
• `testContainerCertification` (internal helper)
• `check.SetResult` | +| **Side effects** | Logs information and errors; builds report objects; updates the check’s result. No external I/O beyond logging. | +| **How it fits the package** | This function is one of several certification checks in the *certification* test suite, specifically handling container digest validation before the suite reports overall compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> IterateContainers + IterateContainers --> CheckDigestMissing{"Digest\nmissing?"} + CheckDigestMissing -- Yes --> LogErrorMissingDigest & CreateNonCompliantObject + CheckDigestMissing -- No --> CheckCertified{"Digest\ncertified?"} + CheckCertified -- Yes --> LogInfoCertified & CreateCompliantObject + CheckCertified -- No --> LogErrorUnknownDigest & CreateNonCompliantObject + LogErrorMissingDigest --> ContinueLoop + LogErrorUnknownDigest --> ContinueLoop + CreateCompliantObject --> ContinueLoop + CreateNonCompliantObject --> ContinueLoop + ContinueLoop --> IterateContainers + IterateContainers --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testContainerCertificationStatusByDigest --> LogInfo + func_testContainerCertificationStatusByDigest --> LogError + func_testContainerCertificationStatusByDigest --> testhelper.NewContainerReportObject + func_testContainerCertificationStatusByDigest --> testContainerCertification + func_testContainerCertificationStatusByDigest --> check.SetResult +``` + +#### Functions calling `testContainerCertificationStatusByDigest` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testContainerCertificationStatusByDigest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainerCertificationStatusByDigest +func runExample() { + // Setup a dummy check, environment and validator + check := checksdb.NewCheck("example-check") + env := &provider.TestEnvironment{ + Containers: []testhelper.Container{ + { + Namespace: "default", + Podname: 
"app-pod", + Name: "app-container", + ContainerImageIdentifier: provider.ContainerImageIdentifier{ + Registry: "quay.io", + Repository:"myrepo/app", + Tag: "v1.0", + Digest: "sha256:abcd1234...", + }, + }, + }, + } + validator := certdb.NewMockValidator() // assumes a mock implementation + + // Invoke the function + testContainerCertificationStatusByDigest(check, env, validator) + + // Inspect results + fmt.Printf("Compliant: %+v\n", check.Compliant) + fmt.Printf("Non‑compliant: %+v\n", check.NonCompliant) +} +``` + +--- + +### testHelmCertified + +**testHelmCertified** - Iterates over all Helm chart releases in the test environment and records whether each release is certified according to the provided validator. + +#### Signature (Go) + +```go +func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator)() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all Helm chart releases in the test environment and records whether each release is certified according to the provided validator. | +| **Parameters** | `check *checksdb.Check` – the current test check; `env *provider.TestEnvironment` – holds Helm releases and Kubernetes version; `validator certdb.CertificationStatusValidator` – exposes `IsHelmChartCertified`. | +| **Return value** | None (the function records results via `check.SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `validator.IsHelmChartCertified`
• `testhelper.NewHelmChartReportObject`
• `SetType`, `AddField` on the report object
• `check.SetResult` | +| **Side effects** | Logs information, creates report objects, and updates the check’s result set; no global state mutation. | +| **How it fits the package** | Implements the test logic for the “Helm is certified” check in the certification suite, called by `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> ForEach{"For each Helm release"} + ForEach --> CheckCertified{"Is chart certified?"} + CheckCertified -- No --> CreateNonCompliant["Create non‑compliant report"] + CheckCertified -- Yes --> CreateCompliant["Create compliant report"] + CreateNonCompliant --> AppendToNonCompliant["Append to non‑compliant slice"] + CreateCompliant --> AppendToCompliant["Append to compliant slice"] + ForEach --> End{"End loop"} + End --> SetResult["check.SetResult(compliant, non‑compliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testHelmCertified --> func_LogInfo + func_testHelmCertified --> func_IsHelmChartCertified + func_testHelmCertified --> func_LogError + func_testHelmCertified --> func_append + func_testHelmCertified --> func_AddField + func_testHelmCertified --> func_SetType + func_testHelmCertified --> testhelper.NewHelmChartReportObject + func_testHelmCertified --> func_SetResult +``` + +#### Functions calling `testHelmCertified` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testHelmCertified +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testHelmCertified +check := checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmIsCertifiedIdentifier)) +env := &provider.TestEnvironment{ /* populated with Helm releases and K8s version */ } +validator := certdb.NewDefaultCertificationStatusValidator() + +testHelmCertified(check, env, validator) +``` + +--- + +### testHelmVersion + +**testHelmVersion** - Determines if the cluster uses Helm v3 by searching for Tiller pods. 
If none are found, all installed Helm charts are marked compliant; otherwise each Tiller pod is flagged non‑compliant. + +#### 1) Signature (Go) + +```go +func testHelmVersion(check *checksdb.Check) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the cluster uses Helm v3 by searching for Tiller pods. If none are found, all installed Helm charts are marked compliant; otherwise each Tiller pod is flagged non‑compliant. | +| **Parameters** | `check *checksdb.Check` – test context that provides logging and result storage. | +| **Return value** | None (void). Results are set via `check.SetResult`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` to obtain a Kubernetes client.
• `CoreV1().Pods("").List(...)` for listing pods.
• `testhelper.NewHelmChartReportObject`, `NewPodReportObject` to create report entries.
• `check.LogError`, `LogInfo`, `SetResult`. | +| **Side effects** | • Logs errors or informational messages.
• Populates the check’s result with compliant and non‑compliant objects. | +| **How it fits the package** | Part of the certification test suite; invoked during load of checks to validate Helm version compliance across all Helm chart releases in the environment. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get Kubernetes client"] --> B["List Pods with label app=helm,name=tiller"] + B --> C{"Any Tiller pods?"} + C -- No --> D["Mark all Helm releases as compliant"] + C -- Yes --> E["For each pod, mark as non‑compliant"] + D --> F["Set result on check"] + E --> F +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testHelmVersion --> func_GetClientsHolder + func_testHelmVersion --> func_List + func_testHelmVersion --> func_Pods + func_testHelmVersion --> func_CoreV1 + func_testHelmVersion --> func_LogError + func_testHelmVersion --> func_LogInfo + func_testHelmVersion --> func_append + func_testHelmVersion --> func_NewHelmChartReportObject + func_testHelmVersion --> func_NewPodReportObject + func_testHelmVersion --> func_SetResult +``` + +#### 5) Functions calling `testHelmVersion` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testHelmVersion +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testHelmVersion +check := checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestHelmVersionIdentifier)) +testHelmVersion(check) +``` + +--- diff --git a/docs/tests/common/common.md b/docs/tests/common/common.md new file mode 100644 index 000000000..2ce950e65 --- /dev/null +++ b/docs/tests/common/common.md @@ -0,0 +1,23 @@ +# Package common + +**Path**: `tests/common` + +## Table of Contents + +- [Overview](#overview) + +## Overview + +The `common` package supplies shared configuration and identifiers for the CertSuite test suites, such as test keys, default timeouts, and path utilities used across multiple test packages. 
+ +### Key Features + +- Defines a set of exported string constants that act as keys to group and identify individual test categories (e.g., AccessControlTestKey, PerformanceTestKey). +- Provides globally accessible variables like `DefaultTimeout` for session creation and `PathRelativeToRoot`/`RelativeSchemaPath` for locating resources relative to the repository root. +- Includes internal helpers (e.g., a private default timeout constant) that support consistent behavior across test packages without exposing implementation details. + +### Design Notes + +- Constants are exported so that all test suites can reference the same identifiers, ensuring consistency in test grouping and reporting. +- The package intentionally exposes only read‑only globals; mutable state is avoided to keep tests deterministic. +- Best practice: import this package for any shared configuration needed by a test suite, but avoid adding test logic here to maintain separation of concerns. diff --git a/docs/tests/common/rbac/rbac.md b/docs/tests/common/rbac/rbac.md new file mode 100644 index 000000000..fd4f2f87b --- /dev/null +++ b/docs/tests/common/rbac/rbac.md @@ -0,0 +1,599 @@ +# Package rbac + +**Path**: `tests/common/rbac` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [CrdResource](#crdresource) + - [RoleResource](#roleresource) + - [RoleRule](#rolerule) +- [Exported Functions](#exported-functions) + - [EvaluateAutomountTokens](#evaluateautomounttokens) + - [FilterRulesNonMatchingResources](#filterrulesnonmatchingresources) + - [GetAllRules](#getallrules) + - [GetCrdResources](#getcrdresources) + - [SliceDifference](#slicedifference) +- [Local Functions](#local-functions) + - [isResourceInRoleRule](#isresourceinrolerule) + +## Overview + +The rbac package provides utilities for evaluating RBAC configurations and ServiceAccount token mounting in Kubernetes workloads, as well as helper functions for dissecting Roles into actionable rules and matching them 
against Custom Resource Definitions (CRDs). It is used when validating access control policies or preparing audit reports for clusters. + +### Key Features + +- Determines if a Pod’s ServiceAccount token is automatically mounted, respecting explicit settings on the Pod or its ServiceAccount +- Deconstructs Kubernetes Role objects into individual RoleRule entries for fine‑grained analysis +- Separates RoleRules that match a list of CRDs from those that do not, enabling targeted permission checks + +### Design Notes + +- The package assumes the default for automatic token mounting is false unless explicitly overridden by Pod or ServiceAccount settings; this matches Kubernetes’ current behavior but may change in future releases +- SliceDifference performs a naïve O(n²) comparison; for large rule sets a map‑based approach would be more efficient but was omitted to keep dependencies minimal +- Best practice: call GetAllRules first, then use FilterRulesNonMatchingResources or SliceDifference to isolate relevant permissions before reporting + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**CrdResource**](#crdresource) | Representation of a Custom Resource Definition (CRD) | +| [**RoleResource**](#roleresource) | Represents a reference to an RBAC role or cluster‑role | +| [**RoleRule**](#rolerule) | One-line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func EvaluateAutomountTokens(client corev1typed.CoreV1Interface, put *provider.Pod) (bool, string)](#evaluateautomounttokens) | Determines whether a Pod is correctly configured to avoid automatic mounting of the ServiceAccount token. The function passes only if the token is explicitly set to `false` either on the Pod or its associated ServiceAccount, or if the configuration defaults to `false`. 
| +| [func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource) (matching, nonMatching []RoleRule)](#filterrulesnonmatchingresources) | Separates `RoleRule` entries into those that apply to any CRD in the supplied list (`matching`) and those that do not (`nonMatching`). | +| [func (*rbacv1.Role) GetAllRules() []RoleRule](#getallrules) | Deconstructs a Kubernetes `rbacv1.Role` into the smallest actionable units, returning one `RoleRule` for each combination of API group, resource, and verb. | +| [func GetCrdResources(crds []*apiextv1.CustomResourceDefinition) (resourceList []CrdResource)](#getcrdresources) | Transforms a slice of `*apiextv1.CustomResourceDefinition` into a slice of `CrdResource`, extracting group, singular/ plural names and short names for each CRD. | +| [func SliceDifference(s1, s2 []RoleRule) (diff []RoleRule)](#slicedifference) | Computes the set difference of two `[]RoleRule` slices – elements that exist in *s1* but not in *s2*. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func isResourceInRoleRule(crd CrdResource, roleRule RoleRule) bool](#isresourceinrolerule) | Determines whether a custom resource definition (CRD) matches a given RBAC `RoleRule` by comparing the CRD’s API group and plural name against the rule’s resource specification. | + +## Structs + +### CrdResource + +#### Fields + +| Field | Type | Description | +|--------------|-----------|-------------| +| `Group` | `string` | API group to which the CRD belongs (e.g., `"apps.example.com"`). | +| `PluralName` | `string` | Plural form of the resource name used in REST paths (e.g., `"widgets"`). | +| `SingularName` | `string` | Singular form of the resource name (e.g., `"widget"`). | +| `ShortNames` | `[]string` | Optional list of short names that can be used as aliases when referring to the CRD in commands. 
|
+
+#### Purpose
+
+`CrdResource` encapsulates the minimal identifying information for a Kubernetes Custom Resource Definition that is needed when evaluating RBAC rules against available resources. It holds the API group, singular and plural names, and any defined short names so that functions can match these against role rules.
+
+#### Related Functions
+
+| Function | Purpose |
+|----------|---------|
+| `GetCrdResources` | Converts a slice of `apiextv1.CustomResourceDefinition` objects into a slice of `CrdResource`, extracting the group and name fields. |
+| `FilterRulesNonMatchingResources` | Filters a list of role rules, returning those that reference resources present in the provided `CrdResource` list. |
+| `isResourceInRoleRule` | Determines whether a given `CrdResource` matches the resource component of a role rule by comparing group and plural name. |
+
+---
+
+---
+
+### RoleResource
+
+#### Fields
+
+| Field | Type | Description |
+|-------|--------|-------------|
+| Group | string | The API group of the resource that the rule targets (e.g., `apps.example.com`). An empty string denotes the core API group. |
+| Name | string | The (plural) name of the resource that the rule targets (e.g., `deployments`); it may include a sub‑resource suffix such as `deployments/status`. |
+
+#### Purpose
+
+`RoleResource` identifies the resource that a `RoleRule` applies to: the API group together with the plural resource name. It is paired with a verb inside `RoleRule` when Kubernetes Roles are decomposed for fine‑grained analysis.
+
+#### Related functions
+
+| Function | Purpose |
+|----------|---------|
+| *none directly* | No helper functions take `RoleResource` on its own; it is consumed indirectly through `RoleRule` by `GetAllRules`, `FilterRulesNonMatchingResources`, and `isResourceInRoleRule`. |
+
+---
+
+---
+
+### RoleRule
+
+Represents a single permission entry extracted from a Kubernetes RBAC `Role`, specifying which resource group and name a verb applies to.
+ +--- + +#### Fields + +| Field | Type | Description | +|---------|-------------|-------------| +| Resource | `RoleResource` | The API group and plural resource name that the rule targets. *(definition not shown in provided code)* | +| Verb | `string` | An allowed action (e.g., `"get"`, `"list"`, `"create"`). | + +--- + +#### Purpose + +The `RoleRule` struct is used to flatten the nested structure of a Kubernetes RBAC `Role`. +Each rule in a `Role` can specify multiple API groups, resources, and verbs. By creating one `RoleRule` per combination, the code simplifies comparison, filtering, and difference calculations between roles and CRD resources. + +--- + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetAllRules(aRole *rbacv1.Role)` | Converts all rules in a Kubernetes `Role` into a slice of `RoleRule`, one per group‑resource‑verb combination. | +| `FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource)` | Separates `RoleRule`s that match any CRD resource from those that do not, based on API group and plural name. | +| `SliceDifference(s1, s2 []RoleRule)` | Computes the set difference between two slices of `RoleRule`, used to identify rules present in one role but absent in another. | +| `isResourceInRoleRule(crd CrdResource, roleRule RoleRule)` | Checks whether a given CRD resource is covered by a specific `RoleRule` (matching group and plural name). | + +--- + +--- + +## Exported Functions + +### EvaluateAutomountTokens + +**EvaluateAutomountTokens** - Determines whether a Pod is correctly configured to avoid automatic mounting of the ServiceAccount token. The function passes only if the token is explicitly set to `false` either on the Pod or its associated ServiceAccount, or if the configuration defaults to `false`. 
+ +#### 1) Signature (Go) + +```go +func EvaluateAutomountTokens(client corev1typed.CoreV1Interface, put *provider.Pod) (bool, string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a Pod is correctly configured to avoid automatic mounting of the ServiceAccount token. The function passes only if the token is explicitly set to `false` either on the Pod or its associated ServiceAccount, or if the configuration defaults to `false`. | +| **Parameters** | `client corev1typed.CoreV1Interface – Kubernetes Core API client.
`put *provider.Pod – The Pod under evaluation. | +| **Return value** | `(bool, string)` – Boolean indicating pass/fail; a descriptive error message when failing (empty on success). | +| **Key dependencies** | • `fmt.Sprintf`
• `IsAutomountServiceAccountSetOnSA()` (method of `*provider.Pod`)
• `Error` (from the same package) | +| **Side effects** | No state mutation or I/O; purely functional evaluation. | +| **How it fits the package** | Part of the RBAC checks suite, used to validate that Pods do not unintentionally expose ServiceAccount tokens. | + +#### 3) Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Pod automountToken set?"} + B -- Yes & true --> C["Fail: Pod has token=true"] + B -- No or false --> D{"SA token set?"} + D -- Error --> E["Return error"] + D -- nil (not configured) --> F{"Pod explicitly false?"} + F -- Yes --> G["Pass"] + F -- No --> H["Fail: SA not configured to false"] + D -- true --> I["Fail: SA token=true"] +``` + +#### 4) Function dependencies + +```mermaid +graph TD + func_EvaluateAutomountTokens --> fmt.Sprintf + func_EvaluateAutomountTokens --> IsAutomountServiceAccountSetOnSA + func_EvaluateAutomountTokens --> Error +``` + +#### 5) Functions calling `EvaluateAutomountTokens` + +```mermaid +graph TD + testAutomountServiceToken --> EvaluateAutomountTokens +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking EvaluateAutomountTokens +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac" + corev1typed "k8s.io/client-go/kubernetes/typed/core/v1" +) + +func main() { + var client corev1typed.CoreV1Interface // assume initialized + var pod *provider.Pod // assume populated + + passed, msg := rbac.EvaluateAutomountTokens(client, pod) + if !passed { + fmt.Println("Automount check failed:", msg) + } else { + fmt.Println("Pod automount configuration is compliant.") + } +} +``` + +--- + +### FilterRulesNonMatchingResources + +**FilterRulesNonMatchingResources** - Separates `RoleRule` entries into those that apply to any CRD in the supplied list (`matching`) and those that do not (`nonMatching`). 
+ +#### 1) Signature (Go) + +```go +func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource) (matching, nonMatching []RoleRule) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Separates `RoleRule` entries into those that apply to any CRD in the supplied list (`matching`) and those that do not (`nonMatching`). | +| **Parameters** | `ruleList []RoleRule` – all rules to evaluate.
`resourceList []CrdResource` – CRDs against which rules are checked. | +| **Return value** | Two slices of `RoleRule`:
• `matching` – rules that match at least one CRD.
• `nonMatching` – rules that do not match any CRD. | +| **Key dependencies** | • `isResourceInRoleRule(crd CrdResource, roleRule RoleRule) bool` – determines a single match.
• `SliceDifference(s1, s2 []RoleRule)` – computes non‑matching slice. |
+| **Side effects** | None; pure function that only returns new slices. |
+| **How it fits the package** | Provides core filtering logic used by higher‑level tests (e.g., `testCrdRoles`) to assess role compliance with CRDs. |
+
+#### 3) Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"For each rule"}
+    B --> C{"For each resource"}
+    C -->|"Match?"| D["Add to matching"]
+    C -->|"No match"| E["Continue"]
+    D --> F["End inner loop"]
+    B --> G["Compute nonMatching via SliceDifference"]
+    G --> H["Return matching, nonMatching"]
+```
+
+#### 4) Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_FilterRulesNonMatchingResources --> func_isResourceInRoleRule
+    func_FilterRulesNonMatchingResources --> func_SliceDifference
+```
+
+#### 5) Functions calling `FilterRulesNonMatchingResources` (Mermaid)
+
+```mermaid
+graph TD
+    func_testCrdRoles --> func_FilterRulesNonMatchingResources
+```
+
+#### 6) Usage example (Go)
+
+```go
+// Minimal example invoking FilterRulesNonMatchingResources
+rules := []RoleRule{
+	{Resource: RoleResource{Name: "pods", Group: ""}, Verb: "get"},
+	{Resource: RoleResource{Name: "deployments", Group: "apps"}, Verb: "list"},
+}
+crds := []CrdResource{
+	{Group: "apps", PluralName: "deployments"},
+	{Group: "batch", PluralName: "jobs"},
+}
+
+matching, nonMatching := FilterRulesNonMatchingResources(rules, crds)
+
+// matching will contain the deployments rule
+// nonMatching will contain the pods rule
+```
+
+---
+
+### GetAllRules
+
+**GetAllRules** - Deconstructs a Kubernetes `rbacv1.Role` into the smallest actionable units, returning one `RoleRule` for each combination of API group, resource, and verb.
+ +#### Signature (Go) + +```go +func (*rbacv1.Role) GetAllRules() []RoleRule +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Deconstructs a Kubernetes `rbacv1.Role` into the smallest actionable units, returning one `RoleRule` for each combination of API group, resource, and verb. | +| **Parameters** | `aRole *rbacv1.Role` – The Role object whose rules are to be flattened. | +| **Return value** | `[]RoleRule` – A slice where each element represents a single rule (group, resource, verb). | +| **Key dependencies** | - `append` (built‑in) for building the result slice.
- `rbacv1.Role`, `RoleRule` types from the package. | +| **Side effects** | None – pure function; does not modify its input or any external state. | +| **How it fits the package** | Provides a foundational utility for higher‑level analysis functions that need to inspect or filter Role rules at the granularity of individual permissions. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> ForRules["Iterate over each rule in role.Rules"] + ForRules --> ForGroups["For each APIGroups entry"] + ForGroups --> ForResources["For each Resources entry"] + ForResources --> ForVerbs["For each Verbs entry"] + ForVerbs --> BuildRule["Create RoleRule with group, resource, verb"] + BuildRule --> Append["Append to ruleList slice"] + Append --> End["Return ruleList"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetAllRules --> append +``` + +#### Functions calling `GetAllRules` (Mermaid) + +```mermaid +graph TD + func_testCrdRoles --> func_GetAllRules +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetAllRules +package main + +import ( + "fmt" + + rbacv1 "k8s.io/api/rbac/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac" +) + +func main() { + role := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"apps"}, + Resources: []string{"deployments"}, + Verbs: []string{"get", "list"}, + }, + }, + } + + allRules := rbac.GetAllRules(role) + for _, r := range allRules { + fmt.Printf("Group=%s, Resource=%s, Verb=%s\n", + r.Resource.Group, r.Resource.Name, r.Verb) + } +} +``` + +--- + +### GetCrdResources + +**GetCrdResources** - Transforms a slice of `*apiextv1.CustomResourceDefinition` into a slice of `CrdResource`, extracting group, singular/ plural names and short names for each CRD. 
+ +#### 1) Signature (Go) + +```go +func GetCrdResources(crds []*apiextv1.CustomResourceDefinition) (resourceList []CrdResource) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a slice of `*apiextv1.CustomResourceDefinition` into a slice of `CrdResource`, extracting group, singular/ plural names and short names for each CRD. | +| **Parameters** | `crds []*apiextv1.CustomResourceDefinition` – the CRDs to process | +| **Return value** | `resourceList []CrdResource` – one `CrdResource` per input CRD | +| **Key dependencies** | • `append` (built‑in)
• `apiextv1.CustomResourceDefinition` (Kubernetes API) | +| **Side effects** | None; pure function that only reads its arguments and returns a new slice. | +| **How it fits the package** | Provides foundational data for role‑based access control checks that need to know which resources are represented by CRDs in the cluster. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over crds"} + B --> C["Create a new CrdResource"] + C --> D["Populate Group, SingularName, PluralName, ShortNames"] + D --> E["Append to resourceList"] + E --> B + B --> F["Return resourceList"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetCrdResources --> func_append +``` + +#### 5) Functions calling `GetCrdResources` (Mermaid) + +```mermaid +graph TD + func_testCrdRoles --> func_GetCrdResources +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetCrdResources +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/common/rbac" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +func example() { + // Assume crds is a slice of *apiextv1.CustomResourceDefinition retrieved elsewhere + var crds []*apiextv1.CustomResourceDefinition + + // Convert to CrdResource slice + resources := rbac.GetCrdResources(crds) + + // Use the resulting resources as needed... + _ = resources +} +``` + +--- + +### SliceDifference + +**SliceDifference** - Computes the set difference of two `[]RoleRule` slices – elements that exist in *s1* but not in *s2*. + +#### Signature (Go) + +```go +func SliceDifference(s1, s2 []RoleRule) (diff []RoleRule) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Computes the set difference of two `[]RoleRule` slices – elements that exist in *s1* but not in *s2*. | +| **Parameters** | `s1 []RoleRule` – first slice; `s2 []RoleRule` – second slice. 
| +| **Return value** | `diff []RoleRule` – the subset of *s1* that is missing from *s2*. | +| **Key dependencies** | • `len()` to compare slice lengths.
• `append()` to build the result slice. |
+| **Side effects** | Pure function: no global state or I/O; returns a new slice. |
+| **How it fits the package** | Used by higher‑level RBAC helpers (e.g., `FilterRulesNonMatchingResources`) to isolate unmatched role rules. |
+
+#### Internal workflow
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Is s2 longer?"}
+    B -- Yes --> C["Swap s1 & s2"]
+    B -- No --> D["Proceed"]
+    C --> D
+    D --> E{"For each v1 in s1"}
+    E --> F{"Check if v1 equals any v2 in s2"}
+    F -- Match --> G["Mark as present"]
+    F -- No match --> H["Append v1 to diff"]
+    H --> I["Continue loop"]
+    G --> I
+    I --> J["End loop"]
+    J --> K["Return diff"]
+```
+
+#### Function dependencies
+
+```mermaid
+graph TD
+    func_SliceDifference --> len
+    func_SliceDifference --> append
+```
+
+#### Functions calling `SliceDifference`
+
+```mermaid
+graph TD
+    func_FilterRulesNonMatchingResources --> func_SliceDifference
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking SliceDifference
+rulesA := []RoleRule{
+	{Resource: RoleResource{Group: "", Name: "pods"}, Verb: "get"},
+	{Resource: RoleResource{Group: "", Name: "services"}, Verb: "list"},
+}
+rulesB := []RoleRule{
+	{Resource: RoleResource{Group: "", Name: "pods"}, Verb: "get"},
+}
+
+diff := SliceDifference(rulesA, rulesB)
+// diff contains the rule for "services"
+```
+
+---
+
+## Local Functions
+
+### isResourceInRoleRule
+
+**isResourceInRoleRule** - Determines whether a custom resource definition (CRD) matches a given RBAC `RoleRule` by comparing the CRD’s API group and plural name against the rule’s resource specification.
+ +#### 1) Signature (Go) + +```go +func isResourceInRoleRule(crd CrdResource, roleRule RoleRule) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a custom resource definition (CRD) matches a given RBAC `RoleRule` by comparing the CRD’s API group and plural name against the rule’s resource specification. | +| **Parameters** | `crd CrdResource – the CRD to evaluate`
`roleRule RoleRule – the RBAC rule containing the target resource definition` | +| **Return value** | `bool – true if the CRD’s group and plural name match those of the rule; otherwise false` | +| **Key dependencies** | • `strings.Split` from the standard library (used to strip sub‑resource segments)
• The types `CrdResource` and `RoleRule` defined in the same package |
+| **Side effects** | None. Pure function with no state mutation or I/O. |
+| **How it fits the package** | Serves as a helper used by higher‑level functions such as `FilterRulesNonMatchingResources`, enabling the package to filter RBAC rules based on whether they correspond to existing CRDs. |
+
+#### 3) Internal workflow (Mermaid)
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Extract rules resource name"}
+    B --> C["Split by /"]
+    C --> D{"Take first segment as plural"}
+    D --> E{"Compare crd.Group == roleRule.Resource.Group"}
+    E -- No --> F["Return false"]
+    E -- Yes --> G{"crd.PluralName == ruleResourcePluralName"}
+    G -- No --> H["Return false"]
+    G -- Yes --> I["Return true"]
+```
+
+#### 4) Function dependencies (Mermaid)
+
+```mermaid
+graph TD
+    func_isResourceInRoleRule --> func_Split["strings.Split"]
+```
+
+#### 5) Functions calling `isResourceInRoleRule` (Mermaid)
+
+```mermaid
+graph TD
+    func_FilterRulesNonMatchingResources --> func_isResourceInRoleRule
+```
+
+#### 6) Usage example (Go)
+
+```go
+// Minimal example invoking isResourceInRoleRule.
+// The function is unexported, so this example must live inside package rbac.
+package rbac
+
+import "fmt"
+
+func exampleIsResourceInRoleRule() {
+	crd := CrdResource{
+		Group:      "apps.example.com",
+		PluralName: "widgets",
+	}
+	roleRule := RoleRule{
+		Resource: RoleResource{
+			Name:  "widgets", // could be "widgets/subresource"
+			Group: "apps.example.com",
+		},
+	}
+
+	matches := isResourceInRoleRule(crd, roleRule)
+	fmt.Printf("Does the CRD match the rule? 
%t\n", matches) +} +``` + +--- diff --git a/docs/tests/identifiers/identifiers.md b/docs/tests/identifiers/identifiers.md new file mode 100644 index 000000000..54c5cc3e9 --- /dev/null +++ b/docs/tests/identifiers/identifiers.md @@ -0,0 +1,359 @@ +# Package identifiers + +**Path**: `tests/identifiers` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [AddCatalogEntry](#addcatalogentry) + - [GetTestIDAndLabels](#gettestidandlabels) + - [InitCatalog](#initcatalog) +- [Local Functions](#local-functions) + - [init](#init) + +## Overview + +The identifiers package defines test identifiers, catalog registration helpers, and mapping utilities for the Certsuite testing framework. + +### Key Features + +- Centralized catalog of test case descriptions and impact statements +- Convenient helper functions (AddCatalogEntry, GetTestIDAndLabels) to register and retrieve tests +- Automatic initialization that populates a global catalog on import + +### Design Notes + +- Registration occurs in init() via InitCatalog(); catalog is a map[claim.Identifier]claim.TestCaseDescription +- Identifiers are built from constants; impact mapping is separate for readability +- Functions avoid exposing internal structures; only exported helpers are used by test generators + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func AddCatalogEntry( testID string, suiteName string, description string, remediation string, exception string, reference string, qe bool, categoryclassification map[string]string, tags ...string, ) (aID claim.Identifier)](#addcatalogentry) | Creates a `claim.TestCaseDescription`, stores it in the global catalog, and records its classification. It returns the unique identifier for the test case. 
| +| [func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string)](#gettestidandlabels) | Transforms a `claim.Identifier` into a test identifier suitable for the test framework and collects labels that describe the test. The returned ID is used as the key in check registrations; the labels are later used for filtering or categorisation. | +| [func InitCatalog() map[claim.Identifier]claim.TestCaseDescription](#initcatalog) | Populates a global catalog with test case descriptions by registering each test identifier via `AddCatalogEntry`. Returns the populated catalog. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func init()](#init) | Automatically registers a collection of test identifiers when the package is imported. It invokes `InitCatalog()` to populate the internal catalog. | + +## Exported Functions + +### AddCatalogEntry + +**AddCatalogEntry** - Creates a `claim.TestCaseDescription`, stores it in the global catalog, and records its classification. It returns the unique identifier for the test case. + +#### 1) Signature (Go) + +```go +func AddCatalogEntry( + testID string, + suiteName string, + description string, + remediation string, + exception string, + reference string, + qe bool, + categoryclassification map[string]string, + tags ...string, +) (aID claim.Identifier) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a `claim.TestCaseDescription`, stores it in the global catalog, and records its classification. It returns the unique identifier for the test case. | +| **Parameters** | • `testID` – internal id of the test case
• `suiteName` – name of the test suite (e.g., “preflight”)
• `description` – human‑readable description
• `remediation` – suggested fix when the test fails
• `exception` – exception text, defaults to `NoDocumentedProcess` if empty
• `reference` – documentation link, defaults to “No Reference Document Specified” if empty
• `qe` – flag for Quality‑Engineering overrides
• `categoryclassification` – map of category → classification (e.g., FarEdge: Optional)
• `tags ...string` – optional tags; default is `TagCommon`. | +| **Return value** | The generated `claim.Identifier` that uniquely identifies the test case. | +| **Key dependencies** | • `strings.TrimSpace`
• `claim.BuildTestCaseDescription`
• Global maps: `Catalog`, `Classification` | +| **Side effects** | Mutates the global `Catalog` map by inserting a new entry and updates `Classification`. | +| **How it fits the package** | Provides the core cataloging mechanism used throughout the test suite to expose all tests, their metadata, and classification for reporting and filtering. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Missing exception?"} + B -- yes --> C["Set exception = NoDocumentedProcess"] + B -- no --> D["Continue"] + D --> E{"Missing reference?"} + E -- yes --> F["Set reference = No Reference Document Specified"] + E -- no --> G["Continue"] + G --> H{"Tags provided?"} + H -- no --> I["tags = TagCommon"] + H -- yes --> J["Use supplied tags"] + I --> K + J --> K + K --> L["Call claim.BuildTestCaseDescription(...)"] + L --> M["Insert into Catalog"] + M --> N["Insert into Classification"] + N --> O["Return aID"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_AddCatalogEntry --> strings.TrimSpace + func_AddCatalogEntry --> claim.BuildTestCaseDescription + func_AddCatalogEntry --> Catalog + func_AddCatalogEntry --> Classification +``` + +#### 5) Functions calling `AddCatalogEntry` (Mermaid) + +```mermaid +graph TD + identifiers.InitCatalog --> func_AddCatalogEntry + generatePreflightContainerCnfCertTest --> func_AddCatalogEntry + generatePreflightOperatorCnfCertTest --> func_AddCatalogEntry + addPreflightTestsToCatalog --> func_AddCatalogEntry +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking AddCatalogEntry +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers" + claimPkg "github.com/redhat-best-practices-for-rh/.../claim" // replace with real path +) + +func main() { + id := identifiers.AddCatalogEntry( + "example-test", + "preflight", + "Checks that example condition holds", + "Adjust configuration to satisfy the test", + "", // empty => defaults to 
NoDocumentedProcess + "", // empty => defaults to “No Reference Document Specified” + false, + map[string]string{ + identifiers.FarEdge: identifiers.Optional, + identifiers.Telco: identifiers.Optional, + identifiers.NonTelco: identifiers.Optional, + identifiers.Extended: identifiers.Optional, + }, + identifiers.TagPreflight, // optional tag + ) + _ = id // use the identifier as needed +} +``` + +This example demonstrates how to register a new test case with default values for exception and reference, classify it across categories, and assign a tag. + +--- + +### GetTestIDAndLabels + +**GetTestIDAndLabels** - Transforms a `claim.Identifier` into a test identifier suitable for the test framework and collects labels that describe the test. The returned ID is used as the key in check registrations; the labels are later used for filtering or categorisation. + +#### Signature (Go) + +```go +func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a `claim.Identifier` into a test identifier suitable for the test framework and collects labels that describe the test. The returned ID is used as the key in check registrations; the labels are later used for filtering or categorisation. | +| **Parameters** | `identifier claim.Identifier –` a struct containing fields `Id`, `Suite`, and `Tags`. | +| **Return value** | `testID string` – the test identifier (`identifier.Id`).
`tags []string` – a slice of label strings derived from `identifier.Tags` plus the ID and suite name. | +| **Key dependencies** | *`strings.Split` – splits comma‑separated tags.
* `append` – adds additional labels to the slice.
* Assignment to global map `TestIDToClaimID`. | +| **Side effects** | Stores the mapping from test ID to its original claim identifier in the package‑level map `TestIDToClaimID`; mutates no external state. | +| **How it fits the package** | The identifiers package centralises conversion between claim data and test metadata. Functions that register checks call this helper to obtain a unique test ID and the labels required for skip logic, categorisation, and reporting. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive claim.Identifier"] --> B{"Split Tags"} + B --> C["tags = strings.Split(identifier.Tags, ,)"] + C --> D["Append identifier.Id"] + D --> E["Append identifier.Suite"] + E --> F["Store mapping in TestIDToClaimID"] + F --> G["Return (identifier.Id, tags)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetTestIDAndLabels --> func_Split(strings) + func_GetTestIDAndLabels --> append(built‑in) +``` + +#### Functions calling `GetTestIDAndLabels` (Mermaid) + +```mermaid +graph TD + func_SanitizeClaimFile --> func_GetTestIDAndLabels + func_LoadChecks(accesscontrol) --> func_GetTestIDAndLabels + func_LoadChecks(certification) --> func_GetTestIDAndLabels + func_LoadChecks(lifecycle) --> func_GetTestIDAndLabels + func_LoadChecks(manageability) --> func_GetTestIDAndLabels + func_LoadChecks(networking) --> func_GetTestIDAndLabels + func_LoadChecks(observability) --> func_GetTestIDAndLabels + func_LoadChecks(operator) --> func_GetTestIDAndLabels + func_LoadChecks(performance) --> func_GetTestIDAndLabels + func_LoadChecks(platform) --> func_GetTestIDAndLabels + func_generatePreflightContainerCnfCertTest --> func_GetTestIDAndLabels + func_generatePreflightOperatorCnfCertTest --> func_GetTestIDAndLabels +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetTestIDAndLabels +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers" + 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/claim" +) + +func main() { + id := claim.Identifier{ + Id: "SecContextCapabilities", + Suite:"accesscontrol", + Tags: "security,capabilities", + } + testID, tags := identifiers.GetTestIDAndLabels(id) + fmt.Printf("Test ID: %s\n", testID) // SecContextCapabilities + fmt.Printf("Tags: %v\n", tags) // [security capabilities SecContextCapabilities accesscontrol] +} +``` + +--- + +### InitCatalog + +**InitCatalog** - Populates a global catalog with test case descriptions by registering each test identifier via `AddCatalogEntry`. Returns the populated catalog. + +#### Signature (Go) + +```go +func InitCatalog() map[claim.Identifier]claim.TestCaseDescription +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Populates a global catalog with test case descriptions by registering each test identifier via `AddCatalogEntry`. Returns the populated catalog. | +| **Parameters** | none | +| **Return value** | A map where keys are `claim.Identifier` and values are `claim.TestCaseDescription`, representing all registered tests. | +| **Key dependencies** | - Calls `AddCatalogEntry` repeatedly to register each test case.
- Relies on global variables such as `TestNetworkPolicyDenyAllIdentifier`, `TagCommon`, etc., for identifiers and default tags. | +| **Side effects** | Mutates the global `Catalog` and `Classification` maps via `AddCatalogEntry`. No I/O or concurrency is performed. | +| **How it fits the package** | The function is invoked during package initialization (`init()`), ensuring that all test cases are available before any tests run. It centralises catalog creation, making the test suite self‑contained and deterministic. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + InitCatalog --> RegisterTest1["AddCatalogEntry for TestNetworkPolicyDenyAllIdentifier"] + InitCatalog --> RegisterTest2["AddCatalogEntry for Test1337UIDIdentifier"] + InitCatalog --> RegisterTest3["AddCatalogEntry for TestLimitedUseOfExecProbesIdentifier"] + InitCatalog --> RegisterTest4["AddCatalogEntry for TestHelmVersionIdentifier"] + InitCatalog --> ... + InitCatalog --> ReturnCatalog["return Catalog"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_InitCatalog --> func_AddCatalogEntry +``` + +#### Functions calling `InitCatalog` (Mermaid) + +```mermaid +graph TD + init --> func_InitCatalog +``` + +#### Usage example (Go) + +```go +// Minimal example invoking InitCatalog +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers" +) + +func main() { + catalog := identifiers.InitCatalog() + fmt.Printf("Loaded %d test cases\n", len(catalog)) +} +``` + +--- + +--- + +## Local Functions + +### init + +**init** - Automatically registers a collection of test identifiers when the package is imported. It invokes `InitCatalog()` to populate the internal catalog. + +#### Signature + +```go +func init() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Automatically registers a collection of test identifiers when the package is imported. 
It invokes `InitCatalog()` to populate the internal catalog. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | • Calls `InitCatalog()` from the same package. | +| **Side effects** | Populates global state (the identifier catalog) which is used by other tests in the package. No I/O or concurrency involved. | +| **How it fits the package** | Acts as a module‑initialization hook, ensuring that all identifiers are available before any test execution begins. | + +#### Internal workflow + +```mermaid +flowchart TD + A["init"] --> B["InitCatalog"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_init --> func_InitCatalog +``` + +#### Functions calling `init` + +None – this function is automatically invoked by the Go runtime when the package is imported. + +#### Usage example (Go) + +```go +// The init function runs automatically; no explicit call is required. +// Importing the package triggers registration of identifiers. +import _ "github.com/redhat-best-practices-for-k8s/certsuite/tests/identifiers" +``` + +--- diff --git a/docs/tests/lifecycle/lifecycle.md b/docs/tests/lifecycle/lifecycle.md new file mode 100644 index 000000000..d82ecfd14 --- /dev/null +++ b/docs/tests/lifecycle/lifecycle.md @@ -0,0 +1,1734 @@ +# Package lifecycle + +**Path**: `tests/lifecycle` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local Functions](#local-functions) + - [nameInDeploymentSkipList](#nameindeploymentskiplist) + - [nameInStatefulSetSkipList](#nameinstatefulsetskiplist) + - [testAffinityRequiredPods](#testaffinityrequiredpods) + - [testCPUIsolation](#testcpuisolation) + - [testContainersImagePolicy](#testcontainersimagepolicy) + - [testContainersLivenessProbe](#testcontainerslivenessprobe) + - [testContainersPostStart](#testcontainerspoststart) + - [testContainersPreStop](#testcontainersprestop) + - 
[testContainersReadinessProbe](#testcontainersreadinessprobe) + - [testContainersStartupProbe](#testcontainersstartupprobe) + - [testDeploymentScaling](#testdeploymentscaling) + - [testHighAvailability](#testhighavailability) + - [testPodNodeSelectorAndAffinityBestPractices](#testpodnodeselectorandaffinitybestpractices) + - [testPodPersistentVolumeReclaimPolicy](#testpodpersistentvolumereclaimpolicy) + - [testPodTolerationBypass](#testpodtolerationbypass) + - [testPodsOwnerReference](#testpodsownerreference) + - [testPodsRecreation](#testpodsrecreation) + - [testScaleCrd](#testscalecrd) + - [testStatefulSetScaling](#teststatefulsetscaling) + - [testStorageProvisioner](#teststorageprovisioner) + +## Overview + +The lifecycle test suite registers a collection of checks that validate Kubernetes objects (Pods, Deployments, StatefulSets, CRDs, storage, tolerations, etc.) against best‑practice rules during certification runs. + +### Key Features + +- Comprehensive set of check functions covering pod lifecycle hooks, probes, affinity, scaling, owner references and resource cleanup +- Dynamic skip logic based on configuration and test environment state +- Integration with the internal checks database to expose results to the certsuite framework + +### Design Notes + +- Checks are registered lazily via LoadChecks; each check records detailed report objects for post‑mortem analysis +- Skip functions rely on runtime data (e.g., pod sets, node count) to avoid false positives in unsuitable clusters +- The suite uses a TestEnvironment abstraction to access cluster resources without hardcoding API calls + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Registers all lifecycle‑related test checks into the internal check database. It creates a `ChecksGroup` for the lifecycle suite and adds individual checks with their skip logic, execution functions, and metadata. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func nameInDeploymentSkipList(name, namespace string, list []configuration.SkipScalingTestDeploymentsInfo) bool](#nameindeploymentskiplist) | Checks whether a given deployment (`name` and `namespace`) is present in the skip‑list defined by `list`. Returns `true` if it should be skipped. | +| [func nameInStatefulSetSkipList(name, namespace string, list []configuration.SkipScalingTestStatefulSetsInfo) bool](#nameinstatefulsetskiplist) | Determines whether a StatefulSet identified by `name` and `namespace` is listed in the skip configuration for scaling tests. | +| [func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironment)](#testaffinityrequiredpods) | Iterates over all pods marked as requiring node affinity in the test environment and records whether each pod satisfies its affinity constraints. The function logs progress, captures compliance status, and sets the check result. | +| [func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment)](#testcpuisolation) | Evaluates each pod with exclusive CPUs to determine if it meets CPU‑isolation best practices and records compliant/non‑compliant findings. | +| [func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainersimagepolicy) | Iterates over all containers in the test environment and records whether each uses `IfNotPresent` as its image pull policy. Non‑compliant containers are reported with an error message; compliant ones are logged. | +| [func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainerslivenessprobe) | Checks each container in the provided environment for the presence of a `livenessProbe`. It records compliant and non‑compliant containers and reports the outcome. 
| +| [func testContainersPostStart(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainerspoststart) | Determines whether every container in the test environment defines a `postStart` lifecycle hook and records compliance. | +| [func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainersprestop) | Ensures each container in the test environment has a `preStop` lifecycle hook; records compliant and non‑compliant containers. | +| [func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainersreadinessprobe) | Checks each container in the test environment to confirm a readiness probe is defined; records compliant and non‑compliant containers. | +| [func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainersstartupprobe) | Determines whether every container in the test environment defines a Kubernetes `startupProbe`. Containers lacking this probe are flagged as non‑compliant. | +| [func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check)](#testdeploymentscaling) | Determines whether deployments can scale safely, either via HPA or direct scaling, and records compliance. | +| [func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment)](#testhighavailability) | Confirms that each Deployment or StatefulSet has more than one replica and defines Pod anti‑affinity rules, unless the resource is marked with `AffinityRequired`. | +| [func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check *checksdb.Check)](#testpodnodeselectorandaffinitybestpractices) | Ensures that each Pod in the supplied slice does not specify a node selector or node affinity, flagging any violations. 
| +| [func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment)](#testpodpersistentvolumereclaimpolicy) | Ensures each pod that mounts a Persistent Volume Claim has its underlying PV set to `Delete` reclaim policy. Non‑compliant pods are reported for remediation. | +| [func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironment)](#testpodtolerationbypass) | Validates every Pod in `env.Pods` contains only the default Kubernetes tolerations. Non‑default or modified tolerations mark the Pod as non‑compliant. | +| [func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment)](#testpodsownerreference) | Iterates over all pods in the test environment and verifies that each pod’s owner reference meets defined best‑practice rules. Logs results and records compliant/non‑compliant objects for reporting. | +| [func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment)](#testpodsrecreation) | Verifies that pods belonging to deployments and statefulsets are correctly recreated and become ready after a node is cordoned/drained. | +| [func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check)](#testscalecrd) | Iterates over all CRs under test, attempts to scale each via its HorizontalPodAutoscaler (HPA) if present; otherwise performs a direct scaling test. Records compliant and non‑compliant objects in the supplied check result. | +| [func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check)](#teststatefulsetscaling) | Evaluates whether each StatefulSet in the environment can be scaled (directly or via HPA) and logs compliance. | +| [func testStorageProvisioner(check *checksdb.Check, env *provider.TestEnvironment)](#teststorageprovisioner) | Ensures pods use an appropriate storage provisioner (local or non‑local) according to whether the cluster is single‑node or multi‑node, and records compliance. 
| + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Registers all lifecycle‑related test checks into the internal check database. It creates a `ChecksGroup` for the lifecycle suite and adds individual checks with their skip logic, execution functions, and metadata. + +#### 1) Signature (Go) + +```go +func LoadChecks() +``` + +--- + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Registers all lifecycle‑related test checks into the internal check database. It creates a `ChecksGroup` for the lifecycle suite and adds individual checks with their skip logic, execution functions, and metadata. | +| **Parameters** | None | +| **Return value** | None – the function populates global state via the checks database. | +| **Key dependencies** | • `log.Debug` – logs start of loading.
• `checksdb.NewChecksGroup` – creates a new group.
• `WithBeforeEachFn`, `WithCheckFn`, `WithSkipCheckFn` – chain configuration helpers.
• `identifiers.GetTestIDAndLabels` – supplies test IDs and tags.
• Various `testhelper.*` skip functions (e.g., `GetNoContainersUnderTestSkipFn`).
• Test implementation functions such as `testContainersPreStop`, `testScaleCrd`, `testHighAvailability`, etc. | +| **Side effects** | Mutates the global checks database by adding a group and its checks; logs debug information; may close over test environment (`env`) captured from outer scope. | +| **How it fits the package** | Part of the `lifecycle` test suite; invoked during overall test initialization via `LoadInternalChecksDB`. It defines all checks that verify Kubernetes resource best practices related to lifecycle events and scaling. | + +--- + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start LoadChecks"] --> B["log.Debug(Loading ...)"] + B --> C{"Create ChecksGroup"} + C --> D["NewChecksGroup(common.LifecycleTestKey)"] + D --> E["Add WithBeforeEachFn(beforeEachFn)"] + E --> F{"Add individual checks"} + + subgraph Individual_Checks + F1["Prestop Check"] --> F2["Poststart Check"] + F3["ImagePullPolicy Check"] --> F4["Readiness Probe Check"] + F5["Liveness Probe Check"] --> F6["Startup Probe Check"] + F7["Pod Owner Reference Check"] --> F8["High Availability Check"] + F9["Selector/Affinity Best Practices"] --> FA["Pod Recreation Check"] + FB["Deployment Scaling Check"] --> FC["StatefulSet Scaling Check"] + FD["PV Reclaim Policy Check"] --> FE["CPU Isolation Check"] + FF["Affinity Required Pods Check"] --> FG["Pod Toleration Bypass Check"] + FH["Storage Provisioner Check"] + end + + F --> Individual_Checks + Individual_Checks --> Z["End LoadChecks"] +``` + +--- + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> log_Debug + func_LoadChecks --> checksdb_NewChecksGroup + func_LoadChecks --> WithBeforeEachFn + func_LoadChecks --> identifiers_GetTestIDAndLabels + func_LoadChecks --> testhelper_GetNoContainersUnderTestSkipFn + func_LoadChecks --> testhelper_GetNotIntrusiveSkipFn + func_LoadChecks --> testhelper_GetNotEnoughWorkersSkipFn + func_LoadChecks --> testhelper_SkipIfNoPodSetsetsUnderTest + 
func_LoadChecks --> testContainersPreStop + func_LoadChecks --> testScaleCrd + func_LoadChecks --> testContainersPostStart + func_LoadChecks --> testContainersImagePolicy + func_LoadChecks --> testContainersReadinessProbe + func_LoadChecks --> testContainersLivenessProbe + func_LoadChecks --> testContainersStartupProbe + func_LoadChecks --> testPodsOwnerReference + func_LoadChecks --> testHighAvailability + func_LoadChecks --> testPodNodeSelectorAndAffinityBestPractices + func_LoadChecks --> testPodsRecreation + func_LoadChecks --> testDeploymentScaling + func_LoadChecks --> testStatefulSetScaling + func_LoadChecks --> testPodPersistentVolumeReclaimPolicy + func_LoadChecks --> testCPUIsolation + func_LoadChecks --> testAffinityRequiredPods + func_LoadChecks --> testPodTolerationBypass + func_LoadChecks --> testStorageProvisioner +``` + +--- + +#### 5) Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + certsuite_LoadInternalChecksDB --> func_LoadChecks +``` + +*Called by:* `certsuite.LoadInternalChecksDB` to populate the lifecycle check group during overall test suite initialization. + +--- + +#### 6) Usage example (Go) + +```go +// In a test initialization routine: +func init() { + // Load all lifecycle checks into the global database. + lifecycle.LoadChecks() +} +``` + +This invocation registers the entire set of lifecycle‑related tests, after which they can be executed by the certsuite framework. + +--- + +## Local Functions + +### nameInDeploymentSkipList + +**nameInDeploymentSkipList** - Checks whether a given deployment (`name` and `namespace`) is present in the skip‑list defined by `list`. Returns `true` if it should be skipped. 
+ +#### Signature (Go) + +```go +func nameInDeploymentSkipList(name, namespace string, list []configuration.SkipScalingTestDeploymentsInfo) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether a given deployment (`name` and `namespace`) is present in the skip‑list defined by `list`. Returns `true` if it should be skipped. | +| **Parameters** | `name string –` Deployment name.
`namespace string –` Deployment namespace.
`list []configuration.SkipScalingTestDeploymentsInfo –` Collection of deployment identifiers to skip. | +| **Return value** | `bool –` `true` when the deployment matches an entry in `list`; otherwise `false`. | +| **Key dependencies** | *None* – purely local logic. | +| **Side effects** | None; function is pure. | +| **How it fits the package** | Used by test harnesses to exclude specific deployments from scaling tests based on configuration. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over list"} + B -->|"Match found"| C["Return true"] + B -->|"No match"| D["Continue loop"] + D --> B + B -->|"End of list"| E["Return false"] +``` + +#### Function dependencies (Mermaid) + +None – this function calls no other functions; its logic is purely local. + +```mermaid +graph TD + None +``` + +#### Functions calling `nameInDeploymentSkipList` (Mermaid) + +```mermaid +graph TD + func_testDeploymentScaling --> func_nameInDeploymentSkipList +``` + +#### Usage example (Go) + +```go +// Minimal example invoking nameInDeploymentSkipList +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/configuration" +) + +func main() { + skipList := []configuration.SkipScalingTestDeploymentsInfo{ + {Name: "frontend", Namespace: "prod"}, + } + if lifecycle.nameInDeploymentSkipList("frontend", "prod", skipList) { + fmt.Println("Deployment is skipped") + } else { + fmt.Println("Deployment will be tested") + } +} +``` + +--- + +### nameInStatefulSetSkipList + +**nameInStatefulSetSkipList** - Determines whether a StatefulSet identified by `name` and `namespace` is listed in the skip configuration for scaling tests. 
+ +#### Signature (Go) + +```go +func nameInStatefulSetSkipList(name, namespace string, list []configuration.SkipScalingTestStatefulSetsInfo) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a StatefulSet identified by `name` and `namespace` is listed in the skip configuration for scaling tests. | +| **Parameters** | `name string` – name of the StatefulSet;
`namespace string` – namespace containing the StatefulSet;
`list []configuration.SkipScalingTestStatefulSetsInfo` – slice of skip‑rules to check against. | +| **Return value** | `bool` – `true` if the StatefulSet matches any rule in `list`; otherwise `false`. | +| **Key dependencies** | *None* – function contains only a loop and conditional logic. | +| **Side effects** | None. Purely functional; no state mutation or I/O. | +| **How it fits the package** | Used by test harnesses to skip scaling tests for specific StatefulSets defined in the configuration (`SkipScalingTestStatefulSets`). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckLoop["For each rule `l` in list"] + CheckLoop --> Cond{"Does `name == l.Name` AND `namespace == l.Namespace`?"} + Cond -- Yes --> ReturnTrue["Return true"] + Cond -- No --> Continue["Continue loop"] + Continue --> EndLoop{{End of list}} + EndLoop --> ReturnFalse["Return false"] +``` + +#### Function dependencies (Mermaid) + +None – this function calls no other functions; it contains only local loop and comparison logic. 
+ +```mermaid +graph TD + func_nameInStatefulSetSkipList --> None +``` + +#### Functions calling `nameInStatefulSetSkipList` (Mermaid) + +```mermaid +graph TD + testStatefulSetScaling --> nameInStatefulSetSkipList +``` + +#### Usage example (Go) + +```go +// Minimal example invoking nameInStatefulSetSkipList +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/configuration" +) + +func main() { + skipRules := []configuration.SkipScalingTestStatefulSetsInfo{ + {Name: "example-ss", Namespace: "default"}, + } + + // Will return true because the StatefulSet matches a rule + shouldSkip := lifecycle.nameInStatefulSetSkipList("example-ss", "default", skipRules) + + fmt.Printf("Should skip: %v\n", shouldSkip) +} +``` + +--- + +### testAffinityRequiredPods + +**testAffinityRequiredPods** - Iterates over all pods marked as requiring node affinity in the test environment and records whether each pod satisfies its affinity constraints. The function logs progress, captures compliance status, and sets the check result. + +#### Signature (Go) + +```go +func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all pods marked as requiring node affinity in the test environment and records whether each pod satisfies its affinity constraints. The function logs progress, captures compliance status, and sets the check result. | +| **Parameters** | `check *checksdb.Check` – the current check context for logging and result reporting.
`env *provider.TestEnvironment` – provides access to the set of pods under test (`GetAffinityRequiredPods`). | +| **Return value** | None (void). The outcome is communicated via `check.SetResult`. | +| **Key dependencies** | • `env.GetAffinityRequiredPods()` – retrieves the slice of pod wrappers.
• `put.IsAffinityCompliant()` – evaluates a single pod’s affinity compliance.
• `check.LogInfo`, `check.LogError` – logging utilities.
• `testhelper.NewPodReportObject` – constructs report objects for compliant/non‑compliant pods.
• `check.SetResult` – stores the final result set. | +| **Side effects** | • Emits log messages for each pod processed.
• Modifies the internal state of `check` by setting its result; does not alter any external system or pod objects. | +| **How it fits the package** | This function implements the “Affinity required pods” test within the lifecycle suite, ensuring that all relevant pods adhere to node affinity best practices before a cluster is considered compliant. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve affinity‑required pods"} + B --> C["Iterate over each pod"] + C --> D["Log “Testing Pod”"] + D --> E["Check IsAffinityCompliant()"] + E -- false --> F["Log error, add to non‑compliant list"] + E -- true --> G["Log success, add to compliant list"] + G & F --> H["Continue loop"] + H --> I{"Loop finished?"} + I -- no --> C + I -- yes --> J["SetResult(compliant, nonCompliant)"] + J --> K["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + testAffinityRequiredPods --> GetAffinityRequiredPods + testAffinityRequiredPods --> IsAffinityCompliant + testAffinityRequiredPods --> LogInfo + testAffinityRequiredPods --> LogError + testAffinityRequiredPods --> NewPodReportObject + testAffinityRequiredPods --> SetResult +``` + +#### Functions calling `testAffinityRequiredPods` + +```mermaid +graph TD + LoadChecks --> testAffinityRequiredPods +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testAffinityRequiredPods +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env is already populated with pods requiring affinity + var env *provider.TestEnvironment + // Create a dummy check context + check := checksdb.NewCheck("dummy-test") + lifecycle.testAffinityRequiredPods(check, env) +} +``` + +--- + +### testCPUIsolation + +**testCPUIsolation** - Evaluates each pod with exclusive CPUs to determine if it meets 
CPU‑isolation best practices and records compliant/non‑compliant findings. + +#### Signature + +```go +func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Evaluates each pod with exclusive CPUs to determine if it meets CPU‑isolation best practices and records compliant/non‑compliant findings. | +| **Parameters** | `check` – *checksdb.Check, the test context;
`env` – *provider.TestEnvironment, provides the pods under test. |
+| **Return value** | None (results are stored via `check.SetResult`). |
+| **Key dependencies** | • `env.GetGuaranteedPodsWithExclusiveCPUs()`
• `put.IsCPUIsolationCompliant()`
• `testhelper.NewPodReportObject`
• `check.LogInfo`, `check.LogError`
• `check.SetResult` | +| **Side effects** | Logs informational and error messages; updates the check result with lists of compliant and non‑compliant report objects. No external I/O beyond logging. | +| **How it fits the package** | Part of the lifecycle test suite, specifically registered under `TestCPUIsolationIdentifier`. It ensures pods that claim exclusive CPUs also satisfy CPU isolation requirements such as proper request/limit values, runtime class, and annotations. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Get guaranteed pods"} + B --> C["Iterate each pod"] + C --> D{"Is compliant?"} + D -- Yes --> E["Log success
Add to compliant list"] + D -- No --> F["Log error
Add to non‑compliant list"] + E --> G["Continue loop"] + F --> G + G --> H{"Loop finished?"} + H -- No --> C + H -- Yes --> I["SetResult(compliant, nonCompliant)"] + I --> J["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + testCPUIsolation --> GetGuaranteedPodsWithExclusiveCPUs + testCPUIsolation --> LogInfo + testCPUIsolation --> IsCPUIsolationCompliant + testCPUIsolation --> LogError + testCPUIsolation --> append + testCPUIsolation --> NewPodReportObject + testCPUIsolation --> SetResult +``` + +#### Functions calling `testCPUIsolation` + +```mermaid +graph TD + LoadChecks --> testCPUIsolation +``` + +#### Usage example + +```go +// Minimal example invoking testCPUIsolation +check := checksdb.NewCheck("example") +env := provider.NewTestEnvironment() +testCPUIsolation(check, env) +``` + +--- + +### testContainersImagePolicy + +**testContainersImagePolicy** - Iterates over all containers in the test environment and records whether each uses `IfNotPresent` as its image pull policy. Non‑compliant containers are reported with an error message; compliant ones are logged. + +#### Signature (Go) + +```go +func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all containers in the test environment and records whether each uses `IfNotPresent` as its image pull policy. Non‑compliant containers are reported with an error message; compliant ones are logged. | +| **Parameters** | *check* `*checksdb.Check` – the check context for logging and result aggregation.
*env* `*provider.TestEnvironment` – contains the list of containers to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `testhelper.NewContainerReportObject`
• `append` (built‑in)
• `check.SetResult` | +| **Side effects** | • Writes log entries.
• Modifies the check’s result state via `SetResult`. No external I/O. | +| **How it fits the package** | It is one of several container‑level tests registered in `LoadChecks`; specifically, it enforces best practice for image pull policies within the lifecycle test suite. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Containers"} + B -->|"For each cut"| C["LogInfo “Testing Container …”"] + C --> D{"cut.ImagePullPolicy == PullIfNotPresent?"} + D -- No --> E["LogError “Container … not using IfNotPresent”"] + E --> F["Append to nonCompliantObjects"] + D -- Yes --> G["LogInfo “Container … using IfNotPresent”"] + G --> H["Append to compliantObjects"] + H --> I["Continue loop"] + I --> B + B --> J{"Loop finished?"} + J -- No --> C + J -- Yes --> K["SetResult(compliant, nonCompliant)"] + K --> L["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testContainersImagePolicy --> func_LogInfo + func_testContainersImagePolicy --> func_LogError + func_testContainersImagePolicy --> func_append + func_testContainersImagePolicy --> func_NewContainerReportObject + func_testContainersImagePolicy --> func_SetResult +``` + +#### Functions calling `testContainersImagePolicy` + +```mermaid +graph TD + func_LoadChecks --> func_testContainersImagePolicy +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersImagePolicy +check := checksdb.NewCheck(...) +env := &provider.TestEnvironment{ /* populate Containers slice */ } +testContainersImagePolicy(check, env) +``` + +--- + +### testContainersLivenessProbe + +**testContainersLivenessProbe** - Checks each container in the provided environment for the presence of a `livenessProbe`. It records compliant and non‑compliant containers and reports the outcome. 
+ +#### Signature (Go) + +```go +func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks each container in the provided environment for the presence of a `livenessProbe`. It records compliant and non‑compliant containers and reports the outcome. | +| **Parameters** | `check *checksdb.Check` – test context used to log information and store results.
`env *provider.TestEnvironment` – holds the list of containers (`Containers`) to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `testhelper.NewContainerReportObject`
• `check.SetResult` | +| **Side effects** | Emits log entries and populates the check’s result set; no external I/O beyond logging. | +| **How it fits the package** | Part of the lifecycle test suite, executed as part of the “Liveness probe” check registration in `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"For each container"} + B --> C["Log Testing Container"] + C --> D{"Has LivenessProbe?"} + D -- Yes --> E["Log has LivenessProbe defined"] + E --> F["Append to compliantObjects"] + D -- No --> G["Log does not have LivenessProbe defined"] + G --> H["Append to nonCompliantObjects"] + B --> I{"Next container"} + I -->|"All processed"| J["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testContainersLivenessProbe --> LogInfo + func_testContainersLivenessProbe --> LogError + func_testContainersLivenessProbe --> NewContainerReportObject + func_testContainersLivenessProbe --> SetResult +``` + +#### Functions calling `testContainersLivenessProbe` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testContainersLivenessProbe +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersLivenessProbe +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume env and check are already prepared + var env provider.TestEnvironment + var check checksdb.Check + + lifecycle.testContainersLivenessProbe(&check, &env) +} +``` + +--- + +### testContainersPostStart + +**testContainersPostStart** - Determines whether every container in the test environment defines a `postStart` lifecycle hook and records compliance. 
+ +#### Signature (Go) + +```go +func testContainersPostStart(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether every container in the test environment defines a `postStart` lifecycle hook and records compliance. | +| **Parameters** | `check` – the current check context; `env` – test environment containing containers to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | - `check.LogInfo`, `check.LogError`
- `testhelper.NewContainerReportObject`
- `check.SetResult` | +| **Side effects** | Logs messages; appends report objects to internal slices; updates the check result. | +| **How it fits the package** | Implements the *Post‑Start* test in the lifecycle suite, invoked by `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> Iterate["Loop over env.Containers"] + Iterate --> CheckLifecycle{"Lifecycle defined?"} + CheckLifecycle -- No --> LogError["Log error"] + LogError --> NonCompliant["Append non‑compliant object"] + CheckLifecycle -- Yes --> LogInfo["Log info"] + LogInfo --> Compliant["Append compliant object"] + Compliant --> SetResult["check.SetResult(compliant, noncompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testContainersPostStart --> LogInfo + testContainersPostStart --> LogError + testContainersPostStart --> NewContainerReportObject + testContainersPostStart --> SetResult +``` + +#### Functions calling `testContainersPostStart` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testContainersPostStart +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersPostStart +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func main() { + // Assume we have a populated Check and TestEnvironment + var check *checksdb.Check + var env *provider.TestEnvironment + + lifecycle.testContainersPostStart(check, env) +} +``` + +--- + +### testContainersPreStop + +**testContainersPreStop** - Ensures each container in the test environment has a `preStop` lifecycle hook; records compliant and non‑compliant containers. 
+ +#### Signature (Go) + +```go +func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each container in the test environment has a `preStop` lifecycle hook; records compliant and non‑compliant containers. | +| **Parameters** | `check *checksdb.Check` – test framework check context.
`env *provider.TestEnvironment` – holds the list of containers to evaluate. | +| **Return value** | None (side‑effects only). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• Go built‑in `append`
• `testhelper.NewContainerReportObject`
• `check.SetResult` | +| **Side effects** | Logs informational and error messages; creates report objects; updates the check result via `SetResult`. | +| **How it fits the package** | Used by the lifecycle test suite to validate container best practices before other tests run. | + +#### Internal workflow + +```mermaid +flowchart TD + A["TestContainersPreStop"] --> B["Initialize compliant/non‑compliant slices"] + B --> C{"For each container in env.Containers"} + C --> D["Check if Lifecycle.PreStop exists"] + D -->|"Missing"| E["Log error & add non‑compliant report"] + D -->|"Present"| F["Log info & add compliant report"] + C --> G["End loop"] + G --> H["SetResult with compliant/non‑compliant lists"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testContainersPreStop --> func_LogInfo + func_testContainersPreStop --> func_LogError + func_testContainersPreStop --> func_append + func_testContainersPreStop --> testhelper_NewContainerReportObject + func_testContainersPreStop --> func_SetResult +``` + +#### Functions calling `testContainersPreStop` + +```mermaid +graph TD + func_LoadChecks --> func_testContainersPreStop +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersPreStop +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" +) + +func main() { + var check *checksdb.Check // obtain from test framework + var env *provider.TestEnvironment // populate with containers to test + + lifecycle.testContainersPreStop(check, env) +} +``` + +--- + +### testContainersReadinessProbe + +**testContainersReadinessProbe** - Checks each container in the test environment to confirm a readiness probe is defined; records compliant and non‑compliant containers. 
+
+#### Signature (Go)
+
+```go
+func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvironment)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Checks each container in the test environment to confirm a readiness probe is defined; records compliant and non‑compliant containers. |
+| **Parameters** | `check` – *checksdb.Check (test context)
`env` – *provider.TestEnvironment (containers under test) |
+| **Return value** | None; results are stored in the `check` object via `SetResult`. |
+| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `append` (builtin)
• `testhelper.NewContainerReportObject`
• `check.SetResult` | +| **Side effects** | Logs information/errors; mutates the check’s result slice. No external I/O or concurrency. | +| **How it fits the package** | Implements the readiness‑probe test in the lifecycle suite, called by `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"for each container in env.Containers"} + B --> C["LogInfo “Testing Container …”"] + C --> D{"container.ReadinessProbe == nil?"} + D -- Yes --> E["LogError “Container … does not have ReadinessProbe defined”"] + E --> F["Append non‑compliant report object"] + D -- No --> G["LogInfo “Container … has ReadinessProbe defined”"] + G --> H["Append compliant report object"] + H & F --> I{"next container?"} + I -- Yes --> B + I -- No --> J["SetResult(compliantObjects, nonCompliantObjects)"] + J --> K["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testContainersReadinessProbe --> LogInfo + testContainersReadinessProbe --> LogError + testContainersReadinessProbe --> append + testContainersReadinessProbe --> NewContainerReportObject + testContainersReadinessProbe --> SetResult +``` + +#### Functions calling `testContainersReadinessProbe` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testContainersReadinessProbe +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersReadinessProbe +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + check := checksdb.NewCheck(nil) // placeholder for a real Check + env := &provider.TestEnvironment{ /* populate Containers */ } + lifecycle.testContainersReadinessProbe(check, env) +} +``` + +--- + +### testContainersStartupProbe + +**testContainersStartupProbe** - Determines whether every container in the test environment defines a Kubernetes 
`startupProbe`. Containers lacking this probe are flagged as non‑compliant. + +#### Signature (Go) + +```go +func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether every container in the test environment defines a Kubernetes `startupProbe`. Containers lacking this probe are flagged as non‑compliant. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result storage.
`env *provider.TestEnvironment` – contains a slice of containers (`Containers`) to evaluate. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError` (logging)
• `testhelper.NewContainerReportObject` (creates report entries)
• `check.SetResult` (records compliant/non‑compliant objects) | +| **Side effects** | Emits log messages, builds two slices of report objects (`compliantObjects`, `nonCompliantObjects`) and stores them in the check result. No external I/O beyond logging. | +| **How it fits the package** | This helper is used by the *Startup probe* test within the lifecycle test suite to enforce best‑practice compliance for container startup probes across all pods under test. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Containers"} + B --> C{"cut.StartupProbe == nil?"} + C -- Yes --> D["Log error"] + D --> E["Append non‑compliant report object"] + C -- No --> F["Log info"] + F --> G["Append compliant report object"] + G --> H["Continue loop"] + H --> B + B --> I["SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testContainersStartupProbe --> LogInfo + func_testContainersStartupProbe --> LogError + func_testContainersStartupProbe --> testhelper.NewContainerReportObject + func_testContainersStartupProbe --> SetResult +``` + +#### Functions calling `testContainersStartupProbe` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testContainersStartupProbe +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersStartupProbe +check := checksdb.NewCheck("example-test") +env := provider.TestEnvironment{ + Containers: []provider.Container{ /* populate with test containers */ }, +} +testContainersStartupProbe(check, &env) +// check.Result now contains compliant and non‑compliant objects +``` + +--- + +--- + +### testDeploymentScaling + +**testDeploymentScaling** - Determines whether deployments can scale safely, either via HPA or direct scaling, and records compliance. 
+
+#### Signature (Go)
+
+```go
+func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Determines whether deployments can scale safely, either via HPA or direct scaling, and records compliance. |
+| **Parameters** | `env *provider.TestEnvironment` – test context; `timeout time.Duration` – maximum wait for scaling operations; `check *checksdb.Check` – check instance used for logging and result reporting. |
+| **Return value** | None (side‑effect: sets check results). |
+| **Key dependencies** | • `scaling.IsManaged`, `scaling.CheckOwnerReference`, `nameInDeploymentSkipList`
• `scaling.GetResourceHPA`, `scaling.TestScaleHpaDeployment`, `scaling.TestScaleDeployment`
• `testhelper.NewDeploymentReportObject`
• `check.SetResult`, logging helpers | +| **Side effects** | • Logs informational and error messages via the check’s logger.
• Appends report objects to compliant/non‑compliant slices.
• Calls `env.SetNeedsRefresh()` on defer, marking environment for refresh. | +| **How it fits the package** | Part of the lifecycle test suite; invoked by `LoadChecks` as the handler for “Deployment scaling” checks. It orchestrates the actual scaling logic and aggregates results for reporting. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Iterate deployments"] + B --> C{"Is deployment managed?"} + C -- Yes --> D["Test owner references via CheckOwnerReference"] + D --> E{"Valid?"} + E -- No --> F["Log error, add non‑compliant object"] + E -- Yes --> G["Skip scaling, log info"] + C -- No --> H{"In skip list?"} + H -- Yes --> I["Log skip, continue"] + H -- No --> J{"Has HPA?"} + J -- Yes --> K["Test HPA scale via TestScaleHpaDeployment"] + K --> L{"Success?"} + L -- No --> M["Log error, add non‑compliant object"] + L -- Yes --> N["Continue"] + J -- No --> O["Test direct deployment scaling via TestScaleDeployment"] + O --> P{"Success?"} + P -- No --> Q["Log error, add non‑compliant object"] + P -- Yes --> R["Add compliant object"] + R & M & Q --> S["End loop"] + S --> T["Set check result"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testDeploymentScaling --> func_IsManaged + func_testDeploymentScaling --> func_CheckOwnerReference + func_testDeploymentScaling --> func_nameInDeploymentSkipList + func_testDeploymentScaling --> func_GetResourceHPA + func_testDeploymentScaling --> func_TestScaleHpaDeployment + func_testDeploymentScaling --> func_TestScaleDeployment + func_testDeploymentScaling --> func_NewDeploymentReportObject + func_testDeploymentScaling --> func_SetResult +``` + +#### Functions calling `testDeploymentScaling` + +```mermaid +graph TD + func_LoadChecks --> func_testDeploymentScaling +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testDeploymentScaling +env := provider.NewTestEnvironment() +timeout := 5 * time.Minute +check := checksdb.NewCheck("deployment-scaling", nil) + 
+testDeploymentScaling(env, timeout, check) + +// After execution, results are available via check.GetResult(). +``` + +--- + +### testHighAvailability + +**testHighAvailability** - Confirms that each Deployment or StatefulSet has more than one replica and defines Pod anti‑affinity rules, unless the resource is marked with `AffinityRequired`. + +#### Signature (Go) + +```go +func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Confirms that each Deployment or StatefulSet has more than one replica and defines Pod anti‑affinity rules, unless the resource is marked with `AffinityRequired`. | +| **Parameters** | `check *checksdb.Check` – the test check context; `env *provider.TestEnvironment` – environment containing Deployments and StatefulSets. | +| **Return value** | None (results are set via `check.SetResult`). | +| **Key dependencies** | • `logInfo`, `logError` on `check`
• `ToString` on objects
• `testhelper.NewDeploymentReportObject`, `NewStatefulSetReportObject`
• `check.SetResult` | +| **Side effects** | Generates log output; populates the check’s result slices with compliant and non‑compliant report objects. | +| **How it fits the package** | Part of the lifecycle test suite; registered as a check for high‑availability best practices. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate Deployments"} + B --> C{"Replicas > 1?"} + C -- No --> D["Log error, add non‑compliant report"] + C -- Yes --> E{"AffinityRequired == true?"} + E -- Yes --> F["Skip this Deployment"] + E -- No --> G{"PodAntiAffinity defined?"} + G -- No --> H["Log error, add non‑compliant report"] + G -- Yes --> I["Add compliant report"] + B --> J{"Iterate StatefulSets"} + J --> K{"Replicas > 1?"} + K -- No --> L["Log error, add non‑compliant report"] + K -- Yes --> M{"AffinityRequired == true?"} + M -- Yes --> N["Skip this StatefulSet"] + M -- No --> O{"PodAntiAffinity defined?"} + O -- No --> P["Log error, add non‑compliant report"] + O -- Yes --> Q["Add compliant report"] + I & Q --> R["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testHighAvailability --> func_LogInfo + func_testHighAvailability --> func_ToString + func_testHighAvailability --> func_LogError + func_testHighAvailability --> func_Append + func_testHighAvailability --> func_NewDeploymentReportObject + func_testHighAvailability --> func_NewStatefulSetReportObject + func_testHighAvailability --> func_SetResult +``` + +#### Functions calling `testHighAvailability` + +```mermaid +graph TD + func_LoadChecks --> func_testHighAvailability +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testHighAvailability +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/checksdb" +) + +func main() { + // Assume env is populated with 
Deployments and StatefulSets + var env *provider.TestEnvironment + check := checksdb.NewCheck("exampleID") + lifecycle.testHighAvailability(check, env) +} +``` + +--- + +### testPodNodeSelectorAndAffinityBestPractices + +**testPodNodeSelectorAndAffinityBestPractices** - Ensures that each Pod in the supplied slice does not specify a node selector or node affinity, flagging any violations. + +#### Signature (Go) + +```go +func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check *checksdb.Check) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that each Pod in the supplied slice does not specify a node selector or node affinity, flagging any violations. | +| **Parameters** | `testPods []*provider.Pod` – Pods to evaluate.
`check *checksdb.Check` – Check instance used for logging and result reporting. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError` (logging)
• `put.HasNodeSelector()` (Pod helper)
• `testhelper.NewPodReportObject` (report object creation)
• `check.SetResult` (storing compliance data) | +| **Side effects** | Modifies the check result state; writes log messages. No external I/O or concurrency. | +| **How it fits the package** | Part of the lifecycle test suite, specifically verifying best‑practice compliance for pod scheduling attributes. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over Pods"} + B --> C["Log info: testing Pod"] + C --> D{"Has node selector?"} + D -- Yes --> E["Log error, add non‑compliant report"] + D -- No --> F{"Has node affinity?"} + F -- Yes --> G["Log error, add non‑compliant report"] + F -- No --> H["Set compliant flag"] + H --> I["Add compliant report"] + B --> J{"Next Pod"} + J -->|"All processed"| K["Set check result with reports"] +``` + +#### Function dependencies + +```mermaid +graph TD + testPodNodeSelectorAndAffinityBestPractices --> LogInfo + testPodNodeSelectorAndAffinityBestPractices --> HasNodeSelector + testPodNodeSelectorAndAffinityBestPractices --> LogError + testPodNodeSelectorAndAffinityBestPractices --> append + testPodNodeSelectorAndAffinityBestPractices --> NewPodReportObject + testPodNodeSelectorAndAffinityBestPractices --> SetResult +``` + +#### Functions calling `testPodNodeSelectorAndAffinityBestPractices` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testPodNodeSelectorAndAffinityBestPractices +``` + +#### Usage example + +```go +// Minimal example invoking testPodNodeSelectorAndAffinityBestPractices +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/checksdb" +) + +func runExample() { + // Assume we have a slice of Pods and a Check instance + pods := []*provider.Pod{ /* … */ } + check := checksdb.NewCheck("example-id") + + testPodNodeSelectorAndAffinityBestPractices(pods, check) + + // Results are now available via check.Result() +} +``` + +--- + +### testPodPersistentVolumeReclaimPolicy + 
+**testPodPersistentVolumeReclaimPolicy** - Ensures each pod that mounts a Persistent Volume Claim has its underlying PV set to `Delete` reclaim policy. Non‑compliant pods are reported for remediation. + +#### Signature (Go) + +```go +func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each pod that mounts a Persistent Volume Claim has its underlying PV set to `Delete` reclaim policy. Non‑compliant pods are reported for remediation. | +| **Parameters** | `check *checksdb.Check` – test context used for logging and result aggregation.
`env *provider.TestEnvironment` – runtime environment containing cluster resources such as Pods, PersistentVolumes, and PersistentVolumeClaims. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `log.LogInfo`, `log.LogError`
• `volumes.IsPodVolumeReclaimPolicyDelete` (checks reclaim policy)
• `testhelper.NewPodReportObject`, `AddField`
• `check.SetResult` | +| **Side effects** | Logs informational and error messages; mutates the check’s result set. No external I/O or concurrency is performed. | +| **How it fits the package** | Part of the lifecycle test suite, specifically the *Persistent volume reclaim policy* check. It is invoked by `LoadChecks` when registering this particular check. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B -->|"for each pod"| C["Log pod name"] + C --> D["Set compliantPod = true"] + D --> E["Iterate over pod.Spec.Volumes"] + E --> F{"Volume has PVC?"} + F -- No --> G["Skip volume, continue loop"] + F -- Yes --> H{"Is reclaim policy DELETE?"} + H -- Yes --> I["Continue to next volume"] + H -- No --> J["Log error"] + J --> K["Create non‑compliant report object"] + K --> L["Set compliantPod = false"] + L --> M["Break out of volume loop"] + M --> N{"compliantPod?"} + N -- Yes --> O["Create compliant report object"] + N -- No --> P["Do nothing"] + E --> Q{"End volume loop"} + B --> R{"End pod loop"} + R --> S["SetResult with objects"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testPodPersistentVolumeReclaimPolicy --> log.LogInfo + func_testPodPersistentVolumeReclaimPolicy --> volumes.IsPodVolumeReclaimPolicyDelete + func_testPodPersistentVolumeReclaimPolicy --> testhelper.NewPodReportObject + func_testPodPersistentVolumeReclaimPolicy --> check.SetResult +``` + +#### Functions calling `testPodPersistentVolumeReclaimPolicy` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testPodPersistentVolumeReclaimPolicy +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodPersistentVolumeReclaimPolicy +func runExample() { + // Assume check and env are already populated. + var check *checksdb.Check + var env *provider.TestEnvironment + + // Invoke the function directly. 
+ testPodPersistentVolumeReclaimPolicy(check, env) + + // Results can now be inspected via check.Result(). +} +``` + +--- + +### testPodTolerationBypass + +**testPodTolerationBypass** - Validates every Pod in `env.Pods` contains only the default Kubernetes tolerations. Non‑default or modified tolerations mark the Pod as non‑compliant. + +#### Signature (Go) + +```go +func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates every Pod in `env.Pods` contains only the default Kubernetes tolerations. Non‑default or modified tolerations mark the Pod as non‑compliant. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result aggregation.
`env *provider.TestEnvironment` – environment holding the list of Pods to evaluate. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `tolerations.IsTolerationModified`
• `testhelper.NewPodReportObject`
• Logging helpers: `LogInfo`, `LogError`
• `check.SetResult` | +| **Side effects** | Generates log entries, creates report objects, and updates the check result. No external I/O beyond logging. | +| **How it fits the package** | Implements the “Pod toleration bypass” test in the lifecycle suite, ensuring Pods do not introduce security‑weakening tolerations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B -->|"For each pod"| C["Initialize podIsCompliant=true"] + C --> D{"Iterate over pod.Spec.Tolerations"} + D -->|"Check IsTolerationModified"| E["If true → log error & create non‑compliant object"] + E --> F["Set podIsCompliant=false"] + D --> G{"End toleration loop"} + G --> H{"podIsCompliant?"} + H -- Yes --> I["Log info & add compliant object"] + H -- No --> J["Skip adding compliant object"] + B --> K{"Next pod?"} + K --> L["End loop"] + L --> M["check.SetResult(compliantObjects, nonCompliantObjects)"] + M --> N["Finish"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodTolerationBypass --> func_IsTolerationModified + func_testPodTolerationBypass --> func_NewPodReportObject + func_testPodTolerationBypass --> func_LogInfo + func_testPodTolerationBypass --> func_LogError + func_testPodTolerationBypass --> func_SetResult +``` + +#### Functions calling `testPodTolerationBypass` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testPodTolerationBypass +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodTolerationBypass +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/checksdb" +) + +func example() { + // Assume env is prepared with Pods to test + var env provider.TestEnvironment + // Create a dummy check context + check := checksdb.NewCheck("example") + lifecycle.testPodTolerationBypass(check, 
&env) +} +``` + +--- + +### testPodsOwnerReference + +**testPodsOwnerReference** - Iterates over all pods in the test environment and verifies that each pod’s owner reference meets defined best‑practice rules. Logs results and records compliant/non‑compliant objects for reporting. + +#### Signature (Go) + +```go +func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all pods in the test environment and verifies that each pod’s owner reference meets defined best‑practice rules. Logs results and records compliant/non‑compliant objects for reporting. | +| **Parameters** | `check *checksdb.Check` – context for logging and result aggregation.
`env *provider.TestEnvironment` – contains the list of pods to evaluate (`Pods`). | +| **Return value** | None (side‑effects only). | +| **Key dependencies** | - `ownerreference.NewOwnerReference(put.Pod)`
- `o.RunTest(check.GetLogger())`
- `o.GetResults()`
- `testhelper.NewPodReportObject(...)`
- `check.LogInfo`, `check.LogError`, `check.SetResult` | +| **Side effects** | • Logs informational and error messages via the supplied check.
• Builds slices of compliant/non‑compliant report objects and stores them in the check’s result. | +| **How it fits the package** | This function is one of many pod‑level tests registered under the *Lifecycle* test suite; it specifically enforces owner reference best practices for all pods discovered in the environment. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"For each pod in env.Pods"} + B --> C["Create OwnerReference object"] + C --> D["Run test with check logger"] + D --> E{"Owner reference compliant?"} + E -- Yes --> F["Log success, add to compliant list"] + E -- No --> G["Log error, add to non‑compliant list"] + F & G --> H["Next pod"] + H --> B + B --> I["Set result on check"] + I --> J["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodsOwnerReference --> func_LogInfo + func_testPodsOwnerReference --> ownerreference.NewOwnerReference + func_testPodsOwnerReference --> func_RunTest + func_testPodsOwnerReference --> func_GetResults + func_testPodsOwnerReference --> func_LogError + func_testPodsOwnerReference --> func_NewPodReportObject + func_testPodsOwnerReference --> func_SetResult +``` + +#### Functions calling `testPodsOwnerReference` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testPodsOwnerReference +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodsOwnerReference +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func main() { + // Assume env and check are already created and populated. + var env *provider.TestEnvironment + var check *checksdb.Check + + lifecycle.testPodsOwnerReference(check, env) +} +``` + +--- + +### testPodsRecreation + +**testPodsRecreation** - Verifies that pods belonging to deployments and statefulsets are correctly recreated and become ready after a node is cordoned/drained. 
+ +#### Signature (Go) + +```go +func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that pods belonging to deployments and statefulsets are correctly recreated and become ready after a node is cordoned/drained. | +| **Parameters** | `check *checksdb.Check` – test context; `env *provider.TestEnvironment` – current cluster snapshot | +| **Return value** | None (result recorded via `check.SetResult`) | +| **Key dependencies** | • `podsets.WaitForAllPodSetsReady`
• `podrecreation.CordonHelper`, `CordonCleanup`, `CountPodsWithDelete`
• `testhelper.New*ReportObject` for reporting
• Logging via `check.Log…` | +| **Side effects** | • Modifies node schedulability (cordon/uncordon)
• Deletes pods during draining
• Sets test result and may log post‑mortem info | +| **How it fits the package** | Part of the lifecycle test suite; invoked by `LoadChecks` as the “Pod recreation” check. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B["Check all podsets ready"] + B --> C{"Any not ready?"} + C -- Yes --> D["Record non‑compliant, exit"] + C -- No --> E["Filter pods with node selectors"] + E --> F{"Any found?"} + F -- Yes --> G["Record non‑compliant, exit"] + F -- No --> H["Iterate over nodes"] + H --> I["Cordon node"] + I --> J["Count pods on node"] + J --> K["Drain node (delete foreground)"] + K --> L["Wait for podsets ready again"] + L --> M{"Any not ready?"} + M -- Yes --> N["Record non‑compliant, exit"] + M -- No --> O["Uncordon node"] + O --> H + H --> P["All nodes processed"] + P --> Q["If no non‑compliant objects"] --> R["Mark deployments/sts compliant"] + R --> S["Finish"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testPodsRecreation --> podsets.WaitForAllPodSetsReady + func_testPodsRecreation --> podrecreation.CordonHelper + func_testPodsRecreation --> podrecreation.CordonCleanup + func_testPodsRecreation --> podrecreation.CountPodsWithDelete + func_testPodsRecreation --> testhelper.NewDeploymentReportObject + func_testPodsRecreation --> testhelper.NewStatefulSetReportObject + func_testPodsRecreation --> testhelper.NewPodReportObject + func_testPodsRecreation --> testhelper.NewNodeReportObject +``` + +#### Functions calling `testPodsRecreation` + +```mermaid +graph TD + func_LoadChecks --> func_testPodsRecreation +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodsRecreation +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func runExample() { + // Assume env and check are initialized appropriately + var env 
*provider.TestEnvironment + var check *checksdb.Check + + lifecycle.testPodsRecreation(check, env) +} +``` + +--- + +### testScaleCrd + +**testScaleCrd** - Iterates over all CRs under test, attempts to scale each via its HorizontalPodAutoscaler (HPA) if present; otherwise performs a direct scaling test. Records compliant and non‑compliant objects in the supplied check result. + +#### Signature (Go) + +```go +func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all CRs under test, attempts to scale each via its HorizontalPodAutoscaler (HPA) if present; otherwise performs a direct scaling test. Records compliant and non‑compliant objects in the supplied check result. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing CRs and scaler data.
`timeout time.Duration` – Maximum duration for each scaling operation.
`check *checksdb.Check` – Check object used to log errors, info, and store results. | +| **Return value** | None (void). | +| **Key dependencies** | • `env.SetNeedsRefresh()`
• `scaling.GetResourceHPA`
• `scaling.TestScaleHPACrd`
• `scaling.TestScaleCrd`
• `check.LogError`, `check.LogInfo`
• `testhelper.NewCrdReportObject`
• `append` (to slices)
• `check.SetResult` | +| **Side effects** | • Marks the test environment for refresh via `SetNeedsRefresh`.
• Emits logs through the check’s logger.
• Modifies two slices (`compliantObjects`, `nonCompliantObjects`) and stores them in the check result. | +| **How it fits the package** | Used by the lifecycle test loader to validate that each CRD can scale correctly, either via an HPA or directly, before other tests are executed. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Mark env for refresh"] + B --> C{"Iterate over ScaleCrUnderTest"} + C -->|"HPA exists"| D["Get HPA via GetResourceHPA"] + D --> E["TestScaleHPACrd"] + E -- success --> F["Continue loop"] + E -- failure --> G["Log error, record non‑compliant"] + G --> F + D -- nil HPA --> H["TestScaleCrd"] + H -- success --> I["Log info, record compliant"] + H -- failure --> J["Log error, record non‑compliant"] + I --> F + J --> F + F --> K["Set check result"] + K --> L["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testScaleCrd --> env_SetNeedsRefresh + func_testScaleCrd --> scaling_GetResourceHPA + func_testScaleCrd --> scaling_TestScaleHPACrd + func_testScaleCrd --> scaling_TestScaleCrd + func_testScaleCrd --> check_LogError + func_testScaleCrd --> check_LogInfo + func_testScaleCrd --> testhelper_NewCrdReportObject + func_testScaleCrd --> check_SetResult +``` + +#### Functions calling `testScaleCrd` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testScaleCrd +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testScaleCrd +import ( + "time" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +// Assume env and check are already initialized: +var env *provider.TestEnvironment +var check *checksdb.Check + +// Run the scaling test with a 5‑minute timeout. 
+testScaleCrd(env, 5*time.Minute, check) +``` + +--- + +### testStatefulSetScaling + +**testStatefulSetScaling** - Evaluates whether each StatefulSet in the environment can be scaled (directly or via HPA) and logs compliance. + +#### Signature + +```go +func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Evaluates whether each StatefulSet in the environment can be scaled (directly or via HPA) and logs compliance. | +| **Parameters** | `env` – test context containing StatefulSets, CRDs, config.
`timeout` – maximum wait time for scaling operations.
`check` – check object used for logging and result aggregation. | +| **Return value** | None; results are stored in the `check`. | +| **Key dependencies** | * `scaling.IsManaged`
* `scaling.CheckOwnerReference`
* `nameInStatefulSetSkipList`
* `scaling.GetResourceHPA`
* `scaling.TestScaleHpaStatefulSet`
* `scaling.TestScaleStatefulSet`
* `testhelper.NewStatefulSetReportObject` | +| **Side effects** | * Calls `env.SetNeedsRefresh()` on exit.
* Logs informational and error messages via the check logger.
* Appends compliance objects to the check result. | +| **How it fits the package** | This function is the entry point for the StatefulSet scaling test added in `LoadChecks`. It orchestrates all logic needed to determine if a StatefulSet can be scaled safely. | + +#### Internal workflow + +```mermaid +flowchart TD + A["TestStatefulSetScaling"] --> B["Iterate over env.StatefulSets"] + B --> C{"IsManaged?"} + C -- Yes --> D{"CheckOwnerReference"} + D -- Not scalable --> E["Log error & add non‑compliant report"] + D -- Scalable --> F["Skip HPA test; continue loop"] + C -- No --> G{"nameInStatefulSetSkipList"} + G -- Skip --> H["Continue loop"] + G -- Continue --> I{"GetResourceHPA"} + I -- Found HPA --> J["TestScaleHpaStatefulSet"] + J -- Fail --> K["Log error & add non‑compliant report"] + J -- Pass --> L["Skip direct scaling; continue loop"] + I -- No HPA --> M["TestScaleStatefulSet"] + M -- Fail --> N["Log error & add non‑compliant report"] + M -- Pass --> O["Add compliant report"] + O --> P["End loop"] + P --> Q["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testStatefulSetScaling --> func_IsManaged + func_testStatefulSetScaling --> func_CheckOwnerReference + func_testStatefulSetScaling --> func_nameInStatefulSetSkipList + func_testStatefulSetScaling --> func_GetResourceHPA + func_testStatefulSetScaling --> func_TestScaleHpaStatefulSet + func_testStatefulSetScaling --> func_TestScaleStatefulSet + func_testStatefulSetScaling --> func_NewStatefulSetReportObject +``` + +#### Functions calling `testStatefulSetScaling` + +```mermaid +graph TD + func_LoadChecks --> func_testStatefulSetScaling +``` + +#### Usage example + +```go +// Minimal example invoking testStatefulSetScaling +env := &provider.TestEnvironment{ + StatefulSets: ... , // populated with StatefulSet objects + HorizontalScaler: ... , + Crds: ... , + Config: ... , +} +check := checksdb.NewCheck(...) 
+ +testStatefulSetScaling(env, 30*time.Second, check) + +// After execution, the check holds compliant and non‑compliant reports. +``` + +--- + +### testStorageProvisioner + +**testStorageProvisioner** - Ensures pods use an appropriate storage provisioner (local or non‑local) according to whether the cluster is single‑node or multi‑node, and records compliance. + +#### Signature (Go) + +```go +func testStorageProvisioner(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures pods use an appropriate storage provisioner (local or non‑local) according to whether the cluster is single‑node or multi‑node, and records compliance. | +| **Parameters** | `check *checksdb.Check` – test harness; `env *provider.TestEnvironment` – current test environment containing Pods, StorageClasses, PVCs, etc. | +| **Return value** | None (void) | +| **Key dependencies** | - `LogInfo`, `LogDebug`, `LogError` (logging)
- `IsSNO()` (cluster topology check)
- `NewPodReportObject` and `AddField` (report construction)
- `SetResult` (finalizes test outcome) | +| **Side effects** | Logs messages, creates report objects for compliant/non‑compliant pods, updates the supplied `Check` with results. No global state is altered beyond the check result. | +| **How it fits the package** | This function implements the “Storage provisioner” test within the lifecycle suite, invoked by `LoadChecks`. It checks pod volume configuration against cluster storage policy. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over Pods"} + B --> C{"Pod has PVCs?"} + C -- No --> D["Mark pod compliant"] + C -- Yes --> E{"Find matching PVC"} + E --> F{"Match StorageClass?"} + F -- No --> G["Mark pod compliant (no local storage)"] + F -- Yes --> H{"Check SNO?"} + H -- Yes --> I{"Local or LVM provisioner?"} + I -- Local/LVM --> J["Log compliance, add to compliant list"] + I -- Other --> K["Log error, add to non‑compliant list"] + H -- No --> L{"Local or LVM provisioner?"} + L -- Local/LVM --> M["Log error, add to non‑compliant list"] + L -- Other --> N["Log compliance, add to compliant list"] + J & K & M & N --> O["Continue loop"] + O --> B + B --> P["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testStorageProvisioner --> LogInfo + func_testStorageProvisioner --> LogDebug + func_testStorageProvisioner --> IsSNO + func_testStorageProvisioner --> NewPodReportObject + func_testStorageProvisioner --> AddField + func_testStorageProvisioner --> LogError + func_testStorageProvisioner --> SetResult +``` + +#### Functions calling `testStorageProvisioner` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testStorageProvisioner +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testStorageProvisioner +func runExample(check *checksdb.Check, env *provider.TestEnvironment) { + // The function performs its work through side‑effects on the check. 
+ testStorageProvisioner(check, env) + + // After execution, the check contains compliant and non‑compliant reports: + fmt.Println("Compliant pods:", len(check.CompliantObjects)) + fmt.Println("Non‑compliant pods:", len(check.NonCompliantObjects)) +} +``` + +--- diff --git a/docs/tests/lifecycle/ownerreference/ownerreference.md b/docs/tests/lifecycle/ownerreference/ownerreference.md new file mode 100644 index 000000000..125c19d58 --- /dev/null +++ b/docs/tests/lifecycle/ownerreference/ownerreference.md @@ -0,0 +1,264 @@ +# Package ownerreference + +**Path**: `tests/lifecycle/ownerreference` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [OwnerReference](#ownerreference) +- [Exported Functions](#exported-functions) + - [NewOwnerReference](#newownerreference) + - [OwnerReference.GetResults](#ownerreference.getresults) + - [OwnerReference.RunTest](#ownerreference.runtest) + +## Overview + +Provides utilities for testing Kubernetes pod owner references within the Certsuite lifecycle tests. 
+ +### Key Features + +- Creates and manages an OwnerReference instance tied to a specific Pod +- Runs validation checks on the pod’s owner references, logging progress and recording results +- Exposes the test outcome via a simple integer status + +### Design Notes + +- OwnerReference stores only a pointer to the pod and an int result, assuming one-to-one mapping per test run +- RunTest logs both informational steps and errors but does not propagate failures—errors are captured in result field +- Consumers should invoke GetResults after RunTest to determine pass/fail; negative values indicate failure + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**OwnerReference**](#ownerreference) | Holds the Pod under test and records the integer outcome of its owner‑reference validation | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewOwnerReference(put *corev1.Pod) *OwnerReference](#newownerreference) | Instantiates a new `OwnerReference` struct for the provided Pod, initializing its result status to an error state. | +| [func (o *OwnerReference) GetResults() int](#ownerreference.getresults) | Returns the integer result currently stored in an `OwnerReference` instance. | +| [func (*OwnerReference) RunTest(*log.Logger)](#ownerreference.runtest) | Executes a series of checks on the owner references of a pod, logs progress, and records success or failure in `o.result`. | + +## Structs + +### OwnerReference + +The `OwnerReference` type encapsulates a Kubernetes Pod and records the outcome of a test that verifies whether the pod’s owner references belong to acceptable resource kinds (`StatefulSet` or `ReplicaSet`). + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `put` | `*corev1.Pod` | Reference to the Pod under test. | +| `result` | `int` | Numeric status of the test (e.g., success, failure). | + +#### Purpose + +`OwnerReference` is used in lifecycle tests to assert that a pod’s owner references are of the expected kinds. 
It stores the pod being examined and the result of the validation, enabling later retrieval via `GetResults()`. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewOwnerReference(put *corev1.Pod) *OwnerReference` | Creates a new instance initialized with the given pod and a default error status. | +| `OwnerReference.GetResults() int` | Returns the stored test result. | +| `OwnerReference.RunTest(logger *log.Logger)` | Iterates over the pod’s owner references, logs details, and updates the result based on whether each reference is of kind `StatefulSet` or `ReplicaSet`. | + +--- + +--- + +## Exported Functions + +### NewOwnerReference + +**NewOwnerReference** - Instantiates a new `OwnerReference` struct for the provided Pod, initializing its result status to an error state. + +#### Signature (Go) + +```go +func NewOwnerReference(put *corev1.Pod) *OwnerReference +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a new `OwnerReference` struct for the provided Pod, initializing its result status to an error state. | +| **Parameters** | `put *corev1.Pod` – The Kubernetes Pod object whose ownership will be referenced. | +| **Return value** | `*OwnerReference` – A pointer to the newly created OwnerReference instance. | +| **Key dependencies** | - `corev1.Pod` from `k8s.io/api/core/v1`
- `testhelper.ERROR` status flag | +| **Side effects** | None; only returns a new struct without modifying external state. | +| **How it fits the package** | Provides a helper to create OwnerReference objects that are later used for compliance checks on Pod ownership within the lifecycle tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Create OwnerReference"} + B --> C["Set put field"] + B --> D["Set result to ERROR"] + D --> E["Return pointer"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `NewOwnerReference` (Mermaid) + +```mermaid +graph TD + func_testPodsOwnerReference --> func_NewOwnerReference +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewOwnerReference +import ( + corev1 "k8s.io/api/core/v1" + ownerreference "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/ownerreference" +) + +// Assume pod is an existing *corev1.Pod instance +var pod *corev1.Pod + +// Create the OwnerReference wrapper for compliance checks +or := ownerreference.NewOwnerReference(pod) + +// The returned object can now be used to run tests, e.g.: +// or.RunTest(logger) +``` + +--- + +### OwnerReference.GetResults + +**GetResults** - Returns the integer result currently stored in an `OwnerReference` instance. + +#### Signature (Go) + +```go +func (o *OwnerReference) GetResults() int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the integer result currently stored in an `OwnerReference` instance. | +| **Parameters** | None – the method operates on the receiver `*OwnerReference`. | +| **Return value** | An `int` representing the saved result. | +| **Key dependencies** | • None – this method performs a simple field read. | +| **Side effects** | No state changes, no I/O, no concurrency operations. 
| +| **How it fits the package** | Provides public access to the internal `result` field of `OwnerReference`, allowing external code (e.g., tests) to inspect the outcome after lifecycle operations. | + +#### Internal workflow + +```mermaid +flowchart TD + subgraph "GetResults" + A["Receive *OwnerReference"] --> B["Return o.result"] + end +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `OwnerReference.GetResults` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking OwnerReference.GetResults +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/ownerreference" +) + +func main() { + or := &ownerreference.OwnerReference{result: 42} + fmt.Println("Result:", or.GetResults()) // Output: Result: 42 +} +``` + +--- + +### OwnerReference.RunTest + +**RunTest** - Executes a series of checks on the owner references of a pod, logs progress, and records success or failure in `o.result`. + +#### Signature (Go) + +```go +func (*OwnerReference) RunTest(*log.Logger) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes a series of checks on the owner references of a pod, logs progress, and records success or failure in `o.result`. | +| **Parameters** | `logger *log.Logger` – logger used to output informational or error messages. | +| **Return value** | None; results are stored directly on the receiver (`o.result`). | +| **Key dependencies** | • `logger.Info`
• `logger.Error`
• `testhelper.SUCCESS`, `testhelper.FAILURE` | +| **Side effects** | Modifies `o.result`; writes log entries. | +| **How it fits the package** | Part of the lifecycle tests for owner references; verifies that pods are owned by StatefulSet or ReplicaSet objects. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over OwnerReferences"} + B --> C{"Kind == statefulSet || Kind == replicaSet"} + C -- Yes --> D["logger.Info(...)
o.result = SUCCESS"] + C -- No --> E["logger.Error(...)
o.result = FAILURE
return"] + D --> F["End"] + E --> F +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + OwnerReference.RunTest --> logger.Info + OwnerReference.RunTest --> logger.Error +``` + +#### Functions calling `OwnerReference.RunTest` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking OwnerReference.RunTest +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/ownerreference" +) + +func main() { + logger := log.Default() + or := &ownerreference.OwnerReference{ + put: "mypod", + result: "", // will be set by RunTest + } + or.RunTest(logger) +} +``` + +--- diff --git a/docs/tests/lifecycle/podrecreation/podrecreation.md b/docs/tests/lifecycle/podrecreation/podrecreation.md new file mode 100644 index 000000000..d860be93b --- /dev/null +++ b/docs/tests/lifecycle/podrecreation/podrecreation.md @@ -0,0 +1,532 @@ +# Package podrecreation + +**Path**: `tests/lifecycle/podrecreation` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [CordonCleanup](#cordoncleanup) + - [CordonHelper](#cordonhelper) + - [CountPodsWithDelete](#countpodswithdelete) +- [Local Functions](#local-functions) + - [deletePod](#deletepod) + - [skipDaemonPod](#skipdaemonpod) + - [waitPodDeleted](#waitpoddeleted) + +## Overview + +This package implements the logic for pod‑recreation tests in CertSuite’s lifecycle suite, handling node cordoning/uncordoning and controlled deletion of deployment or statefulset pods on a target node. 
+ +### Key Features + +- CordonHelper – toggles a node’s *Unschedulable* flag to cordon or uncordon it +- CountPodsWithDelete – enumerates and optionally deletes pods belonging to Deployments, StatefulSets, or DaemonSets with configurable delete mode (Foreground/Background) +- waitPodDeleted – watches the namespace for pod removal and times out if deletion stalls + +### Design Notes + +- CordonHelper uses a retry loop to cope with Kubernetes update conflicts +- CountPodsWithDelete skips daemon‑set pods because they should not be removed during recreation tests +- Deletion of non‑background pods is performed asynchronously and waited on via a WaitGroup to ensure full cleanup before proceeding + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CordonCleanup(node string, check *checksdb.Check)](#cordoncleanup) | Ensures a Kubernetes node is marked schedulable again after test operations. It attempts to uncordon the node and aborts the test if this fails. | +| [func CordonHelper(name, operation string) error](#cordonhelper) | Sets the *Unschedulable* flag on a Kubernetes node to either cordon or uncordon it. | +| [func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error)](#countpodswithdelete) | Counts all deployment or statefulset pods scheduled on a specified node. If `mode` is not `NoDelete`, it initiates deletion of those pods using the appropriate delete mode (`Foreground` or `Background`). | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error](#deletepod) | Removes a pod from the cluster. If the deletion mode is not background, it spawns a goroutine that waits for the pod to be fully deleted before signalling completion via the supplied `WaitGroup`. 
| +| [func skipDaemonPod(pod *corev1.Pod) bool](#skipdaemonpod) | Determines whether a pod is owned by a DaemonSet and should therefore be exempt from deletion during recreation tests. | +| [func waitPodDeleted(ns string, podName string, timeout int64, watcher watch.Interface)](#waitpoddeleted) | Monitors the specified Kubernetes namespace for the deletion of a pod named `podName`. If the pod is deleted or the watcher signals a deletion event, the function returns. It also times out after `timeout` seconds if no deletion occurs. | + +## Exported Functions + +### CordonCleanup + +**CordonCleanup** - Ensures a Kubernetes node is marked schedulable again after test operations. It attempts to uncordon the node and aborts the test if this fails. + +#### 1) Signature (Go) + +```go +func CordonCleanup(node string, check *checksdb.Check) +``` + +--- + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures a Kubernetes node is marked schedulable again after test operations. It attempts to uncordon the node and aborts the test if this fails. | +| **Parameters** | `node string` – name of the node to uncordon.
`check *checksdb.Check` – context object used for logging, aborting, and reporting. | +| **Return value** | None (void). Errors are handled via the `check.Abort` method. | +| **Key dependencies** | - `CordonHelper(node, Uncordon)`
- `check.Abort(fmt.Sprintf(...))`
- `fmt.Sprintf` from standard library | +| **Side effects** | Calls `CordonHelper`, which updates the node’s spec via Kubernetes API.
May abort test execution through `check.Abort`. | +| **How it fits the package** | Part of pod‑recreation tests; invoked in a deferred cleanup loop to restore node state after each drain/cordon operation. | + +--- + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + CordonCleanup --> Call_CordonHelper["Call CordonHelper(node, Uncordon)"] + Call_CordonHelper -- Success --> Done + Call_CordonHelper -- Failure --> Abort_Check["check.Abort(fmt.Sprintf(...))"] +``` + +--- + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_CordonCleanup --> func_CordonHelper +``` + +--- + +#### 5) Functions calling `CordonCleanup` (Mermaid) + +```mermaid +graph TD + func_testPodsRecreation --> func_CordonCleanup +``` + +--- + +#### 6) Usage example (Go) + +```go +// Minimal example invoking CordonCleanup +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/checksdb" +) + +func example() { + var check *checksdb.Check // assume initialized + podrecreation.CordonCleanup("node-1", check) +} +``` + +--- + +--- + +### CordonHelper + +**CordonHelper** - Sets the *Unschedulable* flag on a Kubernetes node to either cordon or uncordon it. + +#### 1) Signature (Go) + +```go +func CordonHelper(name, operation string) error +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Sets the *Unschedulable* flag on a Kubernetes node to either cordon or uncordon it. | +| **Parameters** | `name` string – the node name; `operation` string – `"Cordon"` or `"Uncordon"`. | +| **Return value** | `error` – nil if the update succeeded, otherwise the error encountered (including retry failures). | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `log.Info`, `log.Error`
• `retry.RetryOnConflict`
• Kubernetes clientset (`CoreV1().Nodes()`) | +| **Side effects** | Mutates the node object in the cluster (sets `Spec.Unschedulable`). Performs I/O via API calls and may retry on conflicts. | +| **How it fits the package** | Provides a low‑level helper used by higher‑level pod‑recreation tests to drain or restore nodes during lifecycle checks. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get ClientsHolder"] --> B["Log operation"] + B --> C["RetryOnConflict"] + C --> D{"Fetch Node"} + D -->|"Success"| E["Set Unschedulable based on operation"] + E --> F["Update Node"] + F --> G["Return error or nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_CordonHelper --> func_GetClientsHolder + func_CordonHelper --> func_Info + func_CordonHelper --> func_RetryOnConflict + func_CordonHelper --> func_Update + func_CordonHelper --> func_Error +``` + +#### 5) Functions calling `CordonHelper` (Mermaid) + +```mermaid +graph TD + func_testPodsRecreation --> func_CordonHelper + func_CordonCleanup --> func_CordonHelper +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking CordonHelper +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation" +) + +func main() { + // Cordons a node named "worker-1" + if err := podrecreation.CordonHelper("worker-1", podrecreation.Cordon); err != nil { + log.Fatalf("Failed to cordon: %v", err) + } + + // Later, uncordon the same node + if err := podrecreation.CordonHelper("worker-1", podrecreation.Uncordon); err != nil { + log.Fatalf("Failed to uncordon: %v", err) + } +} +``` + +--- + +### CountPodsWithDelete + +**CountPodsWithDelete** - Counts all deployment or statefulset pods scheduled on a specified node. If `mode` is not `NoDelete`, it initiates deletion of those pods using the appropriate delete mode (`Foreground` or `Background`). 
+
+#### Signature (Go)
+
+```go
+func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Counts all deployment or statefulset pods scheduled on a specified node. If `mode` is not `NoDelete`, it initiates deletion of those pods using the appropriate delete mode (`Foreground` or `Background`). |
+| **Parameters** | `pods []*provider.Pod` – list of pod wrappers; `nodeName string` – target node; `mode string` – delete strategy (`NoDelete`, `DeleteForeground`, etc.). |
+| **Return value** | `count int` – number of pods considered; `err error` – always `nil` in the current implementation, because individual deletion failures are logged rather than returned. |
+| **Key dependencies** | • `skipDaemonPod(pod *corev1.Pod) bool` – skips daemonset pods.
• `deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup)` – performs the actual pod deletion and watcher setup.
• `log.Error` – logs deletion errors. | +| **Side effects** | Deletes pods in parallel via goroutines; blocks until all deletions complete (`wg.Wait()`). Logs but does not return individual deletion failures. | +| **How it fits the package** | Used by node‑draining tests to simulate pod recreation: first counts pods without deleting, then deletes them with a chosen mode and waits for completion before verifying cluster readiness. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over pods"} + B -->|"node matches & is deployment/statefulset"| C["Check skipDaemonPod"] + C -->|"true"| D["Continue loop"] + C -->|"false"| E["Increment count"] + E -->|"mode==NoDelete"| D + E -->|"mode!=NoDelete"| F["deletePod (async)"] + F --> G["Log error if any"] + B --> H["End of loop"] + H --> I["wg.Wait()"] + I --> J["Return count, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CountPodsWithDelete --> func_skipDaemonPod + func_CountPodsWithDelete --> func_deletePod + func_CountPodsWithDelete --> func_log.Error +``` + +#### Functions calling `CountPodsWithDelete` (Mermaid) + +```mermaid +graph TD + func_testPodsRecreation --> func_CountPodsWithDelete +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CountPodsWithDelete +package main + +import ( + "fmt" + podrecreation "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podrecreation" + provider "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + // Assume pods is populated elsewhere + var pods []*provider.Pod + + node := "worker-1" + + // Count pods without deleting + count, err := podrecreation.CountPodsWithDelete(pods, node, podrecreation.NoDelete) + if err != nil { + fmt.Printf("Error counting pods: %v\n", err) + return + } + fmt.Printf("Node %s has %d relevant pods.\n", node, count) + + // Delete pods with foreground mode + _, err = podrecreation.CountPodsWithDelete(pods, 
node, podrecreation.DeleteForeground) + if err != nil { + fmt.Printf("Error deleting pods: %v\n", err) + } +} +``` + +--- + +## Local Functions + +### deletePod + +**deletePod** - Removes a pod from the cluster. If the deletion mode is not background, it spawns a goroutine that waits for the pod to be fully deleted before signalling completion via the supplied `WaitGroup`. + +Delete a Kubernetes pod and optionally wait for its removal based on the deletion mode. + +```go +func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Removes a pod from the cluster. If the deletion mode is not background, it spawns a goroutine that waits for the pod to be fully deleted before signalling completion via the supplied `WaitGroup`. | +| **Parameters** | *`pod`* *corev1.Pod* – target pod
*`mode`* string – deletion strategy (`DeleteBackground`, `NoDelete`, etc.)
*`wg`* *sync.WaitGroup* – synchronisation aid for callers that need to know when the pod is gone | +| **Return value** | error – non‑nil if any step (watch creation or delete call) fails | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – retrieves shared Kubernetes client
• `log.Debug` / `log.Error` – logging utilities
• `clients.K8sClient.CoreV1().Pods(...).Watch(...)` – sets up a watch for the pod’s deletion event
• `clients.K8sClient.CoreV1().Pods(...).Delete(...)` – performs the actual delete call | +| **Side effects** | • Makes an API request to delete the pod.
• If `mode != DeleteBackground`, starts a goroutine that watches for the deletion event and decrements the `WaitGroup`. | +| **How it fits the package** | Used by higher‑level functions (e.g., `CountPodsWithDelete`) to remove pods during lifecycle tests, ensuring proper cleanup and optional waiting for completion. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Retrieve K8s client"] --> B["Log deletion intent"] + B --> C["Create watch on pod"] + C --> D{"Watch creation ok?"} + D -- no --> E["Return error"] + D -- yes --> F["Delete pod via API"] + F --> G{"Delete ok?"} + G -- no --> H["Log error & return"] + G -- yes --> I{"mode == DeleteBackground?"} + I -- yes --> J["Return nil"] + I -- no --> K["Start goroutine to wait"] + K --> L["waitPodDeleted(namespace, podName, gracePeriodSeconds, watcher)"] + L --> M["wg.Done()"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_deletePod --> func_GetClientsHolder + func_deletePod --> func_Debug + func_deletePod --> func_Watch + func_deletePod --> func_Delete + func_deletePod --> func_Error + func_deletePod --> func_String + func_deletePod --> func_Add + func_deletePod --> func_Done + func_deletePod --> func_waitPodDeleted +``` + +#### Functions calling `deletePod` + +```mermaid +graph TD + func_CountPodsWithDelete --> func_deletePod +``` + +#### Usage example (Go) + +```go +// Minimal example invoking deletePod +package main + +import ( + "sync" + + corev1 "k8s.io/api/core/v1" +) + +func main() { + var wg sync.WaitGroup + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "example-pod", + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: ptr.Int64(30), + }, + } + // Mode can be "DeleteBackground" or any custom string understood by the caller + if err := deletePod(pod, "DeleteForeground", &wg); err != nil { + fmt.Printf("Failed to delete pod: %v\n", err) + return + } + wg.Wait() // Wait until the pod is fully deleted (if not background mode) +} +``` + 
+--- + +### skipDaemonPod + +**skipDaemonPod** - Determines whether a pod is owned by a DaemonSet and should therefore be exempt from deletion during recreation tests. + +#### Signature (Go) + +```go +func skipDaemonPod(pod *corev1.Pod) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a pod is owned by a DaemonSet and should therefore be exempt from deletion during recreation tests. | +| **Parameters** | `pod` – pointer to a `corev1.Pod`; the pod to inspect. | +| **Return value** | `bool`: `true` if the pod’s owner references include a DaemonSet, otherwise `false`. | +| **Key dependencies** | *Core Kubernetes API types* (`corev1.Pod`, `OwnerReference`). | +| **Side effects** | None; purely read‑only inspection. | +| **How it fits the package** | Used by the pod recreation logic to filter out DaemonSet pods, ensuring only user‑managed workloads are targeted for deletion. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive pod"] --> B{"Inspect OwnerReferences"} + B -->|"Contains DaemonSet"| C["Return true"] + B -->|"No DaemonSet"| D["Return false"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `skipDaemonPod` (Mermaid) + +```mermaid +graph TD + func_CountPodsWithDelete --> func_skipDaemonPod +``` + +#### Usage example (Go) + +```go +// Minimal example invoking skipDaemonPod +import ( + "k8s.io/api/core/v1" +) + +func main() { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + OwnerReferences: []metav1.OwnerReference{ + {Kind: "DaemonSet", Name: "ds-example"}, + }, + }, + } + if skipDaemonPod(pod) { + fmt.Println("Skipping pod owned by DaemonSet") + } else { + fmt.Println("Pod can be deleted") + } +} +``` + +--- + +### waitPodDeleted + +**waitPodDeleted** - Monitors the specified Kubernetes namespace for the deletion of a pod named `podName`. 
The function returns as soon as the watcher reports a `Deleted` event for the pod; it also times out after `timeout` seconds if no deletion occurs.

#### 1) Signature (Go)

```go
+func waitPodDeleted(ns string, podName string, timeout int64, watcher watch.Interface)
+```
+
+---
+
+#### 2) Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Monitors the specified Kubernetes namespace for the deletion of a pod named `podName`. The function returns as soon as the watcher reports a `Deleted` event for the pod; it also times out after `timeout` seconds if no deletion occurs. |
+| **Parameters** | *`ns string` – Namespace containing the pod.
* `podName string` – Name of the pod to watch for deletion.
*`timeout int64` – Maximum number of seconds to wait before timing out.
* `watcher watch.Interface` – A Kubernetes watch interface that streams pod events. | +| **Return value** | None (void). The function signals completion by returning or timing out. | +| **Key dependencies** | • `log.Debug` and `log.Info` from the internal logging package
• `time.After`, `time.Duration` for timeout handling
• `watch.Interface.ResultChan()` to receive watch events | +| **Side effects** | *Stops the provided watcher (`watcher.Stop()`).
* Logs debug and info messages via the logger.
* No mutation of external state; only logs and internal control flow. | +| **How it fits the package** | In `podrecreation`, this helper is invoked after initiating a pod deletion to asynchronously wait for confirmation that the pod has been removed, ensuring subsequent tests run against the correct cluster state. | + +--- + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> LogEnter["log.Debug: entering waitPodDeleted"] + LogEnter --> Loop["for { select }"] + Loop --> WatchEvent{"event := <-watcher.ResultChan()"} + WatchEvent -->|"event.Type == Deleted"| LogDelete["log.Debug: pod deleted"] --> Return["return"] + WatchEvent -->|"other event"| Loop + Loop --> TimeoutEvent{"<- time.After(timeout)"} + TimeoutEvent -->|"timeout reached"| LogTimeout["log.Info: timed out"] --> Return +``` + +--- + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_waitPodDeleted --> Logger.Debug + func_waitPodDeleted --> Watcher.ResultChan + func_waitPodDeleted --> time.After + func_waitPodDeleted --> time.Duration + func_waitPodDeleted --> Logger.Info +``` + +--- + +#### 5) Functions calling `waitPodDeleted` (Mermaid) + +```mermaid +graph TD + deletePod --> waitPodDeleted +``` + +--- + +#### 6) Usage example (Go) + +```go +// Minimal example invoking waitPodDeleted +import ( + "k8s.io/apimachinery/pkg/watch" +) + +// Assume watcher is already created for the pod we want to monitor. 
+var ns = "default" +var podName = "example-pod" +var timeoutSeconds int64 = 30 +var watcher watch.Interface // obtained from a Kubernetes client + +waitPodDeleted(ns, podName, timeoutSeconds, watcher) +``` + +--- + +--- diff --git a/docs/tests/lifecycle/podsets/podsets.md b/docs/tests/lifecycle/podsets/podsets.md new file mode 100644 index 000000000..f34480e09 --- /dev/null +++ b/docs/tests/lifecycle/podsets/podsets.md @@ -0,0 +1,695 @@ +# Package podsets + +**Path**: `tests/lifecycle/podsets` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [GetAllNodesForAllPodSets](#getallnodesforallpodsets) + - [WaitForAllPodSetsReady](#waitforallpodsetsready) + - [WaitForStatefulSetReady](#waitforstatefulsetready) +- [Local Functions](#local-functions) + - [getDeploymentsInfo](#getdeploymentsinfo) + - [getNotReadyDeployments](#getnotreadydeployments) + - [getNotReadyStatefulSets](#getnotreadystatefulsets) + - [getStatefulSetsInfo](#getstatefulsetsinfo) + - [isDeploymentReady](#isdeploymentready) + - [isStatefulSetReady](#isstatefulsetready) + +## Overview + +The podsets package provides utilities for monitoring and waiting on Kubernetes Deployment and StatefulSet resources within test environments, ensuring that pods are correctly scheduled and ready before proceeding. 
+ +### Key Features + +- Retrieves all nodes hosting ReplicaSet or StatefulSet pods +- Polls the cluster until Deployments and StatefulSets become ready or a timeout occurs +- Convenience functions to wait for individual StatefulSet readiness + +### Design Notes + +- Assumes provider package supplies status checks; errors logged but not propagated +- Limits polling granularity to Sleep intervals; may miss rapid state changes +- Best practice: use exported WaitFor… functions rather than internal helpers directly + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetAllNodesForAllPodSets(pods []*provider.Pod) (nodes map[string]bool)](#getallnodesforallpodsets) | Returns a map of node names that host at least one pod whose owner is a ReplicaSet or StatefulSet. The boolean value in the map is always `true`. | +| [func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) ([]*provider.Deployment, []*provider.StatefulSet)](#waitforallpodsetsready) | Repeatedly polls a test environment until all Deployments and StatefulSets are ready or a timeout expires. Returns the remaining not‑ready items. | +| [func WaitForStatefulSetReady(ns string, name string, timeout time.Duration, logger *log.Logger) bool](#waitforstatefulsetready) | Polls the Kubernetes API for a StatefulSet’s status and returns `true` when it becomes ready or `false` if the timeout is reached. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getDeploymentsInfo(deployments []*provider.Deployment) []string](#getdeploymentsinfo) | Converts each `*provider.Deployment` into a string formatted as `:` and returns the collection. 
| +| [func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment](#getnotreadydeployments) | Iterates over a slice of `Deployment` objects, checks each one's readiness via `isDeploymentReady`, and returns only those that are not ready or whose status could not be determined. | +| [func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet](#getnotreadystatefulsets) | Iterates over a slice of `*provider.StatefulSet`, determines readiness via `isStatefulSetReady`, and returns only those that are not ready or whose status could not be retrieved. | +| [func getStatefulSetsInfo(statefulSets []*provider.StatefulSet) []string](#getstatefulsetsinfo) | Produces a slice of strings, each formatted as `namespace:name`, representing the location and identity of given StatefulSet resources. | +| [func isDeploymentReady(name, namespace string) (bool, error)](#isdeploymentready) | Determines if the specified Deployment has reached a ready state by delegating to provider logic. | +| [func isStatefulSetReady(name, namespace string) (bool, error)](#isstatefulsetready) | Determines readiness of a Kubernetes StatefulSet by retrieving its latest state and evaluating its status. | + +## Exported Functions + +### GetAllNodesForAllPodSets + +**GetAllNodesForAllPodSets** - Returns a map of node names that host at least one pod whose owner is a ReplicaSet or StatefulSet. The boolean value in the map is always `true`. + +#### Signature (Go) + +```go +func GetAllNodesForAllPodSets(pods []*provider.Pod) (nodes map[string]bool) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a map of node names that host at least one pod whose owner is a ReplicaSet or StatefulSet. The boolean value in the map is always `true`. | +| **Parameters** | `pods []*provider.Pod` – slice of all pods to inspect. | +| **Return value** | `nodes map[string]bool` – keys are node names; values are `true`. 
| +| **Key dependencies** | *`make` (to allocate the map)
* Iteration over pod and owner reference data structures | +| **Side effects** | None: pure function, no external state changes or I/O. | +| **How it fits the package** | Provides a helper for other tests that need to know which nodes contain pods from ReplicaSet or StatefulSet workloads (e.g., node draining tests). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Allocate empty map `nodes`"] + B --> C{"For each pod in `pods`"} + C --> D{"For each owner reference of pod"} + D -->|"Kind is ReplicaSet or StatefulSet"| E["Add pod.Spec.NodeName to `nodes`, set value true, break inner loop"] + D -->|"Other kind"| F["Continue to next owner reference"] + C --> G["Return `nodes`"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetAllNodesForAllPodSets --> func_make +``` + +#### Functions calling `GetAllNodesForAllPodSets` (Mermaid) + +```mermaid +graph TD + testPodsRecreation --> func_GetAllNodesForAllPodSets +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetAllNodesForAllPodSets +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/provider" +) + +func main() { + // Assume pods is a slice of *provider.Pod populated elsewhere + var pods []*provider.Pod + + nodes := podsets.GetAllNodesForAllPodSets(pods) + for nodeName := range nodes { + fmt.Println("Node hosting RS/STS pod:", nodeName) + } +} +``` + +--- + +### WaitForAllPodSetsReady + +**WaitForAllPodSetsReady** - Repeatedly polls a test environment until all Deployments and StatefulSets are ready or a timeout expires. Returns the remaining not‑ready items. 
+ +#### Signature (Go) + +```go +func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) ([]*provider.Deployment, []*provider.StatefulSet) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Repeatedly polls a test environment until all Deployments and StatefulSets are ready or a timeout expires. Returns the remaining not‑ready items. | +| **Parameters** | `env *provider.TestEnvironment` – collection of podsets to check.
`timeout time.Duration` – maximum wait period.
`logger *log.Logger` – logger for progress and error reporting. | +| **Return value** | Two slices: deployments that never reached readiness, and StatefulSets that remained not ready after the timeout. | +| **Key dependencies** | `time.Sleep`, `time.Now`, `time.Since`, `time.Duration`; helper functions `getDeploymentsInfo`, `getStatefulSetsInfo`, `getNotReadyDeployments`, `getNotReadyStatefulSets`. | +| **Side effects** | Periodic logging; no mutation of the environment or podsets. | +| **How it fits the package** | Provides a blocking readiness check used by lifecycle tests to ensure resources are stable before proceeding with operations such as node draining or pod recreation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log initial wait message"] + B --> C{"Loop until timeout"} + C -->|"Timeout not reached"| D["Query Deployments readiness"] + D --> E["Record not‑ready deployments"] + E --> F["Query StatefulSets readiness"] + F --> G["Record not‑ready statefulsets"] + G --> H["Log current status"] + H --> I{"All ready?"} + I -- Yes --> J["Break loop"] + I -- No --> K["Sleep queryInterval"] + K --> C + J --> L["Log final non‑ready lists"] + L --> M["Return remaining deployments, statefulsets"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_WaitForAllPodSetsReady --> func_getDeploymentsInfo + func_WaitForAllPodSetsReady --> func_getStatefulSetsInfo + func_WaitForAllPodSetsReady --> func_getNotReadyDeployments + func_WaitForAllPodSetsReady --> func_getNotReadyStatefulSets +``` + +#### Functions calling `WaitForAllPodSetsReady` (Mermaid) + +```mermaid +graph TD + func_testPodsRecreation --> func_WaitForAllPodSetsReady +``` + +#### Usage example (Go) + +```go +// Minimal example invoking WaitForAllPodSetsReady +env := provider.NewTestEnvironment() +timeout := 5 * time.Minute +logger := log.Default() + +notReadyDeps, notReadySts := podsets.WaitForAllPodSetsReady(env, timeout, logger) +if 
len(notReadyDeps) > 0 || len(notReadySts) > 0 { + fmt.Println("Some podsets never became ready") +} +``` + +--- + +### WaitForStatefulSetReady + +**WaitForStatefulSetReady** - Polls the Kubernetes API for a StatefulSet’s status and returns `true` when it becomes ready or `false` if the timeout is reached. + +Checks a StatefulSet in the specified namespace until it reports readiness or a timeout expires, logging progress throughout. + +#### Signature (Go) + +```go +func WaitForStatefulSetReady(ns string, name string, timeout time.Duration, logger *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Polls the Kubernetes API for a StatefulSet’s status and returns `true` when it becomes ready or `false` if the timeout is reached. | +| **Parameters** | `ns string – namespace of the StatefulSet`, `name string – name of the StatefulSet`, `timeout time.Duration – maximum wait time`, `logger *log.Logger – logger for debug/info/error messages`. | +| **Return value** | `bool – true if ready, false otherwise`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `time.Now()`, `time.Since()`, `time.Sleep()`
• `provider.GetUpdatedStatefulset()`
• `StatefulSet.IsStatefulSetReady()`
• Logger methods (`Debug`, `Info`, `Error`). | +| **Side effects** | • Makes repeated API calls to fetch the StatefulSet.
• Emits log messages; no mutation of external state. | +| **How it fits the package** | Utility function used by scaling helpers to ensure a StatefulSet is fully operational after changes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> LogDebug["logger.Debug"] + LogDebug --> GetClients["clientsholder.GetClientsHolder()"] + GetClients --> Loop{"time.Since(start) < timeout"} + Loop -- true --> GetSS["provider.GetUpdatedStatefulset()"] + GetSS -- error --> LogError["logger.Error"] + GetSS -- ready? --> Ready["ss.IsStatefulSetReady()"] + Ready -- true --> LogInfo["logger.Info"] --> ReturnTrue("return true") + Ready -- false --> Sleep["time.Sleep(1s)"] --> Loop + Loop -- false --> LogErrOut["logger.Error"] --> ReturnFalse("return false") +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + WaitForStatefulSetReady --> GetClientsHolder + WaitForStatefulSetReady --> time.Now + WaitForStatefulSetReady --> provider.GetUpdatedStatefulset + WaitForStatefulSetReady --> StatefulSet.IsStatefulSetReady + WaitForStatefulSetReady --> time.Sleep + WaitForStatefulSetReady --> logger.Debug + WaitForStatefulSetReady --> logger.Info + WaitForStatefulSetReady --> logger.Error +``` + +#### Functions calling `WaitForStatefulSetReady` (Mermaid) + +```mermaid +graph TD + scaleHpaStatefulSetHelper --> WaitForStatefulSetReady + scaleStatefulsetHelper --> WaitForStatefulSetReady +``` + +#### Usage example (Go) + +```go +// Minimal example invoking WaitForStatefulSetReady +import ( + "log" + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets" +) + +func main() { + logger := log.New(os.Stdout, "", log.LstdFlags) + ready := podsets.WaitForStatefulSetReady("default", "my-app", 5*time.Minute, logger) + if !ready { + // handle timeout + } +} +``` + +--- + +## Local Functions + +### getDeploymentsInfo + +**getDeploymentsInfo** - Converts each `*provider.Deployment` into a string formatted as `:` and returns 
the collection.

#### Signature (Go)

```go
+func getDeploymentsInfo(deployments []*provider.Deployment) []string
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Converts each `*provider.Deployment` into a string formatted as `namespace:name` and returns the collection. |
+| **Parameters** | `deployments []*provider.Deployment` – slice of deployment pointers to process. |
+| **Return value** | `[]string` – ordered list of “namespace:name” identifiers. |
+| **Key dependencies** | • `append` (built‑in)
• `fmt.Sprintf` from the standard library | +| **Side effects** | None; pure function that only reads input and returns new data. | +| **How it fits the package** | Used by higher‑level test utilities to log or compare deployment identities during readiness checks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over deployments"} + B --> C["Format namespace:name"] + C --> D["Append to result slice"] + D --> E["Return result slice"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getDeploymentsInfo --> fmt.Sprintf + func_getDeploymentsInfo --> append +``` + +#### Functions calling `getDeploymentsInfo` (Mermaid) + +```mermaid +graph TD + func_WaitForAllPodSetsReady --> func_getDeploymentsInfo +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getDeploymentsInfo +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func main() { + // Sample deployments (normally provided by the test environment) + deployments := []*provider.Deployment{ + {Namespace: "tnf", Name: "test"}, + {Namespace: "tnf", Name: "hazelcast-platform-controller-manager"}, + } + + info := podsets.GetDeploymentsInfo(deployments) // note: function is unexported; in real use it would be called within the package + fmt.Println(info) +} +``` + +--- + +### getNotReadyDeployments + +**getNotReadyDeployments** - Iterates over a slice of `Deployment` objects, checks each one's readiness via `isDeploymentReady`, and returns only those that are not ready or whose status could not be determined. 
+ +#### Signature (Go) + +```go +func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over a slice of `Deployment` objects, checks each one's readiness via `isDeploymentReady`, and returns only those that are not ready or whose status could not be determined. | +| **Parameters** | `deployments []*provider.Deployment` – list to evaluate. | +| **Return value** | `[]*provider.Deployment` – subset containing non‑ready deployments. | +| **Key dependencies** | *`isDeploymentReady(name, namespace)` – determines readiness.
* `log.Error`, `log.Debug` – logging side effects.
*`append` – slice manipulation.
* `dep.ToString()` – string representation for logs. | +| **Side effects** | Logs errors and debug messages; does not modify input slice or external state. | +| **How it fits the package** | Used by the pod‑set readiness checker (`WaitForAllPodSetsReady`) to iteratively filter out ready deployments until all are confirmed ready or a timeout occurs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Loop over deployments"} + B -->|"dep"| C["Call isDeploymentReady(dep.Name, dep.Namespace)"] + C --> D{"Error?"} + D -->|"Yes"| E["Log error + mark as not ready"] + D -->|"No"| F{"Ready?"} + F -->|"Yes"| G["Log debug – ready"] + F -->|"No"| H["Mark as not ready"] + H --> I["Append to result"] + G --> I + B --> J["Return result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getNotReadyDeployments --> func_isDeploymentReady + func_getNotReadyDeployments --> log.Error + func_getNotReadyDeployments --> log.Debug + func_getNotReadyDeployments --> ToString + func_getNotReadyDeployments --> append +``` + +#### Functions calling `getNotReadyDeployments` (Mermaid) + +```mermaid +graph TD + WaitForAllPodSetsReady --> getNotReadyDeployments +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getNotReadyDeployments +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + // Assume env.Deployments is already populated + var deployments []*provider.Deployment + + notReady := podsets.GetNotReadyDeployments(deployments) // note: function is unexported; normally called within the package + _ = notReady // use result as needed +} +``` + +--- + +### getNotReadyStatefulSets + +**getNotReadyStatefulSets** - Iterates over a slice of `*provider.StatefulSet`, determines readiness via `isStatefulSetReady`, and returns only those that are not ready or whose status could not be retrieved. 
+ +#### Signature (Go) + +```go +func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over a slice of `*provider.StatefulSet`, determines readiness via `isStatefulSetReady`, and returns only those that are not ready or whose status could not be retrieved. | +| **Parameters** | `statefulSets []*provider.StatefulSet` – list of StatefulSets to evaluate. | +| **Return value** | `[]*provider.StatefulSet` – the subset of input StatefulSets that are not ready or failed to query. | +| **Key dependencies** | • `isStatefulSetReady(name, namespace string) (bool, error)`
• Logging via `log.Error`, `log.Debug` | +| **Side effects** | Emits debug/error logs; no mutation of input slice. | +| **How it fits the package** | Used by `WaitForAllPodSetsReady` to repeatedly poll podset readiness during test setup. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getNotReadyStatefulSets"] --> B["Initialize empty notReady list"] + B --> C{"Iterate over input slice"} + C -->|"for each sts"| D["Call isStatefulSetReady(sts.Name, sts.Namespace)"] + D --> E{"Error?"} + E -- Yes --> F["Log error, add sts to notReady"] + E -- No --> G{"Ready?"} + G -- True --> H["Log ready message"] + G -- False --> I["Add sts to notReady"] + I --> J + H --> J + J --> K["Return notReady list"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getNotReadyStatefulSets --> func_isStatefulSetReady + func_getNotReadyStatefulSets --> log.Error + func_getNotReadyStatefulSets --> log.Debug +``` + +#### Functions calling `getNotReadyStatefulSets` (Mermaid) + +```mermaid +graph TD + func_WaitForAllPodSetsReady --> func_getNotReadyStatefulSets +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getNotReadyStatefulSets +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func example() { + // Assume stsList is obtained from the test environment. + var stsList []*provider.StatefulSet + notReady := podsets.getNotReadyStatefulSets(stsList) + fmt.Printf("Found %d not‑ready StatefulSets\n", len(notReady)) +} +``` + +--- + +--- + +### getStatefulSetsInfo + +**getStatefulSetsInfo** - Produces a slice of strings, each formatted as `namespace:name`, representing the location and identity of given StatefulSet resources. 
+ +#### Signature (Go) + +```go +func getStatefulSetsInfo(statefulSets []*provider.StatefulSet) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a slice of strings, each formatted as `namespace:name`, representing the location and identity of given StatefulSet resources. | +| **Parameters** | `statefulSets []*provider.StatefulSet` – collection of StatefulSet pointers to inspect. | +| **Return value** | `[]string` – list of namespace‑name pairs for all provided StatefulSets. | +| **Key dependencies** | • `append` (builtin)
• `fmt.Sprintf` from the standard library | +| **Side effects** | None; purely functional, no mutation of inputs or external state. | +| **How it fits the package** | Utility used by higher‑level monitoring functions to log and report readiness status of StatefulSets during test lifecycle. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Iterate["Iterate over each StatefulSet"] + Iterate --> Format["Sprintf(%s:%s, Namespace, Name)"] + Format --> Append["Append formatted string to result slice"] + Append --> End["Return the slice of strings"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getStatefulSetsInfo --> fmt.Sprintf + func_getStatefulSetsInfo --> append +``` + +#### Functions calling `getStatefulSetsInfo` (Mermaid) + +```mermaid +graph TD + func_WaitForAllPodSetsReady --> func_getStatefulSetsInfo +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getStatefulSetsInfo +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets" + "github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func main() { + var sts []*provider.StatefulSet + // sts would be populated from a Kubernetes client or test fixture + info := podsets.getStatefulSetsInfo(sts) + for _, s := range info { + fmt.Println(s) // prints namespace:name for each StatefulSet + } +} +``` + +--- + +### isDeploymentReady + +**isDeploymentReady** - Determines if the specified Deployment has reached a ready state by delegating to provider logic. + +#### 1) Signature (Go) + +```go +func isDeploymentReady(name, namespace string) (bool, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the specified Deployment has reached a ready state by delegating to provider logic. | +| **Parameters** | `name` – deployment name; `namespace` – namespace containing the deployment. 
| +| **Return value** | `bool` – true if the deployment is ready; `error` – any error encountered while retrieving or evaluating the deployment. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `k8s.io/client-go/kubernetes/typed/apps/v1.AppsV1Interface` (AppsV1 API)
• `provider.GetUpdatedDeployment()`
• `provider.Deployment.IsDeploymentReady()` | +| **Side effects** | None; performs read‑only operations against the Kubernetes cluster. | +| **How it fits the package** | Utility used by test helpers to filter deployments that are not yet ready during lifecycle tests. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve AppsV1 client"} + B --> C["get ClientsHolder"] + C --> D["AppsV1 API"] + D --> E{"Get Deployment"} + E --> F["provider.GetUpdatedDeployment"] + F --> G{"Check readiness"} + G --> H["Return IsDeploymentReady result"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_isDeploymentReady --> AppsV1 + func_isDeploymentReady --> func_GetClientsHolder + func_isDeploymentReady --> func_GetUpdatedDeployment + func_isDeploymentReady --> func_IsDeploymentReady +``` + +#### 5) Functions calling `isDeploymentReady` (Mermaid) + +```mermaid +graph TD + func_getNotReadyDeployments --> func_isDeploymentReady +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking isDeploymentReady +ready, err := podsets.isDeploymentReady("my-app", "default") +if err != nil { + log.Fatalf("failed to check deployment readiness: %v", err) +} +fmt.Printf("Deployment ready: %t\n", ready) +``` + +--- + +### isStatefulSetReady + +**isStatefulSetReady** - Determines readiness of a Kubernetes StatefulSet by retrieving its latest state and evaluating its status. + +Checks whether a StatefulSet identified by name and namespace is ready in the cluster. + +```go +func isStatefulSetReady(name, namespace string) (bool, error) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines readiness of a Kubernetes StatefulSet by retrieving its latest state and evaluating its status. | +| **Parameters** | `name` string – name of the StatefulSet; `namespace` string – namespace containing the StatefulSet. 
| +| **Return value** | `bool` – true if the StatefulSet is ready, false otherwise; `error` – non‑nil on retrieval or evaluation failure. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `provider.GetUpdatedStatefulset(appsV1Api, namespace, name)`
• `sts.IsStatefulSetReady()` method of the returned StatefulSet | +| **Side effects** | None (pure function). | +| **How it fits the package** | Used by test helpers to filter ready vs. not‑ready StatefulSets during lifecycle tests. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Get K8s AppsV1 client"] --> B["Retrieve updated StatefulSet"] + B --> C["Check readiness via IsStatefulSetReady()"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_isStatefulSetReady --> func_GetClientsHolder + func_isStatefulSetReady --> func_GetUpdatedStatefulset + func_IsStatefulSetReady --> func_isStatefulSetReady +``` + +#### Functions calling `isStatefulSetReady` + +```mermaid +graph TD + func_getNotReadyStatefulSets --> func_isStatefulSetReady +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isStatefulSetReady +ready, err := podsets.isStatefulSetReady("my-statefulset", "default") +if err != nil { + log.Fatalf("Failed to check readiness: %v", err) +} +fmt.Printf("StatefulSet ready: %t\n", ready) +``` + +--- diff --git a/docs/tests/lifecycle/scaling/scaling.md b/docs/tests/lifecycle/scaling/scaling.md new file mode 100644 index 000000000..119abdb6f --- /dev/null +++ b/docs/tests/lifecycle/scaling/scaling.md @@ -0,0 +1,1479 @@ +# Package scaling + +**Path**: `tests/lifecycle/scaling` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [CheckOwnerReference](#checkownerreference) + - [GetResourceHPA](#getresourcehpa) + - [IsManaged](#ismanaged) + - [TestScaleCrd](#testscalecrd) + - [TestScaleDeployment](#testscaledeployment) + - [TestScaleHPACrd](#testscalehpacrd) + - [TestScaleHpaDeployment](#testscalehpadeployment) + - [TestScaleHpaStatefulSet](#testscalehpastatefulset) + - [TestScaleStatefulSet](#testscalestatefulset) +- [Local Functions](#local-functions) + - [scaleCrHelper](#scalecrhelper) + - [scaleDeploymentHelper](#scaledeploymenthelper) + - [scaleHpaCRDHelper](#scalehpacrdhelper) 
+ - [scaleHpaDeploymentHelper](#scalehpadeploymenthelper) + - [scaleHpaStatefulSetHelper](#scalehpastatefulsethelper) + - [scaleStatefulsetHelper](#scalestatefulsethelper) + +## Overview + +The scaling package provides utilities and tests for verifying that Kubernetes resources—Deployments, StatefulSets, and Custom Resources—can be scaled up and down reliably, both with and without Horizontal Pod Autoscalers. + +### Key Features + +- Comprehensive test functions (e.g., TestScaleDeployment, TestScaleStatefulSet) that perform real scaling operations and validate readiness +- Helper routines that adjust replica counts while handling conflicts and waiting for pod or HPA stabilization +- Owner‑reference filtering to ensure only scalable CRDs are targeted + +### Design Notes + +- Scaling helpers retry on conflict to cope with concurrent updates; they block until the target resource reaches the desired state +- Tests operate against live clusters, so they require appropriate RBAC permissions and may be slow; use in integration or end‑to‑end test suites +- Owner reference checks rely on a filter list of CRDs marked as scalable; missing entries will cause tests to skip scaling for that CRD + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CheckOwnerReference(ownerReference []apiv1.OwnerReference, crdFilter []configuration.CrdFilter, crds []*apiextv1.CustomResourceDefinition) bool](#checkownerreference) | Evaluates whether all owner references of a Kubernetes object refer to CRDs that are marked as scalable in the supplied filter list. Returns `true` if at least one reference matches a scalable CRD; otherwise, returns `false`. | +| [func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespace, kind string) *scalingv1.HorizontalPodAutoscaler](#getresourcehpa) | Searches a list of `HorizontalPodAutoscaler` objects for one that targets the specified resource (`name`, `namespace`, and `kind`) and returns it. 
| +| [func IsManaged(podSetName string, managedPodSet []configuration.ManagedDeploymentsStatefulsets) bool](#ismanaged) | Returns `true` if the supplied pod set name is present in the provided slice of `ManagedDeploymentsStatefulsets`; otherwise returns `false`. | +| [func (*provider.CrScale, schema.GroupResource, time.Duration, *log.Logger) bool](#testscalecrd) | Attempts to scale a Custom Resource up and down by one replica, verifying that scaling operations succeed. | +| [func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, logger *log.Logger) bool](#testscaledeployment) | Determines whether a Kubernetes `Deployment` can be scaled up and down reliably when not managed by a Horizontal Pod Autoscaler. It performs a scale‑up followed by a scale‑down (or vice versa) and reports success or failure. | +| [func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool](#testscalehpacrd) | Validates that a CR can be scaled via its HPA. It performs an up‑scale followed by a down‑scale (or vice versa), then restores the original HPA limits. | +| [func (*provider.Deployment, *v1autoscaling.HorizontalPodAutoscaler, time.Duration, *log.Logger) bool](#testscalehpadeployment) | Verifies that a Deployment managed by an HPA can be scaled up and down correctly while maintaining its desired replica count. | +| [func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool](#testscalehpastatefulset) | Attempts to scale a StatefulSet up and down using its Horizontal Pod Autoscaler (HPA), ensuring the HPA’s `minReplicas`/`maxReplicas` are respected and that the StatefulSet becomes ready after each operation. Returns `true` if all scaling steps succeed. 
| +| [func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration, logger *log.Logger) bool](#testscalestatefulset) | Performs a basic scale test on a StatefulSet: scales it up if replicas ≤ 1, otherwise scales down. After each operation it waits for readiness via `scaleStatefulsetHelper`. Returns `true` only when both operations succeed. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func(scale.ScalesGetter, schema.GroupResource, *provider.CrScale, int32, bool, time.Duration, *log.Logger) bool](#scalecrhelper) | Adjusts the replica count of a CRD instance and waits for the scaling operation to complete. | +| [func scaleDeploymentHelper( client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool, logger *log.Logger, ) bool](#scaledeploymenthelper) | Adjusts a Deployment’s `spec.replicas` to the desired count, handling conflicts via retry and confirming pod readiness within a timeout. | +| [func scaleHpaCRDHelper( hpscaler hps.HorizontalPodAutoscalerInterface, hpaName string, crName string, namespace string, min int32, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger, ) bool](#scalehpacrdhelper) | Updates the `minReplicas` and `maxReplicas` fields of a HorizontalPodAutoscaler (HPA), retries on conflicts, then waits for the associated CustomResource to reach the desired state. Returns `true` if successful. | +| [func scaleHpaDeploymentHelper( hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger, ) bool](#scalehpadeploymenthelper) | Updates a Horizontal Pod Autoscaler (HPA) to new minimum and maximum replica counts, retries on conflict, then waits until the associated deployment becomes ready. Returns `true` if all steps succeed. 
| +| [func scaleHpaStatefulSetHelper( hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool](#scalehpastatefulsethelper) | Atomically updates a Horizontal Pod Autoscaler's `MinReplicas` and `MaxReplicas`, then waits until the referenced StatefulSet reaches a ready state. Returns `true` on success, `false` otherwise. | +| [func scaleStatefulsetHelper( clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger, ) bool](#scalestatefulsethelper) | Scales a StatefulSet to the desired replica count, retrying on conflicts and waiting for readiness. | + +## Exported Functions + +### CheckOwnerReference + +**CheckOwnerReference** - Evaluates whether all owner references of a Kubernetes object refer to CRDs that are marked as scalable in the supplied filter list. Returns `true` if at least one reference matches a scalable CRD; otherwise, returns `false`. + +#### Signature (Go) + +```go +func CheckOwnerReference(ownerReference []apiv1.OwnerReference, crdFilter []configuration.CrdFilter, crds []*apiextv1.CustomResourceDefinition) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Evaluates whether all owner references of a Kubernetes object refer to CRDs that are marked as scalable in the supplied filter list. Returns `true` if at least one reference matches a scalable CRD; otherwise, returns `false`. | +| **Parameters** | - `ownerReference []apiv1.OwnerReference`: Owner references attached to the target object.
- `crdFilter []configuration.CrdFilter`: List of CRD name suffixes and their scalability flag.
- `crds []*apiextv1.CustomResourceDefinition`: All available CRDs in the cluster. | +| **Return value** | `bool` – `true` if a scalable owner reference is found, otherwise `false`. | +| **Key dependencies** | • `strings.HasSuffix` (standard library)
• Types from `apiv1`, `configuration`, and `apiextv1` packages. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by scaling tests (`testDeploymentScaling`, `testStatefulSetScaling`) to skip or flag resources whose ownership cannot be scaled due to non‑scalable CRDs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate owner refs"} + B -->|"next ref"| C{"Match CRD kind"} + C -->|"found"| D{"Check suffix in filter"} + D -->|"matches"| E["Return true"] + D -->|"no match"| F["Continue loop"] + C -->|"no match"| F + F -->|"done"| G["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CheckOwnerReference --> func_HasSuffix +``` + +#### Functions calling `CheckOwnerReference` (Mermaid) + +```mermaid +graph TD + func_testDeploymentScaling --> func_CheckOwnerReference + func_testStatefulSetScaling --> func_CheckOwnerReference +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CheckOwnerReference +import ( + apiv1 "k8s.io/api/core/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling/..." +) + +func example() { + ownerRefs := []apiv1.OwnerReference{ + {Kind: "Deployment", Name: "my-deploy"}, + } + crdFilter := []configuration.CrdFilter{ + {NameSuffix: "-example.com", Scalable: true}, + } + crds := []*apiextv1.CustomResourceDefinition{ + // populate with CRDs discovered in the cluster + } + + scalable := scaling.CheckOwnerReference(ownerRefs, crdFilter, crds) + if scalable { + fmt.Println("Owner references are scalable") + } else { + fmt.Println("No scalable owner references found") + } +} +``` + +--- + +### GetResourceHPA + +**GetResourceHPA** - Searches a list of `HorizontalPodAutoscaler` objects for one that targets the specified resource (`name`, `namespace`, and `kind`) and returns it. 
+ +#### Signature (Go) + +```go +func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespace, kind string) *scalingv1.HorizontalPodAutoscaler +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Searches a list of `HorizontalPodAutoscaler` objects for one that targets the specified resource (`name`, `namespace`, and `kind`) and returns it. | +| **Parameters** | - `hpaList []*scalingv1.HorizontalPodAutoscaler`: slice of HPA objects to search.
- `name string`: target resource name.
- `namespace string`: namespace of the target resource.
- `kind string`: kind of the target resource (e.g., `"Deployment"`). | +| **Return value** | `*scalingv1.HorizontalPodAutoscaler`: the matching HPA, or `nil` if none is found. | +| **Key dependencies** | - `scalingv1.HorizontalPodAutoscaler` type from the Kubernetes autoscaling API.
- Basic Go slice iteration and comparison operators. | +| **Side effects** | None – purely functional; does not modify input slices or external state. | +| **How it fits the package** | Provides a helper for test functions that need to determine whether a resource is managed by an HPA before performing scaling tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over hpaList"} + B -->|"hpa.Spec.ScaleTargetRef matches name, namespace, kind"| C["Return hpa"] + B -->|"No match found after loop ends"| D["Return nil"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `GetResourceHPA` (Mermaid) + +```mermaid +graph TD + func_testDeploymentScaling --> GetResourceHPA + func_testScaleCrd --> GetResourceHPA + func_testStatefulSetScaling --> GetResourceHPA +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetResourceHPA +package main + +import ( + "fmt" + + scalingv1 "k8s.io/api/autoscaling/v2beta2" +) + +func main() { + // Example HPA slice (normally obtained from the cluster) + hpaList := []*scalingv1.HorizontalPodAutoscaler{ + &scalingv1.HorizontalPodAutoscaler{Spec: scalingv1.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: scalingv1.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "frontend", + }, + }}, + } + + // Look for an HPA controlling the "frontend" Deployment in "default" + hpa := GetResourceHPA(hpaList, "frontend", "default", "Deployment") + if hpa != nil { + fmt.Println("Found HPA:", hpa.Name) + } else { + fmt.Println("No matching HPA found.") + } +} +``` + +--- + +### IsManaged + +**IsManaged** - Returns `true` if the supplied pod set name is present in the provided slice of `ManagedDeploymentsStatefulsets`; otherwise returns `false`. + +Checks whether a given pod set name appears in a list of managed deployments or statefulsets. 
+ +--- + +#### Signature (Go) + +```go +func IsManaged(podSetName string, managedPodSet []configuration.ManagedDeploymentsStatefulsets) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if the supplied pod set name is present in the provided slice of `ManagedDeploymentsStatefulsets`; otherwise returns `false`. | +| **Parameters** | `podSetName string` – name to look for.
`managedPodSet []configuration.ManagedDeploymentsStatefulsets` – slice containing known managed objects. | +| **Return value** | `bool` – `true` when a match is found, `false` otherwise. | +| **Key dependencies** | • Accesses the `Name` field of each element in `managedPodSet`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by scaling tests to decide whether a deployment or statefulset should be considered for scaling checks. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Iterate["Iterate over managedPodSet"] + Iterate --> Check{"ps.Name == podSetName"} + Check -- Yes --> ReturnTrue["return true"] + Check -- No --> Next["continue loop"] + Next --> EndLoop["End of slice?"] + EndLoop -- No --> Iterate + EndLoop -- Yes --> ReturnFalse["return false"] +``` + +--- + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +```mermaid +graph TD + func_IsManaged --> None +``` + +--- + +#### Functions calling `IsManaged` (Mermaid) + +```mermaid +graph TD + func_testDeploymentScaling --> func_IsManaged + func_testStatefulSetScaling --> func_IsManaged +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking IsManaged +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/configuration" +) + +func main() { + // Example list of managed objects + managed := []configuration.ManagedDeploymentsStatefulsets{ + {Name: "frontend"}, + {Name: "backend"}, + } + + // Check if a pod set is managed + if scaling.IsManaged("frontend", managed) { + fmt.Println("This pod set is managed.") + } else { + fmt.Println("Not a managed pod set.") + } +} +``` + +--- + +### TestScaleCrd + +**TestScaleCrd** - Attempts to scale a Custom Resource up and down by one replica, verifying that scaling operations succeed. 
+ +#### Signature (Go) + +```go +func (*provider.CrScale, schema.GroupResource, time.Duration, *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Attempts to scale a Custom Resource up and down by one replica, verifying that scaling operations succeed. | +| **Parameters** | `crScale` – pointer to the CR being tested.
`groupResourceSchema` – Kubernetes Group‑Resource of the CR.
`timeout` – maximum duration for waiting on scaling completion.
`logger` – logger for diagnostic messages. | +| **Return value** | `bool` – `true` if both scale‑up and scale‑down succeeded; otherwise `false`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `scaleCrHelper(...)` (internal helper that performs the actual scaling)
• Methods of `crScale`: `GetName()`, `GetNamespace()`, `Spec.Replicas` | +| **Side effects** | Calls to Kubernetes API via the dynamic scaler; logs errors and status messages. No global state mutation beyond those calls. | +| **How it fits the package** | Provides the core logic used by higher‑level test functions (`testScaleCrd`) to validate that a CR supports scaling without HPA. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check crScale not nil"] --> B{"Replicas <= 1"} + B -- Yes --> C["Increment replicas"] + C --> D["scaleCrHelper(scale up)"] + D --> E{"Success?"} + E -- No --> F["Log error & return false"] + E -- Yes --> G["Decrement replicas"] + G --> H["scaleCrHelper(scale down)"] + H --> I{"Success?"} + I -- No --> J["Log error & return false"] + I -- Yes --> K["Return true"] + B -- No --> L["Decrement replicas"] + L --> M["scaleCrHelper(scale down)"] + M --> N{"Success?"} + N -- No --> O["Log error & return false"] + N -- Yes --> P["Increment replicas"] + P --> Q["scaleCrHelper(scale up)"] + Q --> R{"Success?"} + R -- No --> S["Log error & return false"] + R -- Yes --> K +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestScaleCrd --> clientsholder.GetClientsHolder + func_TestScaleCrd --> scaleCrHelper + func_TestScaleCrd --> crScale.GetName + func_TestScaleCrd --> crScale.GetNamespace +``` + +#### Functions calling `TestScaleCrd` (Mermaid) + +```mermaid +graph TD + scaling.testScaleCrd --> func_TestScaleCrd +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestScaleCrd +import ( + "time" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" + "k8s.io/apimachinery/pkg/runtime/schema" + log "github.com/sirupsen/logrus" +) + +func main() { + // Assume crScale is obtained from somewhere + var crScale *provider.CrScale + groupRes := schema.GroupResource{Group: "example.com", Resource: 
"widgets"} + timeout := 30 * time.Second + logger := log.New() + + success := scaling.TestScaleCrd(crScale, groupRes, timeout, logger) + if success { + println("Scaling test passed") + } else { + println("Scaling test failed") + } +} +``` + +--- + +### TestScaleDeployment + +**TestScaleDeployment** - Determines whether a Kubernetes `Deployment` can be scaled up and down reliably when not managed by a Horizontal Pod Autoscaler. It performs a scale‑up followed by a scale‑down (or vice versa) and reports success or failure. + +#### Signature (Go) + +```go +func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, logger *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a Kubernetes `Deployment` can be scaled up and down reliably when not managed by a Horizontal Pod Autoscaler. It performs a scale‑up followed by a scale‑down (or vice versa) and reports success or failure. | +| **Parameters** | `deployment *appsv1.Deployment` – the Deployment to test; `timeout time.Duration` – maximum wait for readiness after each scaling operation; `logger *log.Logger` – logger used for informational and error messages. | +| **Return value** | `bool` – `true` if both scale operations succeeded, otherwise `false`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains Kubernetes client set.
• `scaleDeploymentHelper` – performs the actual scaling logic with retries and readiness checks. | +| **Side effects** | Modifies the number of replicas in the Deployment during the test; logs progress and errors; does not persist changes beyond the test’s scope. | +| **How it fits the package** | Provides core functionality for non‑HPA scaling tests used by higher‑level test orchestration functions such as `testDeploymentScaling`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Determine current replicas"} + B -->|"<=1"| C["Scale up"] + C --> D["Call scaleDeploymentHelper (up)"] + D --> E{"Success?"} + E -->|"No"| F["Log error, return false"] + E -->|"Yes"| G["Scale down"] + G --> H["Call scaleDeploymentHelper (down)"] + H --> I{"Success?"} + I -->|"No"| J["Log error, return false"] + I -->|"Yes"| K["Return true"] + B -->|">1"| L["Scale down"] + L --> M["Call scaleDeploymentHelper (down)"] + M --> N{"Success?"} + N -->|"No"| O["Log error, return false"] + N -->|"Yes"| P["Scale up"] + P --> Q["Call scaleDeploymentHelper (up)"] + Q --> R{"Success?"} + R -->|"No"| S["Log error, return false"] + R -->|"Yes"| T["Return true"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestScaleDeployment --> func_GetClientsHolder + func_TestScaleDeployment --> func_scaleDeploymentHelper +``` + +#### Functions calling `TestScaleDeployment` (Mermaid) + +```mermaid +graph TD + func_testDeploymentScaling --> func_TestScaleDeployment +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestScaleDeployment +import ( + "time" + logpkg "github.com/redhat-best-practices-for-k8s/certsuite/internal/logging" + appsv1 "k8s.io/api/apps/v1" +) + +func main() { + // Assume deployment is fetched elsewhere + var deployment *appsv1.Deployment + + logger := logpkg.NewLogger() + timeout := 2 * time.Minute + + success := scaling.TestScaleDeployment(deployment, timeout, logger) + if success { + fmt.Println("Scaling test 
passed") + } else { + fmt.Println("Scaling test failed") + } +} +``` + +--- + +### TestScaleHPACrd + +**TestScaleHPACrd** - Validates that a CR can be scaled via its HPA. It performs an up‑scale followed by a down‑scale (or vice versa), then restores the original HPA limits. + +Performs a scaling test on a Custom Resource (CR) that is managed by a HorizontalPodAutoscaler (HPA). The function scales the CR up and down through the HPA, verifying that the HPA updates correctly and that the CR reaches the desired state. + +--- + +#### Signature (Go) + +```go +func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that a CR can be scaled via its HPA. It performs an up‑scale followed by a down‑scale (or vice versa), then restores the original HPA limits. | +| **Parameters** | `cr *provider.CrScale` – the CR to test.
`hpa *scalingv1.HorizontalPodAutoscaler` – the HPA controlling the CR.
`groupResourceSchema schema.GroupResource` – Kubernetes resource descriptor used for scaling checks.
`timeout time.Duration` – maximum wait time for each scaling operation.
`logger *log.Logger` – logger for debug/error output. | +| **Return value** | `bool` – `true` if all scaling steps succeeded, otherwise `false`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `hpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)`
• `scaleHpaCRDHelper` (internal helper for performing the actual HPA update and wait)
• Logging methods (`logger.Error`, `logger.Debug`) | +| **Side effects** | Calls the Kubernetes API to modify the HPA’s `minReplicas`/`maxReplicas`. No other global state is altered. | +| **How it fits the package** | Part of the `scaling` test suite; used by higher‑level tests (`testScaleCrd`) to verify that CRs with an HPA can be scaled correctly. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check cr != nil"] -->|"false"| B["Log error, return false"] + A -->|"true"| C["Get Kubernetes clients"] + C --> D["Retrieve namespace from CR"] + D --> E["Create HPA client for namespace"] + E --> F["Determine HPA minReplicas (default 1)"] + F --> G["Read CR replicas count"] + G --> H{"replicas <= 1"} + H -->|"yes"| I["Scale UP via scaleHpaCRDHelper"] + I --> J{"success?"} + J -->|"no"| K["Return false"] + J -->|"yes"| L["Scale DOWN via scaleHpaCRDHelper"] + L --> M{"success?"} + M -->|"no"| N["Return false"] + M -->|"yes"| O["Proceed"] + H -->|"no"| P["Scale DOWN via scaleHpaCRDHelper"] + P --> Q{"success?"} + Q -->|"no"| R["Return false"] + Q -->|"yes"| S["Scale UP via scaleHpaCRDHelper"] + S --> T{"success?"} + T -->|"no"| U["Return false"] + T -->|"yes"| O + O --> V["Restore original HPA min/max via scaleHpaCRDHelper"] + V --> W["Return true"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestScaleHPACrd --> clientsholder.GetClientsHolder + func_TestScaleHPACrd --> scalingv1.HorizontalPodAutoscaler.Spec.MinReplicas + func_TestScaleHPACrd --> scaleHpaCRDHelper +``` + +--- + +#### Functions calling `TestScaleHPACrd` (Mermaid) + +```mermaid +graph TD + testScaleCrd --> func_TestScaleHPACrd +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking TestScaleHPACrd +package main + +import ( + "time" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" + 
"k8s.io/apimachinery/pkg/runtime/schema" +) + +func main() { + cr := &provider.CrScale{ /* populate fields */ } + hpa := &scalingv1.HorizontalPodAutoscaler{ /* populate fields */ } + groupRes := schema.GroupResource{Group: "apps", Resource: "deployments"} + timeout := 5 * time.Minute + logger := log.New(os.Stdout, "", log.LstdFlags) + + success := scaling.TestScaleHPACrd(cr, hpa, groupRes, timeout, logger) + if success { + fmt.Println("Scaling test passed") + } else { + fmt.Println("Scaling test failed") + } +} +``` + +--- + +--- + +### TestScaleHpaDeployment + +**TestScaleHpaDeployment** - Verifies that a Deployment managed by an HPA can be scaled up and down correctly while maintaining its desired replica count. + +#### Signature (Go) + +```go +func (*provider.Deployment, *v1autoscaling.HorizontalPodAutoscaler, time.Duration, *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that a Deployment managed by an HPA can be scaled up and down correctly while maintaining its desired replica count. | +| **Parameters** | `deployment` – Deployment to test; `hpa` – the owning HorizontalPodAutoscaler; `timeout` – maximum wait for readiness; `logger` – logging helper. | +| **Return value** | `bool` – `true` if all scaling steps succeeded, otherwise `false`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – retrieves Kubernetes clients.
• `clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)` – HPA client.
• `scaleHpaDeploymentHelper` – performs the actual scaling and readiness checks. | +| **Side effects** | Modifies the HPA’s `minReplicas` and `maxReplicas`, updates Deployment replica counts, writes log messages, waits for pod set readiness within a timeout. | +| **How it fits the package** | Implements core logic used by higher‑level tests (`testDeploymentScaling`) to confirm that HPA‑controlled workloads behave correctly during scale operations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Retrieve Kubernetes clients"] --> B{"Determine min replicas"} + B -->|"Non‑nil"| C["min = *hpa.Spec.MinReplicas"] + B -->|"Nil"| D["min = 1"] + C & D --> E["Read current deployment replicas"] + E --> F{"Replica count ≤ 1?"} + F -->|"Yes"| G["Scale UP: set replicas to min+1"] + G --> H["Call scaleHpaDeploymentHelper"] + H --> I{"Success?"} + I -->|"No"| J["Return false"] + I -->|"Yes"| K["Scale DOWN: restore original min/max"] + K --> L["Call scaleHpaDeploymentHelper"] + L --> M{"Success?"} + M -->|"No"| N["Return false"] + M -->|"Yes"| O["Return true"] + F -->|"No"| P["Scale DOWN: replicas‑1"] + P --> Q["Call scaleHpaDeploymentHelper"] + Q --> R{"Success?"} + R -->|"No"| S["Return false"] + R -->|"Yes"| T["Scale UP: replicas+1"] + T --> U["Call scaleHpaDeploymentHelper"] + U --> V{"Success?"} + V -->|"No"| W["Return false"] + V -->|"Yes"| X["Restore min/max via helper"] + X --> Y["Return true"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestScaleHpaDeployment --> func_GetClientsHolder + func_TestScaleHpaDeployment --> func_HorizontalPodAutoscalers + func_TestScaleHpaDeployment --> func_scaleHpaDeploymentHelper +``` + +#### Functions calling `TestScaleHpaDeployment` (Mermaid) + +```mermaid +graph TD + func_testDeploymentScaling --> func_TestScaleHpaDeployment +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestScaleHpaDeployment +import ( + "time" + log "github.com/sirupsen/logrus" + + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/provider" + v1autoscaling "k8s.io/api/autoscaling/v1" +) + +func main() { + // Assume deployment and hpa have been fetched from the cluster + var dep *provider.Deployment // Deployment under test + var hpa *v1autoscaling.HorizontalPodAutoscaler // Owning HPA + + logger := log.New() + timeout := 2 * time.Minute + + success := scaling.TestScaleHpaDeployment(dep, hpa, timeout, logger) + if !success { + logger.Error("Scaling test failed") + } else { + logger.Info("Scaling test succeeded") + } +} +``` + +--- + +--- + +### TestScaleHpaStatefulSet + +**TestScaleHpaStatefulSet** - Attempts to scale a StatefulSet up and down using its Horizontal Pod Autoscaler (HPA), ensuring the HPA’s `minReplicas`/`maxReplicas` are respected and that the StatefulSet becomes ready after each operation. Returns `true` if all scaling steps succeed. + +#### Signature (Go) + +```go +func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Attempts to scale a StatefulSet up and down using its Horizontal Pod Autoscaler (HPA), ensuring the HPA’s `minReplicas`/`maxReplicas` are respected and that the StatefulSet becomes ready after each operation. Returns `true` if all scaling steps succeed. | +| **Parameters** | *statefulset* `*appsv1.StatefulSet` – target StatefulSet.
*hpa* `*v1autoscaling.HorizontalPodAutoscaler` – HPA controlling the StatefulSet.
*timeout* `time.Duration` – maximum wait for readiness after each scale.
*logger* `*log.Logger` – logger for debug and error messages. | +| **Return value** | `bool` – `true` when every scaling operation succeeded, otherwise `false`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(namespace)`
• `scaleHpaStatefulSetHelper` (internal retry‑and‑wait helper)
• Kubernetes client-go for HPA get/update and StatefulSet readiness checks | +| **Side effects** | Mutates the HPA’s `Spec.MinReplicas` and `Spec.MaxReplicas`. Triggers actual scaling of the StatefulSet via the HPA. Logs debug/error messages but does not modify global state beyond these operations. | +| **How it fits the package** | Part of the lifecycle scaling tests; used by higher‑level test functions to confirm that an HPA‑managed StatefulSet can be scaled reliably. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get Kubernetes clients"] --> B["Determine hpaName, namespace"] + B --> C["Retrieve current replicas & min/max from statefulset/HPA"] + C --> D{"replicas ≤ 1?"} + D -- Yes --> E["Scale up to replicas+1 via helper"] + E --> F["Check success"] + F -- No --> G["Return false"] + F -- Yes --> H["Scale down to replicas via helper"] + H --> I["Check success"] + I -- No --> G + I -- Yes --> J["Proceed"] + D -- No --> K["Scale down to replicas-1 via helper"] + K --> L["Check success"] + L -- No --> G + L -- Yes --> M["Scale up to original replicas via helper"] + M --> N["Check success"] + N -- No --> G + N -- Yes --> J + J --> O["Restore HPA min/max via helper"] + O --> P{"Success?"} + P -- Yes --> Q["Return true"] + P -- No --> G +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestScaleHpaStatefulSet --> clientsholder.GetClientsHolder + func_TestScaleHpaStatefulSet --> scaleHpaStatefulSetHelper + func_TestScaleHpaStatefulSet --> log.Logger +``` + +#### Functions calling `TestScaleHpaStatefulSet` (Mermaid) + +```mermaid +graph TD + testStatefulSetScaling --> TestScaleHpaStatefulSet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestScaleHpaStatefulSet +import ( + "time" + log "github.com/sirupsen/logrus" + + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" +) + +func main() { + logger := log.New() + // Assume statefulSet and hpa are obtained from a client or fixture 
+ var statefulSet *appsv1.StatefulSet + var hpa *autoscalingv1.HorizontalPodAutoscaler + + timeout := 2 * time.Minute + success := scaling.TestScaleHpaStatefulSet(statefulSet, hpa, timeout, logger) + if success { + logger.Info("Scaling test passed") + } else { + logger.Error("Scaling test failed") + } +} +``` + +--- + +### TestScaleStatefulSet + +**TestScaleStatefulSet** - Performs a basic scale test on a StatefulSet: scales it up if replicas ≤ 1, otherwise scales down. After each operation it waits for readiness via `scaleStatefulsetHelper`. Returns `true` only when both operations succeed. + +#### Signature (Go) + +```go +func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration, logger *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Performs a basic scale test on a StatefulSet: scales it up if replicas ≤ 1, otherwise scales down. After each operation it waits for readiness via `scaleStatefulsetHelper`. Returns `true` only when both operations succeed. | +| **Parameters** | `statefulset *appsv1.StatefulSet` – target object;
`timeout time.Duration` – maximum wait for ready state;
`logger *log.Logger` – logger for debug/error output. | +| **Return value** | `bool` – `true` if scaling succeeded, `false` otherwise. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` to obtain Kubernetes clients.
• `scaleStatefulsetHelper` to perform the actual scale operation and wait for readiness.
• `log.Logger` methods (`Debug`, `Error`). | +| **Side effects** | Modifies the StatefulSet’s replica count in the cluster; writes logs. No other external state is altered. | +| **How it fits the package** | Provides the low‑level scaling routine used by higher‑level tests (e.g., `testStatefulSetScaling`) to verify that a StatefulSet can be scaled when not managed by an HPA. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get Kubernetes client"] --> B["Retrieve namespace & name"] + B --> C["Determine current replicas"] + C --> D{"replicas <= 1?"} + D -- Yes --> E["Increment replicas"] + E --> F["scaleStatefulsetHelper (up)"] + F --> G{"success?"} + G -- No --> H["Log error, return false"] + G -- Yes --> I["Decrement replicas"] + I --> J["scaleStatefulsetHelper (down)"] + J --> K{"success?"} + K -- No --> L["Log error, return false"] + K -- Yes --> M["Return true"] + D -- No --> N["Decrement replicas"] + N --> O["scaleStatefulsetHelper (down)"] + O --> P{"success?"} + P -- No --> Q["Log error, return false"] + P -- Yes --> R["Increment replicas"] + R --> S["scaleStatefulsetHelper (up)"] + S --> T{"success?"} + T -- No --> U["Log error, return false"] + T -- Yes --> M +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestScaleStatefulSet --> func_GetClientsHolder + func_TestScaleStatefulSet --> func_scaleStatefulsetHelper +``` + +#### Functions calling `TestScaleStatefulSet` (Mermaid) + +```mermaid +graph TD + func_testStatefulSetScaling --> func_TestScaleStatefulSet +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestScaleStatefulSet +import ( + "log" + "time" + + appsv1 "k8s.io/api/apps/v1" +) + +func main() { + // Assume statefulset is obtained from the cluster + var ss *appsv1.StatefulSet + + logger := log.New(os.Stdout, "", log.LstdFlags) + timeout := 5 * time.Minute + + success := scaling.TestScaleStatefulSet(ss, timeout, logger) + if !success { + log.Println("Scaling test failed") + } else 
{ + log.Println("Scaling test succeeded") + } +} +``` + +--- + +## Local Functions + +### scaleCrHelper + +**scaleCrHelper** - Adjusts the replica count of a CRD instance and waits for the scaling operation to complete. + +#### 1️⃣ Signature + +```go +func(scale.ScalesGetter, schema.GroupResource, *provider.CrScale, int32, bool, time.Duration, *log.Logger) bool +``` + +#### 2️⃣ Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Adjusts the replica count of a CRD instance and waits for the scaling operation to complete. | +| **Parameters** | `scalesGetter` – client providing scale operations
`rc` – resource group and resource (e.g., `schema.GroupResource{Group: "apps", Resource: "deployments"}`)<br/>
`autoscalerpram` – pointer to the CR object being scaled
`replicas` – target replica count
`up` – flag indicating scaling direction (true for up, false for down)
`timeout` – maximum wait time for scaling to finish
`logger` – logger for debug and error output | +| **Return value** | `bool` – `true` if scaling succeeded; otherwise `false`. | +| **Key dependencies** | • `retry.RetryOnConflict` (exponential back‑off retry)
• `scalesGetter.Scales(...).Get()` & `.Update()` for fetching and updating the scale object
• `podsets.WaitForScalingToComplete()` to poll until desired state is reached | +| **Side effects** | • Mutates the CR’s scale specification via an Update call.
• Logs debug messages and errors; may retry on conflicts. | +| **How it fits the package** | Supports test harness scaling logic by providing a reusable helper for up‑/down scaling of arbitrary CRDs within the `scaling` test suite. | + +#### 3️⃣ Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Scaling direction?"} + B -- Up --> C["Log “Scale UP”"] + B -- Down --> D["Log “Scale DOWN”"] + C & D --> E["RetryOnConflict loop"] + E --> F["Get current scale object"] + F --> G["Set Spec.Replicas = target"] + G --> H["Update scale object"] + H --> I{"Update succeeded?"} + I -- No --> J["Log error, return failure"] + I -- Yes --> K["WaitForScalingToComplete"] + K --> L{"Completed?"} + L -- No --> M["Log error, return failure"] + L -- Yes --> N["Return success"] +``` + +#### 4️⃣ Function dependencies (Mermaid) + +```mermaid +graph TD + func_scaleCrHelper --> func_retry.RetryOnConflict + func_scaleCrHelper --> func_scalesGetter.Scales.Get + func_scaleCrHelper --> func_scalesGetter.Scales.Update + func_scaleCrHelper --> func_podsets.WaitForScalingToComplete +``` + +#### 5️⃣ Functions calling `scaleCrHelper` (Mermaid) + +```mermaid +graph TD + func_TestScaleCrd --> func_scaleCrHelper +``` + +#### 6️⃣ Usage example (Go) + +```go +// Minimal example invoking scaleCrHelper +import ( + "time" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling" + "k8s.io/client-go/util/retry" +) + +func example() { + // Assume `scalesClient` implements scale.ScalesGetter, + // `resource` is a schema.GroupResource, and `cr` is *provider.CrScale. 
+ success := scaling.scaleCrHelper( + scalesClient, + resource, + cr, + 5, // target replicas + true, // scaling up + 30*time.Second, // timeout for wait + logger, // *log.Logger instance + ) + if !success { + fmt.Println("Scaling failed") + } +} +``` + +--- + +--- + +### scaleDeploymentHelper + +**scaleDeploymentHelper** - Adjusts a Deployment’s `spec.replicas` to the desired count, handling conflicts via retry and confirming pod readiness within a timeout. + +#### Signature (Go) + +```go +func scaleDeploymentHelper( + client typedappsv1.AppsV1Interface, + deployment *appsv1.Deployment, + replicas int32, + timeout time.Duration, + up bool, + logger *log.Logger, +) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Adjusts a Deployment’s `spec.replicas` to the desired count, handling conflicts via retry and confirming pod readiness within a timeout. | +| **Parameters** | `client` – Kubernetes AppsV1 client.
`deployment` – Target Deployment object.
`replicas` – Desired replica count.
`timeout` – Max duration to wait for pods to become ready.
`up` – Flag indicating whether scaling up or down (used only for logging).
`logger` – Logger for informational and error messages. | +| **Return value** | `bool` – `true` if the scale operation succeeded; `false` otherwise. | +| **Key dependencies** | • `retry.RetryOnConflict` from `k8s.io/client-go/util/retry`
• Deployment `Get` and `Update` via the client
• `podsets.WaitForDeploymentSetReady` to verify readiness
• Standard Go packages: `context`, `errors` | +| **Side effects** | Mutates the Deployment’s replica count in the cluster; performs network I/O to Kubernetes API; logs informational or error messages. | +| **How it fits the package** | Provides a low‑level helper used by higher‑level tests (e.g., `TestScaleDeployment`) to validate scaling behaviour without HPA. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Scaling direction"} + B -- up --> C["Log “Scale UP”"] + B -- down --> D["Log “Scale DOWN”"] + C & D --> E["retry.RetryOnConflict"] + subgraph retryLoop["Retry on conflict"] + E --> F["Get latest Deployment"] + F --> G{"Error?"} + G -- yes --> H["Log error, return err"] + G -- no --> I["Set dp.Spec.Replicas = &replicas"] + I --> J["Update Deployment"] + J --> K{"Update error?"} + K -- yes --> L["Log error, return err"] + K -- no --> M["WaitForDeploymentSetReady"] + M --> N{"Ready?"} + N -- no --> O["Log error, return new error"] + N -- yes --> P["Return nil"] + end + E --> Q{"Retry result"} + Q -- err --> R["Log final error, return false"] + Q -- nil --> S["Return true"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_scaleDeploymentHelper --> retry_RetryOnConflict + func_scaleDeploymentHelper --> client_Deployments_Get + func_scaleDeploymentHelper --> client_Deployments_Update + func_scaleDeploymentHelper --> podsets_WaitForDeploymentSetReady + func_scaleDeploymentHelper --> context_TODO + func_scaleDeploymentHelper --> errors_New +``` + +#### Functions calling `scaleDeploymentHelper` + +```mermaid +graph TD + func_TestScaleDeployment --> func_scaleDeploymentHelper +``` + +#### Usage example (Go) + +```go +// Minimal example invoking scaleDeploymentHelper +import ( + "log" + "time" + + appsv1 "k8s.io/api/apps/v1" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" +) + +func example(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment) { + logger := log.New(os.Stdout, "", log.LstdFlags) + // Scale 
up to 5 replicas + success := scaleDeploymentHelper(client, deployment, 5, 2*time.Minute, true, logger) + if !success { + logger.Println("Scaling failed") + } +} +``` + +--- + +### scaleHpaCRDHelper + +**scaleHpaCRDHelper** - Updates the `minReplicas` and `maxReplicas` fields of a HorizontalPodAutoscaler (HPA), retries on conflicts, then waits for the associated CustomResource to reach the desired state. Returns `true` if successful. + +#### 1) Signature (Go) + +```go +func scaleHpaCRDHelper( + hpscaler hps.HorizontalPodAutoscalerInterface, + hpaName string, + crName string, + namespace string, + min int32, + max int32, + timeout time.Duration, + groupResourceSchema schema.GroupResource, + logger *log.Logger, +) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Updates the `minReplicas` and `maxReplicas` fields of a HorizontalPodAutoscaler (HPA), retries on conflicts, then waits for the associated CustomResource to reach the desired state. Returns `true` if successful. | +| **Parameters** | `hpscaler` – HPA client interface
`hpaName` – name of the HPA resource
`crName` – name of the related custom resource
`namespace` – Kubernetes namespace
`min`, `max` – desired replica bounds
`timeout` – maximum wait time for scaling to complete
`groupResourceSchema` – schema of the CRD to query during wait
`logger` – logger for error reporting |
+| **Return value** | `bool`: `true` if the scale operation succeeded, otherwise `false`. |
+| **Key dependencies** | • `k8s.io/client-go/util/retry.RetryOnConflict`<br/>
• `hpscaler.Get`, `hpscaler.Update`<br/>
• `podsets.WaitForScalingToComplete`<br/>
* `errors.New` | +| **Side effects** | Mutates the HPA object in the cluster; may block until scaling completes or timeout. Logs errors via `logger`. | +| **How it fits the package** | Helper used by test functions to perform controlled scaling of HPAs during lifecycle tests, ensuring CRs reflect the new replica counts before proceeding. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"RetryOnConflict"} + B -->|"Success"| C["Get HPA"] + C --> D{"Error?"} + D -- Yes --> E["Log error & return err"] + D -- No --> F["Update Spec.MinReplicas"] + F --> G["Update Spec.MaxReplicas"] + G --> H["Update HPA via API"] + H --> I{"Error?"} + I -- Yes --> J["Log error & return err"] + I -- No --> K["WaitForScalingToComplete"] + K --> L{"Success?"} + L -- No --> M["Log error, return new error"] + L -- Yes --> N["Return nil"] + B -->|"Retry"| B + N --> O["End (true)"] + E --> P["End (false)"] + J --> P + M --> P +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_scaleHpaCRDHelper --> retry_RetryOnConflict + func_scaleHpaCRDHelper --> hpscaler_Get + func_scaleHpaCRDHelper --> hpscaler_Update + func_scaleHpaCRDHelper --> podsets_WaitForScalingToComplete + func_scaleHpaCRDHelper --> errors_New +``` + +#### 5) Functions calling `scaleHpaCRDHelper` (Mermaid) + +```mermaid +graph TD + func_TestScaleHPACrd --> func_scaleHpaCRDHelper +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking scaleHpaCRDHelper +import ( + "time" + hps "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Assume hpscaler is an initialized HorizontalPodAutoscalerInterface. 
+var hpscaler hps.HorizontalPodAutoscalerInterface +var logger *log.Logger = log.New() +var groupResource schema.GroupResource + +success := scaleHpaCRDHelper( + hpscaler, + "my-hpa", + "my-cr", + "default", + 2, // min replicas + 5, // max replicas + 30*time.Second, // timeout for scaling to complete + groupResource, + logger, +) + +if success { + fmt.Println("HPA scaled successfully") +} else { + fmt.Println("Failed to scale HPA") +} +``` + +--- + +### scaleHpaDeploymentHelper + +**scaleHpaDeploymentHelper** - Updates a Horizontal Pod Autoscaler (HPA) to new minimum and maximum replica counts, retries on conflict, then waits until the associated deployment becomes ready. Returns `true` if all steps succeed. + +#### Signature (Go) + +```go +func scaleHpaDeploymentHelper( + hpscaler hps.HorizontalPodAutoscalerInterface, + hpaName, deploymentName, namespace string, + min, max int32, + timeout time.Duration, + logger *log.Logger, +) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Updates a Horizontal Pod Autoscaler (HPA) to new minimum and maximum replica counts, retries on conflict, then waits until the associated deployment becomes ready. Returns `true` if all steps succeed. | +| **Parameters** | `hpscaler` – HPA client interface.
`hpaName` – name of the target HPA.
`deploymentName` – name of the deployment controlled by the HPA.
`namespace` – Kubernetes namespace.
`min`, `max` – desired replica bounds.
`timeout` – maximum wait time for deployment readiness.
`logger` – logger for diagnostics. | +| **Return value** | `bool`: `true` on success, `false` if any retry or update fails. | +| **Key dependencies** | • `k8s.io/client-go/util/retry.RetryOnConflict`
• `hpscaler.Get` / `Update`
• `podsets.WaitForDeploymentSetReady` | +| **Side effects** | Mutates the HPA spec (`MinReplicas`, `MaxReplicas`) and triggers a deployment readiness check. Logs errors and state changes. | +| **How it fits the package** | Central helper for tests that simulate scaling of deployments via HPAs, ensuring consistency between autoscaler configuration and actual pod counts. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> RetryOnConflict["Retry on conflict"] + RetryOnConflict --> GetHPA["Get HPA"] + GetHPA --> UpdateSpec["Set Min/Max Replicas"] + UpdateSpec --> UpdateHPA["Update HPA"] + UpdateHPA --> WaitReady["Wait for Deployment Ready"] + WaitReady --> Success{"Success?"} + Success -- Yes --> End + Success -- No --> LogError["Log error"] + LogError --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_scaleHpaDeploymentHelper --> retry.RetryOnConflict + func_scaleHpaDeploymentHelper --> hpscaler.Get + func_scaleHpaDeploymentHelper --> hpscaler.Update + func_scaleHpaDeploymentHelper --> podsets.WaitForDeploymentSetReady +``` + +#### Functions calling `scaleHpaDeploymentHelper` (Mermaid) + +```mermaid +graph TD + func_TestScaleHpaDeployment --> func_scaleHpaDeploymentHelper +``` + +#### Usage example (Go) + +```go +// Minimal example invoking scaleHpaDeploymentHelper +import ( + "log" + hps "k8s.io/client-go/kubernetes/typed/autoscaling/v1" +) + +func demo(hpscaler hps.HorizontalPodAutoscalerInterface, logger *log.Logger) { + ok := scaleHpaDeploymentHelper( + hpscaler, + "my-hpa", // HPA name + "my-deploy", // Deployment name + "default", // Namespace + 2, // Desired min replicas + 5, // Desired max replicas + 30*time.Second, // Timeout for deployment readiness + logger, + ) + if !ok { + log.Println("Scaling operation failed") + } +} +``` + +--- + +### scaleHpaStatefulSetHelper + +**scaleHpaStatefulSetHelper** - Atomically updates a Horizontal Pod Autoscaler's `MinReplicas` and `MaxReplicas`, then waits until 
the referenced StatefulSet reaches a ready state. Returns `true` on success, `false` otherwise. + +#### Signature (Go) + +```go +func scaleHpaStatefulSetHelper( + hpscaler hps.HorizontalPodAutoscalerInterface, + hpaName, statefulsetName, namespace string, + min, max int32, + timeout time.Duration, + logger *log.Logger) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Atomically updates a Horizontal Pod Autoscaler's `MinReplicas` and `MaxReplicas`, then waits until the referenced StatefulSet reaches a ready state. Returns `true` on success, `false` otherwise. | +| **Parameters** | *`hpscaler`* – HPA client interface.
*`hpaName`* – name of the target HPA.
*`statefulsetName`* – associated StatefulSet name.
*`namespace`* – Kubernetes namespace.
*`min`, `max`* – desired replica bounds.
*`timeout`* – maximum wait time for readiness.
*`logger`* – logger for diagnostics. | +| **Return value** | `bool`: `true` if HPA update and StatefulSet readiness succeeded; otherwise `false`. | +| **Key dependencies** | • `k8s.io/client-go/util/retry.RetryOnConflict`
• HPA client methods: `Get`, `Update`
• `github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/podsets.WaitForStatefulSetReady` | +| **Side effects** | Mutates the specified HPA’s replica limits; logs errors and debug information. No external I/O beyond Kubernetes API calls. | +| **How it fits the package** | Helper used by `TestScaleHpaStatefulSet` to test scaling scenarios, ensuring that HPA configuration changes propagate correctly and the StatefulSet stabilizes within a timeout. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Retry loop"} + B -->|"Success"| C["Get current HPA"] + C --> D["Update MinReplicas & MaxReplicas"] + D --> E["Send Update request"] + E --> F{"Update ok?"} + F -- Yes --> G["Wait for StatefulSet ready"] + G --> H{"Ready?"} + H -- Yes --> I["Return true"] + H -- No --> J["Log error"] + J --> I + F -- No --> K["Log error"] + K --> L["Return false"] + B -- Retry exhausted --> M["Log failure"] + M --> N["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_scaleHpaStatefulSetHelper --> retry_RetryOnConflict + func_scaleHpaStatefulSetHelper --> hpscaler_Get + func_scaleHpaStatefulSetHelper --> hpscaler_Update + func_scaleHpaStatefulSetHelper --> podsets_WaitForStatefulSetReady +``` + +#### Functions calling `scaleHpaStatefulSetHelper` (Mermaid) + +```mermaid +graph TD + TestScaleHpaStatefulSet --> scaleHpaStatefulSetHelper +``` + +#### Usage example (Go) + +```go +// Minimal example invoking scaleHpaStatefulSetHelper +import ( + "log" + "time" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + hps "k8s.io/client-go/kubernetes/typed/autoscaling/v1" +) + +// Assume `hpscaler` is a HorizontalPodAutoscalerInterface instance. +func example(hpscaler hps.HorizontalPodAutoscalerInterface) bool { + logger := log.Default() + // Scale HPA to 2 replicas min/max in namespace "demo". 
+ return scaleHpaStatefulSetHelper( + hpscaler, + "my-hpa", // HPA name + "my-statefulset", // StatefulSet name + "demo", // Namespace + 2, // MinReplicas + 2, // MaxReplicas + 30*time.Second, // Timeout for readiness + logger, + ) +} +``` + +--- + +--- + +### scaleStatefulsetHelper + +**scaleStatefulsetHelper** - Scales a StatefulSet to the desired replica count, retrying on conflicts and waiting for readiness. + +#### 1) Signature (Go) + +```go +func scaleStatefulsetHelper( + clients *clientsholder.ClientsHolder, + ssClient v1.StatefulSetInterface, + statefulset *appsv1.StatefulSet, + replicas int32, + timeout time.Duration, + logger *log.Logger, +) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Scales a StatefulSet to the desired replica count, retrying on conflicts and waiting for readiness. | +| **Parameters** | `clients` – Kubernetes client holder.
`ssClient` – StatefulSet client interface.
`statefulset` – target StatefulSet object.
`replicas` – desired replica number.
`timeout` – maximum wait time for readiness.
`logger` – logger for diagnostics. | +| **Return value** | `bool` – true if scaling succeeded, false otherwise. | +| **Key dependencies** | • `retry.RetryOnConflict`
• `ssClient.Get`
• `clients.K8sClient.AppsV1().StatefulSets(namespace).Update`
• `podsets.WaitForStatefulSetReady`
• `errors.New` | +| **Side effects** | Mutates the StatefulSet’s spec on the cluster; performs network I/O; logs progress. | +| **How it fits the package** | Helper for tests that perform manual scaling of StatefulSets without HPA, ensuring readiness before proceeding. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"RetryOnConflict"} + B --> C["Get latest StatefulSet"] + C --> D["Update replicas field"] + D --> E["Update on API server"] + E --> F{"WaitForStatefulSetReady"} + F --> G["Success?"] + G -- Yes --> H["Return true"] + G -- No --> I["Error, return false"] + B --> J["Retry or abort"] + J --> K["Log failure"] + K --> L["Return false"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_scaleStatefulsetHelper --> retry_RetryOnConflict + func_scaleStatefulsetHelper --> ssClient_Get + func_scaleStatefulsetHelper --> clients_K8sClient_AppV1_StatefulSets_Update + func_scaleStatefulsetHelper --> podsets_WaitForStatefulSetReady + func_scaleStatefulsetHelper --> errors_New +``` + +#### 5) Functions calling `scaleStatefulsetHelper` (Mermaid) + +```mermaid +graph TD + TestScaleStatefulSet --> func_scaleStatefulsetHelper +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking scaleStatefulsetHelper +package main + +import ( + "log" + "time" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + clientsholder "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/scaling/clientsholder" +) + +func main() { + clients := clientsholder.GetClientsHolder() + ssClient := clients.K8sClient.AppsV1().StatefulSets("default") + statefulset := &appsv1.StatefulSet{ /* populate fields */ } + replicas := int32(3) + timeout := 5 * time.Minute + logger := log.Default() + + success := scaleStatefulsetHelper(clients, ssClient, statefulset, replicas, timeout, logger) + if !success { + logger.Println("Scaling failed") + } else { + logger.Println("Scaling succeeded") + 
} +} +``` + +--- diff --git a/docs/tests/lifecycle/tolerations/tolerations.md b/docs/tests/lifecycle/tolerations/tolerations.md new file mode 100644 index 000000000..3df2c5eeb --- /dev/null +++ b/docs/tests/lifecycle/tolerations/tolerations.md @@ -0,0 +1,179 @@ +# Package tolerations + +**Path**: `tests/lifecycle/tolerations` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [IsTolerationDefault](#istolerationdefault) + - [IsTolerationModified](#istolerationmodified) + +## Overview + +Provides helper utilities for evaluating Kubernetes pod tolerations during tests, determining if a toleration is default or has been altered by the user. + +### Key Features + +- Identifies default tolerations by checking for the substring "node.kubernetes.io" in the key + +### Design Notes + +- Assumes default tolerations always contain the specific namespace substring +- Does not account for custom default tolerations added via admission controllers +- Best practice: use IsTolerationModified to detect any non‑default or changed toleration before asserting compliance + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func IsTolerationDefault(t corev1.Toleration) bool](#istolerationdefault) | Returns `true` when the toleration key contains the substring `"node.kubernetes.io"`, indicating it is one of the default tolerations that Kubernetes automatically adds to a pod. | +| [func IsTolerationModified(t corev1.Toleration, qosClass corev1.PodQOSClass) bool](#istolerationmodified) | Checks whether a given `corev1.Toleration` is *modified* compared to the tolerations automatically injected by Kubernetes. Returns `true` for any non‑default or altered toleration. 
| + +## Exported Functions + +### IsTolerationDefault + +**IsTolerationDefault** - Returns `true` when the toleration key contains the substring `"node.kubernetes.io"`, indicating it is one of the default tolerations that Kubernetes automatically adds to a pod. + +#### Signature (Go) + +```go +func IsTolerationDefault(t corev1.Toleration) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` when the toleration key contains the substring `"node.kubernetes.io"`, indicating it is one of the default tolerations that Kubernetes automatically adds to a pod. | +| **Parameters** | `t corev1.Toleration` – The toleration object being inspected. | +| **Return value** | `bool – true if the key indicates a default toleration, otherwise false`. | +| **Key dependencies** | • Calls `strings.Contains` from the standard library to search for `"node.kubernetes.io"` in the toleration key. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by higher‑level functions (e.g., `IsTolerationModified`) to quickly exclude default tolerations before performing more detailed checks on their properties. 
| + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive toleration t"] --> B{"Check if t.Key contains node.kubernetes.io"} + B -- Yes --> C["Return true"] + B -- No --> D["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_IsTolerationDefault --> func_strings_Contains +``` + +#### Functions calling `IsTolerationDefault` + +```mermaid +graph TD + func_IsTolerationModified --> func_IsTolerationDefault +``` + +#### Usage example (Go) + +```go +import ( + "k8s.io/api/core/v1" +) + +// Minimal example invoking IsTolerationDefault +func Example() { + t := v1.Toleration{ + Key: "node.kubernetes.io/not-ready", + Value: "", + } + if IsTolerationDefault(t) { + fmt.Println("This is a default toleration.") + } else { + fmt.Println("Custom toleration detected.") + } +} +``` + +--- + +### IsTolerationModified + +**IsTolerationModified** - Checks whether a given `corev1.Toleration` is *modified* compared to the tolerations automatically injected by Kubernetes. Returns `true` for any non‑default or altered toleration. + +#### Signature (Go) + +```go +func IsTolerationModified(t corev1.Toleration, qosClass corev1.PodQOSClass) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether a given `corev1.Toleration` is *modified* compared to the tolerations automatically injected by Kubernetes. Returns `true` for any non‑default or altered toleration. | +| **Parameters** | `t corev1.Toleration – the toleration to examine`
`qosClass corev1.PodQOSClass – QoS class of the pod (used for memory‑pressure logic)` | +| **Return value** | `bool – true if the toleration differs from the default set, false otherwise` | +| **Key dependencies** | *`IsTolerationDefault(t)` – quick check that the key contains `node.kubernetes.io`
* `corev1.TaintEffectNoExecute`, `corev1.TaintEffectNoSchedule`, `corev1.TaintEffectPreferNoSchedule` constants
* `corev1.TolerationOpExists` constant | +| **Side effects** | None – purely functional; no state mutation or I/O. | +| **How it fits the package** | Provides core logic for tests that validate pod tolerations against Kubernetes defaults, enabling higher‑level checks to flag non‑compliant pods. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"IsDefaultKey?"} + B -- No --> C["Return true"] + B -- Yes --> D{"Effect == NoExecute"} + D -- No --> E{"Effect == NoSchedule"} + D -- Yes --> F{"Key is notReady or unreachable"} + F -- No --> G["Return true"] + F -- Yes --> H{"Operator Exists & Seconds==300"} + H -- Yes --> I["Return false"] + H -- No --> J["Return true"] + E -- No --> K{"Effect == PreferNoSchedule"} + K -- Yes --> L["Return true"] + K -- No --> M["Check nonCompliantTolerations list"] + M --> N{"Found?"} + N -- Yes --> O["Return true"] + N -- No --> P["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsTolerationModified --> func_IsTolerationDefault +``` + +#### Functions calling `IsTolerationModified` (Mermaid) + +```mermaid +graph TD + func_testPodTolerationBypass --> func_IsTolerationModified +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsTolerationModified +import ( + "fmt" + + "k8s.io/api/core/v1" + tolerations "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/tolerations" +) + +func main() { + t := v1.Toleration{ + Key: "node.kubernetes.io/not-ready", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoExecute, + } + qos := v1.PodQOSBestEffort + modified := tolerations.IsTolerationModified(t, qos) + fmt.Printf("Is the toleration modified? 
%v\n", modified) +} +``` + +--- diff --git a/docs/tests/lifecycle/volumes/volumes.md b/docs/tests/lifecycle/volumes/volumes.md new file mode 100644 index 000000000..8eda8542c --- /dev/null +++ b/docs/tests/lifecycle/volumes/volumes.md @@ -0,0 +1,193 @@ +# Package volumes + +**Path**: `tests/lifecycle/volumes` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [IsPodVolumeReclaimPolicyDelete](#ispodvolumereclaimpolicydelete) +- [Local Functions](#local-functions) + - [getPVCFromSlice](#getpvcfromslice) + +## Overview + +Provides utilities for examining pod volume reclaim policies within Kubernetes tests, enabling verification that volumes are correctly configured for deletion. + +### Key Features + +- Determines if a pod’s backing PersistentVolume has the Delete reclamation policy +- Locates specific PersistentVolumeClaim objects by name in a slice +- Encapsulates logic for use in lifecycle test suites + +### Design Notes + +- Functions operate on corev1 types from k8s.io/api/core/v1 +- The reclaim check expects a volume, its PV list, and PVC list to cross‑reference ownership +- Best practice is to invoke IsPodVolumeReclaimPolicyDelete after ensuring the input slices are populated + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func IsPodVolumeReclaimPolicyDelete(vol *corev1.Volume, pvs []corev1.PersistentVolume, pvcs []corev1.PersistentVolumeClaim) bool](#ispodvolumereclaimpolicydelete) | Determines if a pod volume’s backing PV has `PersistentVolumeReclaimPolicyDelete`. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getPVCFromSlice(pvcs []corev1.PersistentVolumeClaim, pvcName string) *corev1.PersistentVolumeClaim](#getpvcfromslice) | Searches a slice of `PersistentVolumeClaim` objects for one whose name matches the supplied `pvcName` and returns a pointer to it. If no match is found, it returns `nil`. 
| + +## Exported Functions + +### IsPodVolumeReclaimPolicyDelete + +**IsPodVolumeReclaimPolicyDelete** - Determines if a pod volume’s backing PV has `PersistentVolumeReclaimPolicyDelete`. + +Checks whether a pod’s volume is backed by a persistent volume claim whose associated persistent volume has a reclaim policy of **Delete**. + +#### Signature (Go) + +```go +func IsPodVolumeReclaimPolicyDelete(vol *corev1.Volume, pvs []corev1.PersistentVolume, pvcs []corev1.PersistentVolumeClaim) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a pod volume’s backing PV has `PersistentVolumeReclaimPolicyDelete`. | +| **Parameters** | `vol *corev1.Volume` – the pod volume to evaluate.
`pvs []corev1.PersistentVolume` – all cluster PVs.
`pvcs []corev1.PersistentVolumeClaim` – all cluster PVCs. | +| **Return value** | `bool` – `true` if the PV’s reclaim policy is Delete, otherwise `false`. | +| **Key dependencies** | • `getPVCFromSlice(pvcs, vol.PersistentVolumeClaim.ClaimName)`
• Comparison of `pv.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimDelete` | +| **Side effects** | None (pure function). | +| **How it fits the package** | Supports compliance checks for pod volume reclaim policies within the lifecycle tests. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"PVC exists?"} + B -- Yes --> C{"Find matching PV"} + C --> D{"PV policy is Delete?"} + D -- Yes --> E["Return true"] + D -- No --> F["Continue search"] + C -- Not found --> G["End with false"] + B -- No --> H["End with false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_IsPodVolumeReclaimPolicyDelete --> func_getPVCFromSlice +``` + +#### Functions calling `IsPodVolumeReclaimPolicyDelete` + +```mermaid +graph TD + func_testPodPersistentVolumeReclaimPolicy --> func_IsPodVolumeReclaimPolicyDelete +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsPodVolumeReclaimPolicyDelete +import ( + corev1 "k8s.io/api/core/v1" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/lifecycle/volumes" +) + +func main() { + vol := &corev1.Volume{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-pvc", + }, + } + + // Assume pvs and pvcs are populated from the cluster + var pvs []corev1.PersistentVolume + var pvcs []corev1.PersistentVolumeClaim + + ok := volumes.IsPodVolumeReclaimPolicyDelete(vol, pvs, pvcs) + if ok { + fmt.Println("Volume uses a PV with Delete reclaim policy") + } else { + fmt.Println("Volume does NOT use a PV with Delete reclaim policy") + } +} +``` + +--- + +## Local Functions + +### getPVCFromSlice + +**getPVCFromSlice** - Searches a slice of `PersistentVolumeClaim` objects for one whose name matches the supplied `pvcName` and returns a pointer to it. If no match is found, it returns `nil`. 
+ +#### 1) Signature (Go) + +```go +func getPVCFromSlice(pvcs []corev1.PersistentVolumeClaim, pvcName string) *corev1.PersistentVolumeClaim +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Searches a slice of `PersistentVolumeClaim` objects for one whose name matches the supplied `pvcName` and returns a pointer to it. If no match is found, it returns `nil`. | +| **Parameters** | `pvcs []corev1.PersistentVolumeClaim` – collection of PVCs;
`pvcName string` – target claim name | +| **Return value** | `*corev1.PersistentVolumeClaim` – pointer to the matched PVC or `nil` if absent | +| **Key dependencies** | Uses only standard Go looping and comparison; relies on the `corev1` types from the Kubernetes API. | +| **Side effects** | No mutation of inputs; purely read‑only lookup. | +| **How it fits the package** | Provides a helper for other functions in the `volumes` test package to resolve PVC references when inspecting pod volumes and reclaim policies. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Iterate{"Iterate over pvcs"} + Iterate -- match? yes --> Return[Return &pvcs["i"]] + Iterate -- match? no --> Continue["continue loop"] + Continue --> End["Return nil"] +``` + +#### 4) Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 5) Functions calling `getPVCFromSlice` (Mermaid) + +```mermaid +graph TD + func_IsPodVolumeReclaimPolicyDelete --> func_getPVCFromSlice +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getPVCFromSlice +import ( + corev1 "k8s.io/api/core/v1" +) + +func example() { + // Sample PVC slice + pvcs := []corev1.PersistentVolumeClaim{ + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-foo"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-bar"}}, + } + + // Look up a specific PVC by name + pvc := getPVCFromSlice(pvcs, "pvc-bar") + if pvc != nil { + fmt.Println("Found PVC:", pvc.Name) + } else { + fmt.Println("PVC not found") + } +} +``` + +--- diff --git a/docs/tests/manageability/manageability.md b/docs/tests/manageability/manageability.md new file mode 100644 index 000000000..f2c696231 --- /dev/null +++ b/docs/tests/manageability/manageability.md @@ -0,0 +1,348 @@ +# Package manageability + +**Path**: `tests/manageability` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local 
Functions](#local-functions) + - [containerPortNameFormatCheck](#containerportnameformatcheck) + - [testContainerPortNameFormat](#testcontainerportnameformat) + - [testContainersImageTag](#testcontainersimagetag) + +## Overview + +The manageability package defines and registers tests that validate container configuration for compliance with partner naming conventions and image tagging requirements. It prepares these checks in the global database so they can be executed during a certsuite run. + +### Key Features + +- Registers manageability checks into the checks database via LoadChecks +- Validates container port names against allowed protocol prefixes +- Ensures each container has an explicit image tag + +### Design Notes + +- Uses a beforeEach function to set up environment context for each test; skip logic runs when no containers are present +- Relies on global allowedProtocolNames slice for protocol validation, which may need updating if new protocols are introduced +- Best practice: call LoadChecks during package initialization so checks are registered automatically + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Registers the manageability tests into the global checks database, preparing them for execution. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func containerPortNameFormatCheck(portName string) bool](#containerportnameformatcheck) | Determines whether a container port name follows the partner naming conventions by checking its protocol prefix. | +| [func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainerportnameformat) | Validates that every port name in each container follows the partner naming conventions and records compliance results. 
| +| [func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainersimagetag) | Determines if each container has an image tag; logs findings and records them in a compliance report. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Registers the manageability tests into the global checks database, preparing them for execution. + +#### Signature (Go) + +```go +func LoadChecks() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Registers the manageability tests into the global checks database, preparing them for execution. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | • `log.Debug` – logs loading activity
• `checksdb.NewChecksGroup` – creates a group for these checks
• `WithBeforeEachFn`, `WithSkipCheckFn`, `WithCheckFn` – helper functions that attach behaviour to a check
• `checksdb.NewCheck` – constructs individual checks
• `identifiers.GetTestIDAndLabels` – retrieves test ID and tags
• `testContainersImageTag`, `testContainerPortNameFormat` – the actual test implementations | +| **Side effects** | Modifies the global checks database by adding a new group with two checks; logs debug output. | +| **How it fits the package** | This function is called from `pkg/certsuite.LoadInternalChecksDB()` to expose manageability tests as part of the overall certsuite test registry. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["LoadChecks"] --> B["log.Debug"] + A --> C["checksdb.NewChecksGroup"] + C --> D["WithBeforeEachFn"] + C --> E["Add first check"] + E --> F["checksdb.NewCheck"] + F --> G["identifiers.GetTestIDAndLabels"] + F --> H["testContainersImageTag"] + E --> I["WithSkipCheckFn"] + E --> J["WithCheckFn"] + A --> K["Add second check"] + K --> L["checksdb.NewCheck"] + L --> M["identifiers.GetTestIDAndLabels"] + L --> N["testContainerPortNameFormat"] + K --> O["WithSkipCheckFn"] + K --> P["WithCheckFn"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> log.Debug + func_LoadChecks --> checksdb.NewChecksGroup + func_LoadChecks --> WithBeforeEachFn + func_LoadChecks --> checksdb.NewCheck + func_LoadChecks --> identifiers.GetTestIDAndLabels + func_LoadChecks --> testContainersImageTag + func_LoadChecks --> testContainerPortNameFormat +``` + +#### Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + certsuite.LoadInternalChecksDB --> LoadChecks +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LoadChecks +func main() { + // Typically called by the test harness; here we call it directly. + manageability.LoadChecks() +} +``` + +--- + +## Local Functions + +### containerPortNameFormatCheck + +**containerPortNameFormatCheck** - Determines whether a container port name follows the partner naming conventions by checking its protocol prefix. 
+ +#### Signature (Go) + +```go +func containerPortNameFormatCheck(portName string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a container port name follows the partner naming conventions by checking its protocol prefix. | +| **Parameters** | `portName` (string) – the full name of the container port to validate. | +| **Return value** | `bool` – `true` if the first segment of `portName` is an allowed protocol name; otherwise `false`. | +| **Key dependencies** | • `strings.Split` from Go's standard library.
• `allowedProtocolNames`, a package‑level map of valid protocol prefixes. | +| **Side effects** | None – purely functional, no mutation or I/O. | +| **How it fits the package** | Used by test functions in the *manageability* suite to flag ports that do not conform to naming rules before reporting results. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Input: portName"] --> B["Split on -"] + B --> C["Extract first segment (protocol)"] + C --> D["Lookup in allowedProtocolNames map"] + D --> E{"Found?"} + E -- Yes --> F["Return true"] + E -- No --> G["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_containerPortNameFormatCheck --> func_Split +``` + +#### Functions calling `containerPortNameFormatCheck` (Mermaid) + +```mermaid +graph TD + func_testContainerPortNameFormat --> func_containerPortNameFormatCheck +``` + +#### Usage example (Go) + +```go +// Minimal example invoking containerPortNameFormatCheck +package main + +import ( + "fmt" +) + +// Assume allowedProtocolNames is populated elsewhere in the package. +var allowedProtocolNames = map[string]bool{ + "http": true, + "https": true, +} + +func main() { + name := "http-80" + if containerPortNameFormatCheck(name) { + fmt.Printf("Port name %q is valid.\n", name) + } else { + fmt.Printf("Port name %q is invalid.\n", name) + } +} +``` + +--- + +### testContainerPortNameFormat + +**testContainerPortNameFormat** - Validates that every port name in each container follows the partner naming conventions and records compliance results. + +#### 1) Signature (Go) + +```go +func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that every port name in each container follows the partner naming conventions and records compliance results. | +| **Parameters** | `check *checksdb.Check` – current test context;
`env *provider.TestEnvironment` – environment data containing containers and protocol names. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `LogDebug`, `LogError`, `LogInfo` on the check object
• `containerPortNameFormatCheck(portName string) bool`
• `testhelper.NewContainerReportObject`
• `AddField` on report objects
• `SetResult` on the check object | +| **Side effects** | • Populates global map `allowedProtocolNames`.
• Creates and stores compliant/non‑compliant report objects in the check result. | +| **How it fits the package** | Part of the *manageability* test suite; invoked by `LoadChecks` to evaluate port naming for all containers in the environment. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Populate allowedProtocolNames"} + B --> C["Iterate over env.Containers"] + C --> D{"For each container"} + D --> E["LogDebug(container)"] + E --> F["Iterate over container.Ports"] + F --> G{"Check port name format"} + G -- false --> H["LogError; add non‑compliant report"] + G -- true --> I["LogInfo; add compliant report"] + H & I --> J["Continue loop"] + J --> K["End loops"] + K --> L["SetResult(compliant, nonCompliant)"] + L --> M["Finish"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testContainerPortNameFormat --> func_LogDebug + func_testContainerPortNameFormat --> func_containerPortNameFormatCheck + func_testContainerPortNameFormat --> func_LogError + func_testContainerPortNameFormat --> func_NewContainerReportObject + func_testContainerPortNameFormat --> func_AddField + func_testContainerPortNameFormat --> func_SetResult +``` + +#### 5) Functions calling `testContainerPortNameFormat` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testContainerPortNameFormat +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testContainerPortNameFormat +env := &provider.TestEnvironment{ + ValidProtocolNames: []string{"http", "https"}, + Containers: []*provider.Container{ + { + Namespace: "default", + Podname: "web-pod", + Name: "web-container", + Ports: []provider.Port{{Name: "http-80"}}, + }, + }, +} +check := checksdb.NewCheck("TestContainerPortNameFormat") +testContainerPortNameFormat(check, env) +// Results are now available via check.Result() +``` + +--- + +--- + +### testContainersImageTag + +**testContainersImageTag** - Determines if each container has an image tag; logs 
findings and records them in a compliance report. + +Evaluates every container in a test environment to verify that an image tag is present, recording compliant and non‑compliant containers. + +#### Signature (Go) + +```go +func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if each container has an image tag; logs findings and records them in a compliance report. | +| **Parameters** | `check` – the current check context (`*checksdb.Check`).
`env` – test environment containing containers (`*provider.TestEnvironment`). | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | • `check.LogDebug`, `check.LogError`, `check.LogInfo`
• `cut.IsTagEmpty()`
• `testhelper.NewContainerReportObject`
• `check.SetResult` | +| **Side effects** | Emits debug/info/error logs; appends report objects to internal slices; sets the check result via `SetResult`. | +| **How it fits the package** | Implements the *Containers Image Tag* test within the Manageability suite, invoked by `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + start["Start"] --> iterate{"Iterate over env.Containers"} + iterate --> checkTag{"cut.IsTagEmpty()?"} + checkTag -- Yes --> logError["LogError: missing tag"] + logError --> addNonCompliant["Append non‑compliant report object"] + checkTag -- No --> logInfo["LogInfo: tagged"] + logInfo --> addCompliant["Append compliant report object"] + iterate --> endNode["SetResult and finish"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testContainersImageTag --> func_LogDebug + func_testContainersImageTag --> func_IsTagEmpty + func_testContainersImageTag --> func_LogError + func_testContainersImageTag --> func_append + func_testContainersImageTag --> pkg_testhelper.NewContainerReportObject + func_testContainersImageTag --> func_LogInfo + func_testContainersImageTag --> func_SetResult +``` + +#### Functions calling `testContainersImageTag` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testContainersImageTag +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersImageTag +env := &provider.TestEnvironment{ + Containers: []*provider.Container{ /* populate with containers */ }, +} +check := checksdb.NewCheck("example-check-id") +testContainersImageTag(check, env) +// After execution, check.Result holds compliant and non‑compliant objects. 
+``` + +--- diff --git a/docs/tests/networking/icmp/icmp.md b/docs/tests/networking/icmp/icmp.md new file mode 100644 index 000000000..8774274a1 --- /dev/null +++ b/docs/tests/networking/icmp/icmp.md @@ -0,0 +1,519 @@ +# Package icmp + +**Path**: `tests/networking/icmp` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [PingResults](#pingresults) +- [Exported Functions](#exported-functions) + - [BuildNetTestContext](#buildnettestcontext) + - [PingResults.String](#pingresults.string) + - [RunNetworkingTests](#runnetworkingtests) +- [Local Functions](#local-functions) + - [parsePingResult](#parsepingresult) + - [processContainerIpsPerNet](#processcontaineripspernet) + +## Overview + +The icmp package provides utilities for performing ICMP ping tests between containers in Kubernetes environments, building test contexts based on pod networking and reporting results. + +### Key Features + +- BuildNetTestContext constructs a map of network identifiers to NetTestContext objects that define source and destination IPs for pinging. +- RunNetworkingTests executes the ping tests across networks, aggregates success/failure data, and determines if overall compliance is met. +- parsePingResult parses raw ping output into structured PingResults capturing packet statistics and errors. + +### Design Notes + +- The package assumes containers expose network interfaces accessible via provided IP lists; it selects the first matching IP as the tester source. +- Parsing logic relies on regular expressions that may fail with non‑standard ping outputs, limiting robustness across OS variants. +- Best practice is to call BuildNetTestContext before RunNetworkingTests and log detailed reports using testhelper utilities. 
+ +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**PingResults**](#pingresults) | Summary of a ping command execution | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest map[string]netcommons.NetTestContext)](#buildnettestcontext) | Generates a mapping of network identifiers to `NetTestContext` objects that describe how ICMP connectivity tests should be performed between pods. It gathers IP addresses, selects tester sources, and populates destination targets based on pod networking configuration. | +| [func (results PingResults) String() string](#pingresults.string) | Produces a concise textual representation of ping test results, including outcome status and transmitted/received/error counts. | +| [func RunNetworkingTests( netsUnderTest map[string]netcommons.NetTestContext, count int, aIPVersion netcommons.IPVersion, logger *log.Logger, ) (report testhelper.FailureReasonOut, skip bool)](#runnetworkingtests) | Runs ICMP ping tests for each network attachment in `netsUnderTest`. It reports compliant and non‑compliant results per container pair and decides whether to skip the overall test. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func parsePingResult(stdout, stderr string) (results PingResults, err error)](#parsepingresult) | Interprets the standard output and error streams from a `ping` execution to populate a `PingResults` structure, determining whether the ping succeeded, failed, or encountered errors. 
| +| [func processContainerIpsPerNet( containerID *provider.Container, netKey string, ipAddresses []string, ifName string, netsUnderTest map[string]netcommons.NetTestContext, aIPVersion netcommons.IPVersion, logger *log.Logger) {}](#processcontaineripspernet) | Filters container IPs by the desired IP version, then populates or updates a `NetTestContext` entry for the given network key. The first qualifying IP is used as the test initiator (tester source), while the remaining IPs become ping destinations. | + +## Structs + +### PingResults + +#### Fields + +| Field | Type | Description | +|-------------|------|-------------| +| `outcome` | `int` | Enumerated result code (e.g., success, failure, error) returned by the test helper. | +| `transmitted` | `int` | Number of ICMP echo requests that were sent. | +| `received` | `int` | Number of replies received back from the target host. | +| `errors` | `int` | Count of packets that resulted in an error (e.g., time‑outs or unreachable). | + +#### Purpose + +`PingResults` aggregates quantitative data about a single ping test: how many packets were sent, how many were answered, and any errors encountered. The `outcome` field classifies the overall success status using predefined constants from the test helper package. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `PingResults.String()` | Formats the struct into a human‑readable string for logging or display. | +| `parsePingResult(stdout, stderr string)` | Parses raw ping command output, populates a `PingResults` instance, and determines the outcome based on regex matches and packet statistics. | + +--- + +--- + +## Exported Functions + +### BuildNetTestContext + +**BuildNetTestContext** - Generates a mapping of network identifiers to `NetTestContext` objects that describe how ICMP connectivity tests should be performed between pods. 
It gathers IP addresses, selects tester sources, and populates destination targets based on pod networking configuration. + +#### Signature (Go) + +```go +func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest map[string]netcommons.NetTestContext) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a mapping of network identifiers to `NetTestContext` objects that describe how ICMP connectivity tests should be performed between pods. It gathers IP addresses, selects tester sources, and populates destination targets based on pod networking configuration. | +| **Parameters** | `pods []*provider.Pod – list of pods to inspect`
`aIPVersion netcommons.IPVersion – target IP version (IPv4/IPv6)`
`aType netcommons.IFType – network interface type (MULTUS or DEFAULT)`
`logger *log.Logger – logger for diagnostics` | +| **Return value** | `netsUnderTest map[string]netcommons.NetTestContext – a mapping from network key to context data used by the ICMP test runner.` | +| **Key dependencies** | • `make` (to create the map)
• `logger.Info` (for verbose output)
• `processContainerIpsPerNet` (to populate each network entry) | +| **Side effects** | No global state is modified. The function performs logging and constructs data structures only. | +| **How it fits the package** | It is the core preparatory routine for ICMP connectivity tests, called by `testNetworkConnectivity`. It translates pod information into a format consumable by the test runner. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over pods"} + B --> C{"Skip if net tests excluded?"} + C -- Yes --> D["Log skip"] + C -- No --> E{"Check interface type"} + E --> F["Multus case"] + F --> G["Process each multus network"] + E --> H["Default case"] + H --> I["Process default network"] + G --> J["Call processContainerIpsPerNet"] + I --> K["Call processContainerIpsPerNet"] + J & K --> L["Continue loop"] + L --> B + B --> M["Return netsUnderTest"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_BuildNetTestContext --> make + func_BuildNetTestContext --> logger.Info + func_BuildNetTestContext --> processContainerIpsPerNet +``` + +#### Functions calling `BuildNetTestContext` (Mermaid) + +```mermaid +graph TD + testNetworkConnectivity --> BuildNetTestContext +``` + +#### Usage example (Go) + +```go +// Minimal example invoking BuildNetTestContext +package main + +import ( + "log" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" + "github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func main() { + // Assume pods is populated elsewhere + var pods []*provider.Pod + + logger := log.Default() + ipVersion := netcommons.IPV4 + ifType := netcommons.DEFAULT + + netsUnderTest := icmp.BuildNetTestContext(pods, ipVersion, ifType, logger) + + // netsUnderTest can now be passed to the ICMP test runner. 
+} +``` + +--- + +### PingResults.String + +**String** - Produces a concise textual representation of ping test results, including outcome status and transmitted/received/error counts. + +The `String` method formats a `PingResults` value into a human‑readable string that reports the outcome and packet statistics. + +--- + +#### Signature (Go) + +```go +func (results PingResults) String() string +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a concise textual representation of ping test results, including outcome status and transmitted/received/error counts. | +| **Parameters** | `results` – the `PingResults` value on which the method is invoked (receiver). | +| **Return value** | A formatted string such as: *"outcome: SUCCESS transmitted: 10 received: 9 errors: 1"* . | +| **Key dependencies** | • `fmt.Sprintf` from the standard library.
• `testhelper.ResultToString` from `github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper`. | +| **Side effects** | None – purely functional, no state mutation or I/O. | +| **How it fits the package** | Provides a readable output for test logs and debugging within the ICMP testing suite. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph PrepareOutput + A["Call testhelper.ResultToString"] --> B["String outcome"] + end + C["Format with fmt.Sprintf"] --> D["Return formatted string"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_PingResults.String --> func_fmt.Sprintf + func_PingResults.String --> func_testhelper.ResultToString +``` + +--- + +#### Functions calling `PingResults.String` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking PingResults.String + +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp" +) + +func main() { + results := icmp.PingResults{ + outcome: icmp.SUCCESS, + transmitted: 10, + received: 9, + errors: 1, + } + fmt.Println(results.String()) +} +``` + +--- + +--- + +### RunNetworkingTests + +**RunNetworkingTests** - Runs ICMP ping tests for each network attachment in `netsUnderTest`. It reports compliant and non‑compliant results per container pair and decides whether to skip the overall test. + +#### Signature (Go) + +```go +func RunNetworkingTests( + netsUnderTest map[string]netcommons.NetTestContext, + count int, + aIPVersion netcommons.IPVersion, + logger *log.Logger, +) (report testhelper.FailureReasonOut, skip bool) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs ICMP ping tests for each network attachment in `netsUnderTest`. It reports compliant and non‑compliant results per container pair and decides whether to skip the overall test. 
| +| **Parameters** | - `netsUnderTest` (`map[string]netcommons.NetTestContext`) – one context per network.
- `count` (`int`) – number of ping packets to send.
- `aIPVersion` (`netcommons.IPVersion`) – IPv4 or IPv6 indicator.
- `logger` (`*log.Logger`) – logger for debug/info/error output. | +| **Return value** | - `report` (`testhelper.FailureReasonOut`) – contains slices of compliant and non‑compliant report objects.
- `skip` (`bool`) – true if no networks or containers were available to test. | +| **Key dependencies** | • `netcommons.PrintNetTestContextMap`
• `TestPing` (ping helper)
• `testhelper.NewContainerReportObject`, `NewReportObject`
• Logger methods (`Debug`, `Info`, `Error`) | +| **Side effects** | Emits log messages; constructs and returns report objects but does not modify external state. | +| **How it fits the package** | Core routine for the ICMP test suite, called by higher‑level networking checks to validate inter‑pod connectivity on each network attachment. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckEmpty["Is netsUnderTest empty?"] + CheckEmpty -- Yes --> Skip{"skip=true"} + CheckEmpty -- No --> Init{"initialize counters"} + Init --> Loop1{"for each network"} + Loop1 --> CheckTargets{"DestTargets empty?"} + CheckTargets -- Yes --> ContinueLoop + CheckTargets -- No --> PingLoop{"for each destIP"} + PingLoop --> TestPing["Call TestPing"] + TestPing --> Result{"Outcome success?"} + Result -- Success --> RecordComp["Increment compliant, create report object"] + Result -- Failure --> RecordNonComp["Increment non‑compliant, create report object"] + ContinueLoop --> EndLoop + EndLoop --> SummaryChecks{"Any non‑compliant?"} + SummaryChecks -- Yes --> AddNonCompliantReport + SummaryChecks -- No --> NextNetwork + NextNetwork --> Loop1 + Loop1 --> FinalCheck{"atLeastOneNetworkTested?"} + FinalCheck -- No --> Skip + FinalCheck -- Yes --> Return["Return report, skip=false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + RunNetworkingTests --> PrintNetTestContextMap + RunNetworkingTests --> TestPing + RunNetworkingTests --> NewContainerReportObject + RunNetworkingTests --> NewReportObject +``` + +#### Functions calling `RunNetworkingTests` (Mermaid) + +```mermaid +graph TD + testNetworkConnectivity --> RunNetworkingTests +``` + +#### Usage example (Go) + +```go +// Minimal example invoking RunNetworkingTests +import ( + "log" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + logger := log.New(os.Stdout, "", 0) + 
nets := map[string]netcommons.NetTestContext{
+		"default": {}, // in real code, populate this map with icmp.BuildNetTestContext
+	}
+	report, skip := icmp.RunNetworkingTests(nets, 5, netcommons.IPV4, logger)
+
+	if skip {
+		fmt.Println("No networks to test.")
+	} else {
+		fmt.Printf("Compliant: %v\nNon‑compliant: %v\n", len(report.CompliantObjectsOut), len(report.NonCompliantObjectsOut))
+	}
+}
+```
+
+---
+
+## Local Functions
+
+### parsePingResult
+
+**parsePingResult** - Interprets the standard output and error streams from a `ping` execution to populate a `PingResults` structure, determining whether the ping succeeded, failed, or encountered errors.
+
+#### Signature (Go)
+
+```go
+func parsePingResult(stdout, stderr string) (results PingResults, err error)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Interprets the standard output and error streams from a `ping` execution to populate a `PingResults` structure, determining whether the ping succeeded, failed, or encountered errors. |
+| **Parameters** | *stdout* `string` – captured stdout;
*stderr* `string` – captured stderr | +| **Return value** | *results* `PingResults` – populated with transmitted, received, error counts and an outcome flag (`SUCCESS`, `FAILURE`, or `ERROR`).
*err* `error` – non‑nil if parsing fails or the output does not match expected patterns. | +| **Key dependencies** | • `regexp.MustCompile` (for two regexes)
• `regexp.FindStringSubmatch`
• `fmt.Errorf`
• `strconv.Atoi` | +| **Side effects** | No external I/O; only local variable manipulation and error generation. | +| **How it fits the package** | Utility function used by higher‑level test helpers to translate raw ping output into structured results for validation in networking tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Check invalid args"} + B -- match found --> C["Set outcome ERROR, return error"] + B -- no match --> D{"Find success regex"} + D -- not matched --> E["Set outcome FAILURE, return error"] + D -- matched --> F["Parse transmitted/received/errors"] + F --> G{"Determine outcome"} + G -- transmitted==0 or errors>0 --> H["Outcome ERROR"] + G -- received>0 and (transmitted-received)<=1 --> I["Outcome SUCCESS"] + G -- otherwise --> J["Outcome FAILURE"] + H --> K["Return results, nil"] + I --> K + J --> K +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_parsePingResult --> func_MustCompile + func_parsePingResult --> func_FindStringSubmatch + func_parsePingResult --> func_Errorf + func_parsePingResult --> func_Atoi +``` + +#### Functions calling `parsePingResult` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking parsePingResult +stdout := "PING 8.8.8.8: 56 data bytes\n64 bytes from 8.8.8.8: icmp_seq=0 ttl=117 time=10.2 ms\n--- 8.8.8.8 ping statistics ---\n1 packets transmitted, 1 received, 0% packet loss" +stderr := "" + +results, err := parsePingResult(stdout, stderr) +if err != nil { + fmt.Println("Parsing failed:", err) +} else { + fmt.Printf("Outcome: %v, Transmitted: %d, Received: %d, Errors: %d\n", + results.outcome, results.transmitted, results.received, results.errors) +} +``` + +--- + +### processContainerIpsPerNet + +**processContainerIpsPerNet** - Filters container IPs by the desired IP version, then populates or updates a `NetTestContext` entry for the given network key. The first qualifying IP is used as the test initiator (tester source), while the remaining IPs become ping destinations. + +#### 1) Signature (Go) + +```go +func processContainerIpsPerNet( + containerID *provider.Container, + netKey string, + ipAddresses []string, + ifName string, + netsUnderTest map[string]netcommons.NetTestContext, + aIPVersion netcommons.IPVersion, + logger *log.Logger) {} +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters container IPs by the desired IP version, then populates or updates a `NetTestContext` entry for the given network key. The first qualifying IP is used as the test initiator (tester source), while the remaining IPs become ping destinations. | +| **Parameters** | • `containerID *provider.Container` – container whose IPs are processed.
• `netKey string` – identifier of the network (e.g., “default” or a multus network name).
• `ipAddresses []string` – list of IP addresses attached to the container on that network.
• `ifName string` – interface name for this network, may be empty.
• `netsUnderTest map[string]netcommons.NetTestContext` – mutable map storing test contexts keyed by network.
• `aIPVersion netcommons.IPVersion` – target IP version (IPv4/IPv6).
• `logger *log.Logger` – logger for debug output. | +| **Return value** | None; the function mutates `netsUnderTest`. | +| **Key dependencies** | • `netcommons.FilterIPListByIPVersion` – filters IP list.
• Standard library: `len`, `append`.
• `log.Logger.Debug`. | +| **Side effects** | • Logs debug messages.
• Adds or updates entries in `netsUnderTest`; may set tester source and destination lists. | +| **How it fits the package** | Used by `icmp.BuildNetTestContext` to construct connectivity test data for each pod/container across all relevant networks before executing ping tests. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Filter IPs"] --> B{"Any IPs left?"} + B -- No --> C["Log skip, return"] + B -- Yes --> D["Ensure map entry exists"] + D --> E["Get current context"] + E --> F["Is tester source unset?"] + F -- Yes --> G["Set tester source (first IP)"] + F -- No --> H["Skip setting tester"] + G --> I["Increment index"] + H --> I + I --> J["Iterate remaining IPs"] + J --> K["Create dest entry per IP"] + K --> L["Append to DestTargets"] + L --> M["Reassign map entry"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_processContainerIpsPerNet --> func_FilterIPListByIPVersion + func_processContainerIpsPerNet --> len + func_processContainerIpsPerNet --> Debug + func_processContainerIpsPerNet --> append +``` + +#### 5) Functions calling `processContainerIpsPerNet` (Mermaid) + +```mermaid +graph TD + func_BuildNetTestContext --> func_processContainerIpsPerNet +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking processContainerIpsPerNet +package main + +import ( + "log" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/icmp" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + logger := log.New(os.Stdout, "", log.LstdFlags) + netsUnderTest := make(map[string]netcommons.NetTestContext) + + // Dummy container and IP data + container := &provider.Container{ID: "c1"} + ipList := []string{"10.0.0.5", "fe80::1"} + + icmp.processContainerIpsPerNet( + container, + "default", + ipList, + "", + netsUnderTest, + netcommons.IPV4, // or IPV6 + logger) +} +``` + +--- + +--- diff --git a/docs/tests/networking/netcommons/netcommons.md 
b/docs/tests/networking/netcommons/netcommons.md new file mode 100644 index 000000000..dd4b35ccf --- /dev/null +++ b/docs/tests/networking/netcommons/netcommons.md @@ -0,0 +1,874 @@ +# Package netcommons + +**Path**: `tests/networking/netcommons` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [ContainerIP](#containerip) + - [NetTestContext](#nettestcontext) +- [Exported Functions](#exported-functions) + - [ContainerIP.String](#containerip.string) + - [FilterIPListByIPVersion](#filteriplistbyipversion) + - [GetIPVersion](#getipversion) + - [IPVersion.String](#ipversion.string) + - [NetTestContext.String](#nettestcontext.string) + - [PodIPsToStringList](#podipstostringlist) + - [PrintNetTestContextMap](#printnettestcontextmap) + - [TestReservedPortsUsage](#testreservedportsusage) +- [Local Functions](#local-functions) + - [findRogueContainersDeclaringPorts](#findroguecontainersdeclaringports) + - [findRoguePodsListeningToPorts](#findroguepodslisteningtoports) + +## Overview + +The netcommons package supplies helper types and functions for networking tests in CertSuite. It handles IP parsing, filtering by IPv4/IPv6, building test contexts that describe source and destination containers, and checking pods for use of reserved ports. 
+
+### Key Features
+
+- Detects whether a string is IPv4 or IPv6 via GetIPVersion and exposes an IPVersion enum with String formatting
+- Filters slices of IP strings to retain only the desired version using FilterIPListByIPVersion
+- Creates readable representations of ContainerIP and NetTestContext, and aggregates compliance reports for reserved port usage through TestReservedPortsUsage
+
+### Design Notes
+
+- GetIPVersion relies on net.ParseIP and treats any non‑IPv4 string as IPv6, returning an error for invalid addresses; callers should handle the error
+- The package uses exported structs (ContainerIP, NetTestContext) but keeps many helper functions unexported to avoid polluting the public API
+- When building a test context, callers should first convert pod IPs with PodIPsToStringList and then use BuildNetTestContext (defined in the icmp package) before printing or running tests
+
+### Structs Summary
+
+| Name | Purpose |
+|------|----------|
+| [**ContainerIP**](#containerip) | Associates a container IP address with its container identifier and, optionally, a network interface name |
+| [**NetTestContext**](#nettestcontext) | Describes a network test scenario for a specific subnet |
+
+### Exported Functions Summary
+
+| Name | Purpose |
+|------|----------|
+| [func (cip *ContainerIP) String() string](#containerip.string) | Produces a human‑readable representation of a `ContainerIP`, combining its IP address with the long form of its container identifier. |
+| [func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string](#filteriplistbyipversion) | Produces a new slice containing only those IP addresses from `ipList` that match the specified `aIPVersion` (IPv4 or IPv6). |
+| [func GetIPVersion(aIP string) (IPVersion, error)](#getipversion) | Determines whether a given IP address string is IPv4 or IPv6, returning an `IPVersion` value. |
+| [func (version IPVersion) String() string](#ipversion.string) | Converts an `IPVersion` enum value into its corresponding string representation (`IPv4`, `IPv6`, `IPv4v6`, or `Undefined`). 
| +| [func (testContext *NetTestContext) String() string](#nettestcontext.string) | Generates a human‑readable representation of a `NetTestContext`, detailing the initiating container and all target containers to be tested. | +| [func PodIPsToStringList(ips []corev1.PodIP) (ipList []string)](#podipstostringlist) | Transforms a list of `corev1.PodIP` values into their raw IP string representations. | +| [func PrintNetTestContextMap(netsUnderTest map[string]NetTestContext) string](#printnettestcontextmap) | Produces a human‑readable string summarizing each network’s test context, including source and destination containers. | +| [func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject)](#testreservedportsusage) | Aggregates compliance reports for pods that listen on or declare any reserved port. It delegates the core check to `findRoguePodsListeningToPorts` and then returns the collected results. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func findRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject)](#findroguecontainersdeclaringports) | Scans each container’s port list and flags those that declare any port present in `portsToTest`. Containers declaring such ports are marked non‑compliant; all others are compliant. | +| [func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject)](#findroguepodslisteningtoports) | Scans a list of pods to determine if any container declares or listens on ports that are reserved. Generates compliance reports for each pod. 
| + +## Structs + +### ContainerIP + +A lightweight representation of a container’s network identity, combining its IP address with a full container identifier and optional interface name. + +#### Fields + +| Field | Type | Description | +|---------------------|----------------------|-------------| +| `IP` | `string` | The IPv4 or IPv6 address assigned to the container. No validation is performed in this struct; callers must ensure it is a valid IP string. | +| `ContainerIdentifier` | `*provider.Container` | A pointer to a `provider.Container` value that encapsulates namespace, pod name, container name, node name, and UID for uniquely identifying the target container. | +| `InterfaceName` | `string` | The specific network interface inside the container that should be used for ping tests (e.g., `"eth0"`). An empty string indicates the default interface. | + +#### Purpose + +The `ContainerIP` struct aggregates all information required to perform networking tests against a specific container in a Kubernetes environment. By bundling the IP, container identity, and desired network interface, test code can reference this single object when executing ping or connectivity checks, logging results, or correlating metrics. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `ContainerIP.String` | Returns a human‑readable string that includes the IP address and the long form of the container identifier, aiding debugging and log output. | + +--- + +### NetTestContext + +| Field | Type | Description | +|-------|------|-------------| +| `TesterContainerNodeName` | `string` | Name of the node where the initiating container runs. | +| `TesterSource` | `ContainerIP` | The container chosen (typically first in list) that will send ping traffic on this subnet. | +| `DestTargets` | `[]ContainerIP` | IP addresses of all other containers on the same subnet that should be pinged by `TesterSource`. 
| + +#### Purpose + +`NetTestContext` encapsulates everything needed to perform connectivity tests within a single network attachment (subnet). It identifies the node and container that will initiate traffic (`TesterSource`) and lists every target container’s IP address (`DestTargets`) that must be reachable from it. This struct is used by test runners to generate ping commands, validate reachability, and report results per subnet. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NetTestContext.String` | Returns a human‑readable summary of the tester source and its destination targets for logging or debugging. | +| `PrintNetTestContextMap` | Builds a formatted string listing all network test contexts in a map, useful for printing test configuration summaries. | + +--- + +--- + +## Exported Functions + +### ContainerIP.String + +**String** - Produces a human‑readable representation of a `ContainerIP`, combining its IP address with the long form of its container identifier. + +#### Signature (Go) + +```go +func (cip *ContainerIP) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a human‑readable representation of a `ContainerIP`, combining its IP address with the long form of its container identifier. | +| **Parameters** | *None* (receiver only) – `cip` is a pointer to the `ContainerIP` instance. | +| **Return value** | A formatted string in the form: ` ( )`. | +| **Key dependencies** | • `fmt.Sprintf` from the standard library
• `ContainerIdentifier.StringLong()` method | +| **Side effects** | None – purely functional; no state mutation or I/O. | +| **How it fits the package** | Provides a convenient string conversion for logging, debugging, and display of networking test data within the `netcommons` testing utilities. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["ContainerIP.String"] --> B["fmt.Sprintf"] + B --> C[""] + B --> D["ContainerIdentifier.StringLong()"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_ContainerIP.String --> fmt.Sprintf + func_ContainerIP.String --> func_StringLong +``` + +#### Functions calling `ContainerIP.String` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking ContainerIP.String + +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + cip := &netcommons.ContainerIP{ + IP: "192.168.1.10", + ContainerIdentifier: netcommons.ContainerIdentifier{ /* fields */ }, + } + fmt.Println(cip.String()) +} +``` + +--- + +### FilterIPListByIPVersion + +**FilterIPListByIPVersion** - Produces a new slice containing only those IP addresses from `ipList` that match the specified `aIPVersion` (IPv4 or IPv6). + +```go +func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a new slice containing only those IP addresses from `ipList` that match the specified `aIPVersion` (IPv4 or IPv6). | +| **Parameters** | `ipList []string` – list of IP address strings; `aIPVersion IPVersion` – target version to filter on. | +| **Return value** | `[]string` – a slice with only addresses of the requested IP version, preserving original order. | +| **Key dependencies** | • Calls `GetIPVersion(aIP)` to determine each address’s type.
• Uses Go’s built‑in `append` to build the result list. | +| **Side effects** | None – pure function; does not modify input slice or global state. | +| **How it fits the package** | Supports higher‑level networking tests by separating IPv4 and IPv6 addresses for targeted operations (e.g., ping, traceroute). | + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over ipList"} + B --> C["Call GetIPVersion(aIP)"] + C --> D{"Match aIPVersion?"} + D -- Yes --> E["Append to filteredIPList"] + D -- No --> F["Skip"] + E --> G["Continue loop"] + F --> G + G --> H{"End of list?"} + H -- Yes --> I["Return filteredIPList"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_FilterIPListByIPVersion --> func_GetIPVersion + func_FilterIPListByIPVersion --> append +``` + +#### Functions calling `FilterIPListByIPVersion` + +```mermaid +graph TD + func_processContainerIpsPerNet --> func_FilterIPListByIPVersion +``` + +#### Usage example + +```go +// Minimal example invoking FilterIPListByIPVersion +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + addresses := []string{ + "192.168.1.10", + "fe80::1ff:fe23:4567:890a", + "10.0.0.5", + } + ipv6s := netcommons.FilterIPListByIPVersion(addresses, netcommons.IPv6) + fmt.Println("IPv6 addresses:", ipv6s) // Output: IPv6 addresses: [fe80::1ff:fe23:4567:890a] +} +``` + +--- + +### GetIPVersion + +**GetIPVersion** - Determines whether a given IP address string is IPv4 or IPv6, returning an `IPVersion` value. + +#### Signature (Go) + +```go +func GetIPVersion(aIP string) (IPVersion, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a given IP address string is IPv4 or IPv6, returning an `IPVersion` value. 
| **Parameters** | `aIP string` – the IP address to analyze |
+| **Return value** | `IPVersion` – one of `IPv4`, `IPv6`, or `Undefined`; `error` – non‑nil if the string is not a valid IP |
+| **Key dependencies** | • `net.ParseIP` (package `net`)
• `fmt.Errorf` (package `fmt`)
• `ip.To4()` method (standard library) | +| **Side effects** | None – pure function; no state mutation or I/O. | +| **How it fits the package** | Core utility used by higher‑level networking helpers to filter and validate IP addresses across services. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Input string aIP"] --> B{"Is net.ParseIP(aIP) nil?"} + B -- No --> C["Return Undefined, error"] + B -- Yes --> D{"ip.To4() != nil?"} + D -- Yes --> E["Return IPv4, nil"] + D -- No --> F["Return IPv6, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetIPVersion --> net_ParseIP + func_GetIPVersion --> fmt_Errorf + func_GetIPVersion --> ip_To4 +``` + +#### Functions calling `GetIPVersion` (Mermaid) + +```mermaid +graph TD + FilterIPListByIPVersion --> GetIPVersion + GetServiceIPVersion --> GetIPVersion + isClusterIPsDualStack --> GetIPVersion +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetIPVersion +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + ip := "192.168.1.1" + version, err := netcommons.GetIPVersion(ip) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + fmt.Printf("The IP %s is of version: %v\n", ip, version) +} +``` + +--- + +### IPVersion.String + +**String** - Converts an `IPVersion` enum value into its corresponding string representation (`IPv4`, `IPv6`, `IPv4v6`, or `Undefined`). + +#### Signature (Go) + +```go +func (version IPVersion) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts an `IPVersion` enum value into its corresponding string representation (`IPv4`, `IPv6`, `IPv4v6`, or `Undefined`). | +| **Parameters** | *receiver* – `version IPVersion`: the enum instance to stringify. 
| +| **Return value** | `string`: a constant such as `"ipv4"`, `"ipv6"`, `"ipv4/ipv6"`, or `"undefined"` depending on the receiver’s value. | +| **Key dependencies** | Uses package‑level constants: `IPv4String`, `IPv6String`, `IPv4v6String`, `UndefinedString`. | +| **Side effects** | None – purely functional; no state mutation, I/O, or concurrency involved. | +| **How it fits the package** | Provides a human‑readable form of the internal IP version representation, useful for logging, error messages, and configuration output within the `netcommons` package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph Switch["Switch on `version`"] + caseIPv4["case IPv4\nreturn IPv4String"] + caseIPv6["case IPv6\nreturn IPv6String"] + caseIPv4v6["case IPv4v6\nreturn IPv4v6String"] + caseUndefined["case Undefined\nreturn UndefinedString"] + end + Switch --> Return["Default return: UndefinedString"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `IPVersion.String` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking IPVersion.String +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + var v netcommons.IPVersion = netcommons.IPv4 + fmt.Println(v.String()) // Output: ipv4 +} +``` + +--- + +### NetTestContext.String + +**String** - Generates a human‑readable representation of a `NetTestContext`, detailing the initiating container and all target containers to be tested. + +```go +func (testContext *NetTestContext) String() string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Generates a human‑readable representation of a `NetTestContext`, detailing the initiating container and all target containers to be tested. 
| +| **Parameters** | `testContext *NetTestContext` – receiver pointing to the context being formatted. | +| **Return value** | `string` – multiline description of the network test scenario. | +| **Key dependencies** | • `strings.Builder`
• `fmt.Sprintf`
• `WriteString` on the builder
• `len` function for slice length check
• Calls to the `String()` method of nested container objects (`TesterSource.String()`, each target’s `String()`). | +| **Side effects** | None. The function purely constructs and returns a string; it does not modify external state or perform I/O. | +| **How it fits the package** | Provides a convenient, standardized way to log or display test contexts within the networking test suite (`netcommons`). It is used by helpers such as `PrintNetTestContextMap` to produce comprehensive reports. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + sb["Builder"] -->|"WriteString(fmt.Sprintf(...))"| initLabel["From initiating container: ..."] + sb -->|"If DestTargets empty?"| noTargets["--\u003e No target containers..."] + sb -->|"For each target"| tgtLoop["Target Loop"] + tgtLoop -->|"WriteString(fmt.Sprintf(...))"| tgtLabel["To target container: ..."] + sb -->|"Return sb.String()"| result["String"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_NetTestContext.String --> fmt.Sprintf + func_NetTestContext.String --> strings.Builder.WriteString + func_NetTestContext.String --> len + func_NetTestContext.String --> testerSource.String + func_NetTestContext.String --> target.String +``` + +#### Functions calling `NetTestContext.String` (Mermaid) + +```mermaid +graph TD + PrintNetTestContextMap --> func_NetTestContext.String +``` + +> Note: The function itself is also listed as a caller in the supplied data, but this refers to its use within its own source for illustrative purposes. + +#### Usage example (Go) + +```go +// Minimal example invoking NetTestContext.String +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + // Assume we have a populated NetTestContext instance `ctx` + var ctx netcommons.NetTestContext + // Populate fields as needed... 
+ fmt.Println(ctx.String()) +} +``` + +--- + +--- + +### PodIPsToStringList + +**PodIPsToStringList** - Transforms a list of `corev1.PodIP` values into their raw IP string representations. + +#### Signature (Go) + +```go +func PodIPsToStringList(ips []corev1.PodIP) (ipList []string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a list of `corev1.PodIP` values into their raw IP string representations. | +| **Parameters** | `ips []corev1.PodIP` – slice containing PodIP objects retrieved from a pod’s status. | +| **Return value** | `ipList []string` – slice of IP address strings extracted from the input. | +| **Key dependencies** | * Calls built‑in `append`. | +| **Side effects** | None; purely functional and thread‑safe. | +| **How it fits the package** | Provides a helper for networking tests to obtain plain string IPs when building network context objects. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over ips"} + B --> C["Append ip.IP to ipList"] + C --> D["Return ipList"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_PodIPsToStringList --> func_append +``` + +#### Functions calling `PodIPsToStringList` (Mermaid) + +```mermaid +graph TD + func_BuildNetTestContext --> func_PodIPsToStringList +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PodIPsToStringList +package main + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + netcommons "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + podIPs := []corev1.PodIP{ + {IP: "10.0.0.5"}, + {IP: "10.0.0.6"}, + } + ipStrings := netcommons.PodIPsToStringList(podIPs) + fmt.Println(ipStrings) // Output: [10.0.0.5 10.0.0.6] +} +``` + +--- + +### PrintNetTestContextMap + +**PrintNetTestContextMap** - Produces a human‑readable string summarizing each network’s test context, including source and destination containers. 
+ +#### Signature (Go) + +```go +func PrintNetTestContextMap(netsUnderTest map[string]NetTestContext) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a human‑readable string summarizing each network’s test context, including source and destination containers. | +| **Parameters** | `netsUnderTest` – mapping of network names to their corresponding `NetTestContext`. | +| **Return value** | A single concatenated string containing the formatted representation of all networks. | +| **Key dependencies** | - `strings.Builder` for efficient string construction.
- `fmt.Sprintf` for formatting each line.
- Method `(*NetTestContext).String()` to get per‑network details. | +| **Side effects** | None; purely functional, no I/O or state mutation outside the returned string. | +| **How it fits the package** | Serves as a helper for logging and debugging network test setups within the `netcommons` package. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Is map empty?"} + B -- Yes --> C["Write No networks to test."] + B -- No --> D["Loop over netsUnderTest"] + D --> E["Append ***Test for Network attachment: "] + E --> F["Append netUnderTest.String()"] + F --> G["End loop"] + G --> H["Return built string"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_PrintNetTestContextMap --> strings.Builder + func_PrintNetTestContextMap --> fmt.Sprintf + func_PrintNetTestContextMap --> NetTestContext.String +``` + +#### Functions calling `PrintNetTestContextMap` + +```mermaid +graph TD + RunNetworkingTests --> PrintNetTestContextMap +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PrintNetTestContextMap +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + // Example map with dummy data + nets := map[string]netcommons.NetTestContext{ + "net1": netcommons.NetTestContext{/* fields omitted */}, + "net2": netcommons.NetTestContext{/* fields omitted */}, + } + + output := netcommons.PrintNetTestContextMap(nets) + println(output) // prints formatted network test context +} +``` + +--- + +### TestReservedPortsUsage + +**TestReservedPortsUsage** - Aggregates compliance reports for pods that listen on or declare any reserved port. It delegates the core check to `findRoguePodsListeningToPorts` and then returns the collected results. 
+ +#### Signature (Go) + +```go +func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Aggregates compliance reports for pods that listen on or declare any reserved port. It delegates the core check to `findRoguePodsListeningToPorts` and then returns the collected results. | +| **Parameters** | `env *provider.TestEnvironment` – Test environment containing pod information.
`reservedPorts map[int32]bool` – Map of port numbers that are considered reserved.
`portsOrigin string` – Human‑readable label for the source of these ports (e.g., “OCP”, “Partner”).
`logger *log.Logger` – Logger used for debug and error output. | +| **Return value** | Two slices of `*testhelper.ReportObject`:
  • `compliantObjects` – pods that do not violate the reservation rule.
  • `nonCompliantObjects` – pods that listen on or declare a reserved port.
| +| **Key dependencies** | • Calls `findRoguePodsListeningToPorts` to perform per‑pod analysis.
• Uses `append` from Go’s standard library. | +| **Side effects** | None beyond logging; it does not modify the environment or any global state. | +| **How it fits the package** | Provides a reusable helper for higher‑level tests that need to verify port usage compliance across different environments (e.g., OpenShift, partner‑specific ports). | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call findRoguePodsListeningToPorts"} + B --> C["Receive compliantEntries & nonCompliantEntries"] + C --> D{"Append to result slices"} + D --> E["Return compliantObjects, nonCompliantObjects"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_TestReservedPortsUsage --> func_findRoguePodsListeningToPorts +``` + +#### Functions calling `TestReservedPortsUsage` (Mermaid) + +```mermaid +graph TD + func_testOCPReservedPortsUsage --> func_TestReservedPortsUsage + func_testPartnerSpecificTCPPorts --> func_TestReservedPortsUsage +``` + +#### Usage example (Go) + +```go +// Minimal example invoking TestReservedPortsUsage +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/provider" +) + +func main() { + env := &provider.TestEnvironment{ + // Populate with relevant pods for the test + } + reservedPorts := map[int32]bool{22623: true, 22624: true} + logger := log.Default() + + compliant, nonCompliant := netcommons.TestReservedPortsUsage(env, reservedPorts, "OCP", logger) + + fmt.Printf("Compliant pods: %d\nNon‑compliant pods: %d\n", len(compliant), len(nonCompliant)) +} +``` + +--- + +## Local Functions + +### findRogueContainersDeclaringPorts + +**findRogueContainersDeclaringPorts** - Scans each container’s port list and flags those that declare any port present in `portsToTest`. Containers declaring such ports are marked non‑compliant; all others are compliant. 
+ +#### Signature (Go) + +```go +func findRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Scans each container’s port list and flags those that declare any port present in `portsToTest`. Containers declaring such ports are marked non‑compliant; all others are compliant. | +| **Parameters** | `containers []*provider.Container` – slice of containers to inspect.
`portsToTest map[int32]bool` – set of reserved port numbers to check against.
`portsOrigin string` – label describing the source of the reserved ports (e.g., “Istio” or “Kubernetes”).
`logger *log.Logger` – logger used for tracing and error reporting. | +| **Return value** | Two slices of `*testhelper.ReportObject`: compliantObjects and nonCompliantObjects, each populated with a report per container. | +| **Key dependencies** | • `logger.Info`, `logger.Error`
• `fmt.Sprintf`, `strconv.Itoa`
• `testhelper.NewContainerReportObject`, `SetType`, `AddField` | +| **Side effects** | No global state changes; only log output and creation of report objects. | +| **How it fits the package** | Utility for the networking compliance checks that evaluates whether containers expose ports that are reserved or otherwise disallowed. It is invoked by higher‑level pod‑level functions to aggregate per‑pod results. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> IterateContainers["Iterate over each container"] + IterateContainers --> InspectPorts["Inspect containers ports"] + InspectPorts --> CheckPort{"Is port in portsToTest?"} + CheckPort -- Yes --> LogError & AppendNonCompliant["Log error, append non‑compliant report"] + CheckPort -- No --> LogInfo & AppendCompliant["Log info, append compliant report"] + AppendNonCompliant --> End + AppendCompliant --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findRogueContainersDeclaringPorts --> logger.Info + func_findRogueContainersDeclaringPorts --> logger.Error + func_findRogueContainersDeclaringPorts --> fmt.Sprintf + func_findRogueContainersDeclaringPorts --> strconv.Itoa + func_findRogueContainersDeclaringPorts --> testhelper.NewContainerReportObject + func_findRogueContainersDeclaringPorts --> testhelper.SetType + func_findRogueContainersDeclaringPorts --> testhelper.AddField +``` + +#### Functions calling `findRogueContainersDeclaringPorts` (Mermaid) + +```mermaid +graph TD + func_findRoguePodsListeningToPorts --> func_findRogueContainersDeclaringPorts +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findRogueContainersDeclaringPorts +package main + +import ( + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons" +) + +func main() { + logger := log.Default() + + // Example containers (normally obtained from Kubernetes API) + containers 
:= []*provider.Container{ + { + Name: "app", + Namespace: "default", + Podname: "mypod", + Ports: []provider.Port{ + {ContainerPort: 8080, Protocol: provider.ProtocolTCP}, + {ContainerPort: 443, Protocol: provider.ProtocolTCP}, + }, + }, + } + + // Ports considered reserved for the test + reserved := map[int32]bool{443: true, 8443: true} + + compliant, nonCompliant := netcommons.FindRogueContainersDeclaringPorts( + containers, + reserved, + "Istio", + logger, + ) + + fmt.Println("Compliant:", compliant) + fmt.Println("Non‑compliant:", nonCompliant) +} +``` + +*Note: In the actual package the function is unexported; the example uses an exported wrapper `FindRogueContainersDeclaringPorts` for illustration.* + +--- + +### findRoguePodsListeningToPorts + +**findRoguePodsListeningToPorts** - Scans a list of pods to determine if any container declares or listens on ports that are reserved. Generates compliance reports for each pod. + +#### Signature (Go) + +```go +func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Scans a list of pods to determine if any container declares or listens on ports that are reserved. Generates compliance reports for each pod. | +| **Parameters** | `pods []*provider.Pod` – collection of pods to inspect.
`portsToTest map[int32]bool` – set of port numbers considered reserved.
`portsOrigin string` – label describing the source of the reserved ports (e.g., “K8s Reserved”).
`logger *log.Logger` – logger for diagnostic output. | +| **Return value** | Two slices of `*testhelper.ReportObject`: one for compliant pods, one for non‑compliant pods. | +| **Key dependencies** | • `findRogueContainersDeclaringPorts` (container‑level port declaration check)
• `netutil.GetListeningPorts` (retrieve listening ports per container)
• Logging helpers (`Info`, `Error`) and report constructors (`NewPodReportObject`). | +| **Side effects** | Emits log messages; creates new report objects; no global state mutation. | +| **How it fits the package** | Provides core logic for the *Reserved Ports* test in `netcommons`. It is invoked by higher‑level test functions to aggregate compliance results across a cluster. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate pods"} + B -->|"for each pod"| C["Check containers for declared ports"] + C --> D["Collect non‑compliant entries"] + D --> E["Get listening ports of first container"] + E --> F{"Error?"} + F -- yes --> G["Log error & add non‑compliant report"] + F -- no --> H{"Iterate listening ports"} + H -->|"for each port"| I{"Port reserved?"} + I -- yes --> J{"Istio proxy present && ReservedIstioPorts?"} + J -- true --> K["Ignore (Istio) and continue"] + J -- false --> L["Log error & add non‑compliant report"] + I -- no --> M["Add compliant report for listening port"] + H --> N{"Any non‑compliant port found?"} + N -- yes --> O["Add overall pod non‑compliant report"] + N -- no --> P["Add overall pod compliant report"] + O --> Q["End"] + P --> Q +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findRoguePodsListeningToPorts --> func_findRogueContainersDeclaringPorts + func_findRoguePodsListeningToPorts --> netutil.GetListeningPorts + func_findRoguePodsListeningToPorts --> testhelper.NewPodReportObject +``` + +#### Functions calling `findRoguePodsListeningToPorts` (Mermaid) + +```mermaid +graph TD + TestReservedPortsUsage --> func_findRoguePodsListeningToPorts +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findRoguePodsListeningToPorts + +import ( + "log" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netcommons/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func example() { + pods := []*provider.Pod{ /* 
populate with Pod objects */ } + reservedPorts := map[int32]bool{22: true, 443: true} + origin := "K8s Reserved" + + logger := log.New(os.Stdout, "", log.LstdFlags) + compliant, nonCompliant := findRoguePodsListeningToPorts(pods, reservedPorts, origin, logger) + + // Handle reports + for _, r := range compliant { + fmt.Println("Compliant:", r.Reason) + } + for _, r := range nonCompliant { + fmt.Println("Non‑compliant:", r.Reason) + } +} +``` + +--- diff --git a/docs/tests/networking/netutil/netutil.md b/docs/tests/networking/netutil/netutil.md new file mode 100644 index 000000000..4677d18a2 --- /dev/null +++ b/docs/tests/networking/netutil/netutil.md @@ -0,0 +1,305 @@ +# Package netutil + +**Path**: `tests/networking/netutil` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [PortInfo](#portinfo) +- [Exported Functions](#exported-functions) + - [GetListeningPorts](#getlisteningports) + - [GetSSHDaemonPort](#getsshdaemonport) +- [Local Functions](#local-functions) + - [parseListeningPorts](#parselisteningports) + +## Overview + +The netutil package supplies helper functions for inspecting the network state of containers in a Kubernetes environment, primarily by executing `nsenter` commands to list listening sockets and extracting specific service ports such as SSH. 
+ +### Key Features + +- Retrieves all active listening ports inside a container and returns them as a map of PortInfo objects +- Parses raw command output into structured port information (protocol, port number) +- Provides a dedicated helper to locate the SSH daemon’s listening port within a container + +### Design Notes + +- Uses `nsenter` via an external client to run commands inside container namespaces; assumes the target container has access to the required binaries +- Parsing logic is strict and expects a specific output format; malformed lines result in an error rather than silent failure +- Typical usage involves calling GetListeningPorts first, then inspecting the returned map for desired ports or protocols + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**PortInfo**](#portinfo) | One-line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error)](#getlisteningports) | Executes an `nsenter` command to list active network sockets in a container and parses the output into a map of `PortInfo`. | +| [func GetSSHDaemonPort(cut *provider.Container) (string, error)](#getsshdaemonport) | Executes a shell pipeline inside the target container to locate the `sshd` process and extract its listening port number. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func parseListeningPorts(string) (map[PortInfo]bool, error)](#parselisteningports) | Converts the raw output of a network‐listing command into a set of `PortInfo` objects representing actively listening ports. | + +## Structs + +### PortInfo + +A lightweight value type that identifies a network port by its numeric identifier and transport protocol. + +#### Fields + +| Field | Type | Description | +|------------|--------|-------------| +| `PortNumber` | `int32` | The integer port number (e.g., 80, 443). 
| +| `Protocol` | `string` | Transport protocol in uppercase (commonly `"TCP"` or `"UDP"`). | + +#### Purpose + +`PortInfo` is used to represent an individual listening port discovered within a container. It serves as the key in maps that track which ports are active, allowing functions like `GetListeningPorts` to return a set of all listening sockets. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `GetListeningPorts` | Executes a command inside a container to list network sockets and returns a map keyed by `PortInfo`. | +| `parseListeningPorts` | Parses the command output, constructs `PortInfo` instances for each listening socket, and populates a map of them. | + +--- + +--- + +## Exported Functions + +### GetListeningPorts + +**GetListeningPorts** - Executes an `nsenter` command to list active network sockets in a container and parses the output into a map of `PortInfo`. + +#### Signature (Go) + +```go +func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes an `nsenter` command to list active network sockets in a container and parses the output into a map of `PortInfo`. | +| **Parameters** | `cut *provider.Container` – the target container whose listening ports are queried. | +| **Return value** | `map[PortInfo]bool` – set of listening ports; `error` – any failure during command execution or parsing. | +| **Key dependencies** | • `crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut)`
• `parseListeningPorts(outStr)` | +| **Side effects** | None beyond network command invocation and error reporting. | +| **How it fits the package** | Supports networking checks by providing runtime port information used in tests such as SSH daemon detection and undeclared port verification. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Run nsenter command"] + B --> C{"Command succeeded?"} + C -- Yes --> D["Parse output with parseListeningPorts"] + C -- No --> E["Return error"] + D --> F["Return port map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetListeningPorts --> func_ExecCommandContainerNSEnter + func_GetListeningPorts --> func_parseListeningPorts +``` + +#### Functions calling `GetListeningPorts` (Mermaid) + +```mermaid +graph TD + func_testNoSSHDaemonsAllowed --> func_GetListeningPorts + func_testUndeclaredContainerPortsUsage --> func_GetListeningPorts + func_findRoguePodsListeningToPorts --> func_GetListeningPorts +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetListeningPorts +import ( + "fmt" + "log" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + // Assume we have a provider.Container instance named container + var container *provider.Container + + ports, err := netutil.GetListeningPorts(container) + if err != nil { + log.Fatalf("Failed to get listening ports: %v", err) + } + + for p := range ports { + fmt.Printf("Port %d/%s is listening\n", p.PortNumber, p.Protocol) + } +} +``` + +--- + +### GetSSHDaemonPort + +**GetSSHDaemonPort** - Executes a shell pipeline inside the target container to locate the `sshd` process and extract its listening port number. 
+ +#### Signature (Go) + +```go +func GetSSHDaemonPort(cut *provider.Container) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes a shell pipeline inside the target container to locate the `sshd` process and extract its listening port number. | +| **Parameters** | `cut *provider.Container` – reference to the container in which to search for an SSH daemon. | +| **Return value** | `string` – the port number as a trimmed string; empty if no SSH daemon is found or an error occurs. | +| **Key dependencies** | • `crclient.ExecCommandContainerNSEnter` – runs a command via nsenter in the container.

• `fmt.Errorf`, `strings.TrimSpace`. | +| **Side effects** | None beyond network I/O: executes a remote shell command; no state mutation. | +| **How it fits the package** | Utility for networking checks that need to determine whether an SSH service is running inside a pod, used by higher‑level tests such as access control compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Build command"} + B --> C["Find SSH daemon listening port"] + C --> D["Execute via nsenter"] + D --> E{"Success?"} + E -- Yes --> F["Trim output, return port"] + E -- No --> G["Return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetSSHDaemonPort --> func_ExecCommandContainerNSEnter + func_GetSSHDaemonPort --> func_Errorf + func_GetSSHDaemonPort --> func_TrimSpace +``` + +#### Functions calling `GetSSHDaemonPort` (Mermaid) + +```mermaid +graph TD + func_testNoSSHDaemonsAllowed --> func_GetSSHDaemonPort +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetSSHDaemonPort +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/netutil" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func example(container *provider.Container) { + port, err := netutil.GetSSHDaemonPort(container) + if err != nil { + fmt.Printf("Error: %v\n", err) + return + } + if port == "" { + fmt.Println("No SSH daemon found in container") + } else { + fmt.Printf("SSH daemon listening on port %s\n", port) + } +} +``` + +--- + +## Local Functions + +### parseListeningPorts + +**parseListeningPorts** - Converts the raw output of a network‐listing command into a set of `PortInfo` objects representing actively listening ports. 
+ +#### Signature (Go) + +```go +func parseListeningPorts(cmdOut string) (map[PortInfo]bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts the raw output of a network‐listing command into a set of `PortInfo` objects representing actively listening ports. | +| **Parameters** | `cmdOut string` – the multiline text returned by the external command. | +| **Return value** | `map[PortInfo]bool` – a map where each key is a unique port/protocol pair; the boolean value is always `true`. `error` – non‑nil if any parsing step fails. | +| **Key dependencies** | • `strings.TrimSuffix`, `strings.Split`, `strings.Fields`, `strconv.ParseInt`, `fmt.Errorf`, `strings.ToUpper`, type conversion to `int32`. | +| **Side effects** | None; operates purely on its input and returns new data structures. | +| **How it fits the package** | Internally used by `GetListeningPorts` to interpret command output into a programmatic representation of listening sockets. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Trim trailing newline"] --> B["Split output into lines"] + B --> C{"For each line"} + C -->|"valid line"| D["Extract fields"] + D --> E{"Check state == LISTEN"} + E -->|"yes"| F["Parse port number"] + F --> G{"Error?"} + G -->|"no"| H["Convert protocol to upper case"] + H --> I["Create PortInfo"] + I --> J["Add to map"] + G -->|"yes"| K["Return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_parseListeningPorts --> func_make + func_parseListeningPorts --> strings.TrimSuffix + func_parseListeningPorts --> strings.Split + func_parseListeningPorts --> strings.Fields + func_parseListeningPorts --> len + func_parseListeningPorts --> strconv.ParseInt + func_parseListeningPorts --> fmt.Errorf + func_parseListeningPorts --> strings.ToUpper + func_parseListeningPorts --> int32 +``` + +#### Functions calling `parseListeningPorts` (Mermaid) + +```mermaid +graph TD + GetListeningPorts --> 
parseListeningPorts +``` + +#### Usage example (Go) + +```go +// Minimal example invoking parseListeningPorts +output := "tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN\n" +ports, err := parseListeningPorts(output) +if err != nil { + log.Fatalf("parsing failed: %v", err) +} +for p := range ports { + fmt.Printf("Port %d/%s is listening\n", p.Port, p.Protocol) +} +``` + +--- diff --git a/docs/tests/networking/networking.md b/docs/tests/networking/networking.md new file mode 100644 index 000000000..abbe004c7 --- /dev/null +++ b/docs/tests/networking/networking.md @@ -0,0 +1,781 @@ +# Package networking + +**Path**: `tests/networking` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local Functions](#local-functions) + - [testDualStackServices](#testdualstackservices) + - [testExecProbDenyAtCPUPinning](#testexecprobdenyatcpupinning) + - [testNetworkAttachmentDefinitionSRIOVUsingMTU](#testnetworkattachmentdefinitionsriovusingmtu) + - [testNetworkConnectivity](#testnetworkconnectivity) + - [testNetworkPolicyDenyAll](#testnetworkpolicydenyall) + - [testOCPReservedPortsUsage](#testocpreservedportsusage) + - [testPartnerSpecificTCPPorts](#testpartnerspecifictcpports) + - [testRestartOnRebootLabelOnPodsUsingSriov](#testrestartonrebootlabelonpodsusingsriov) + - [testUndeclaredContainerPortsUsage](#testundeclaredcontainerportsusage) + +## Overview + +Provides a suite of network‑related tests for the CertSuite framework, registering checks that validate pod networking policies, port usage, SR‑IOV configuration, service IP support, and ICMP connectivity. 
+ +### Key Features + +- Registers multiple test cases with optional skip conditions and custom check functions +- Executes ICMP ping tests across IPv4/IPv6 and interface types +- Verifies compliance of pods with network policies, reserved ports, SR‑I/O‑V MTU, and CPU‑pinning exec probe rules + +### Design Notes + +- Test registration is performed via LoadChecks which builds a checks group; skip functions guard against missing resources +- Each check function receives a Check object for reporting and a TestEnvironment or pod list +- Reporting uses testhelper objects to aggregate results and log detailed messages + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Builds a checks group for the networking suite and registers individual test cases, each with optional skip conditions and check functions. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment)](#testdualstackservices) | Determines whether each Kubernetes Service in the test environment supports IPv6 or is dual‑stack; records compliant and non‑compliant services. | +| [func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod)](#testexecprobdenyatcpupinning) | Ensures that every CPU‑pinned pod used for DPDK does not contain an exec probe. An exec probe is disallowed in this context because it can interfere with strict CPU pinning and predictable performance. | +| [func testNetworkAttachmentDefinitionSRIOVUsingMTU(check *checksdb.Check, sriovPods []*provider.Pod)](#testnetworkattachmentdefinitionsriovusingmtu) | Ensures each SR‑I/O‑V pod declares an MTU; records compliance status. 
| +| [func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check)](#testnetworkconnectivity) | Orchestrates ICMP connectivity tests for a given IP version and interface type. Builds the test context, executes ping checks, and records results in the supplied `Check`. | +| [func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment)()](#testnetworkpolicydenyall) | Ensures each pod in the environment is protected by a NetworkPolicy that denies all ingress and egress traffic. It logs compliance status and records results for reporting. | +| [func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironment)](#testocpreservedportsusage) | Verifies that no pod is listening on ports reserved by OpenShift (22623, 22624). | +| [func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnvironment)](#testpartnerspecifictcpports) | Confirms that the pods in the test environment are not listening on TCP ports reserved by a partner. The function collects all partner‑reserved ports and reports compliance. | +| [func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods []*provider.Pod)](#testrestartonrebootlabelonpodsusingsriov) | Ensures that every pod using SR‑IOV has the `restart-on-reboot` label set to `"true"`. Pods missing the label or with a different value are marked non‑compliant. | +| [func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment)](#testundeclaredcontainerportsusage) | For each pod, compares the ports actually listening inside containers with those declared in the pod’s container specifications. Flags pods that expose undeclared ports as non‑compliant. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Builds a checks group for the networking suite and registers individual test cases, each with optional skip conditions and check functions. 
+ +#### 1) Signature (Go) + +```go +func LoadChecks() +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a checks group for the networking suite and registers individual test cases, each with optional skip conditions and check functions. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** |
  • `checksdb.NewChecksGroup` – creates the group container
  • `WithBeforeEachFn` – sets a per‑check setup function
  • Various `identifiers.GetTestIDAndLabels` calls – supply unique IDs and tags for each test
  • `testhelper.Get…SkipFn` functions – provide skip predicates based on environment state
  • Specific test functions (`testNetworkConnectivity`, `testUndeclaredContainerPortsUsage`, etc.) that perform the actual checks
| +| **Side effects** | Adds check definitions to an in‑memory database (via `checksdb.NewChecksGroup`) and logs a debug message. No external I/O beyond logging. | +| **How it fits the package** | It is invoked by `pkg/certsuite.LoadInternalChecksDB` to load all networking checks into the central test registry, enabling them to be executed during a certsuite run. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log debug Loading networking suite checks"] + B --> C{"Create checksGroup"} + C --> D["Set BeforeEachFn"] + D --> E["Add individual checks"] + E --> F["Finish"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_Log.Debug + func_LoadChecks --> func_checksdb.NewChecksGroup + func_LoadChecks --> func_WithBeforeEachFn + func_LoadChecks --> func_identifiers.GetTestIDAndLabels + func_LoadChecks --> func_testhelper.GetNoContainersUnderTestSkipFn + func_LoadChecks --> func_testhelper.GetDaemonSetFailedToSpawnSkipFn + func_LoadChecks --> func_testhelper.GetNoPodsUnderTestSkipFn + func_LoadChecks --> func_testhelper.GetNoServicesUnderTestSkipFn + func_LoadChecks --> func_testhelper.GetNoCPUPinningPodsSkipFn + func_LoadChecks --> func_testhelper.GetNoSRIOVPodsSkipFn + func_LoadChecks --> func_testNetworkConnectivity + func_LoadChecks --> func_testUndeclaredContainerPortsUsage + func_LoadChecks --> func_testOCPReservedPortsUsage + func_LoadChecks --> func_testDualStackServices + func_LoadChecks --> func_testNetworkPolicyDenyAll + func_LoadChecks --> func_testPartnerSpecificTCPPorts + func_LoadChecks --> func_testExecProbDenyAtCPUPinning + func_LoadChecks --> func_testRestartOnRebootLabelOnPodsUsingSriov + func_LoadChecks --> func_testNetworkAttachmentDefinitionSRIOVUsingMTU +``` + +#### 5) Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + func_LoadInternalChecksDB --> func_LoadChecks +``` + +#### 6) Usage example (Go) + +```go +// Assume we are in the certsuite 
package where LoadInternalChecksDB is defined. +func main() { + // Register all checks from every sub‑suite. + pkgcertsuite.LoadInternalChecksDB() + + // The networking checks are now available for execution. +} +``` + +--- + +--- + +## Local Functions + +### testDualStackServices + +**testDualStackServices** - Determines whether each Kubernetes Service in the test environment supports IPv6 or is dual‑stack; records compliant and non‑compliant services. + +#### Signature (Go) + +```go +func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether each Kubernetes Service in the test environment supports IPv6 or is dual‑stack; records compliant and non‑compliant services. | +| **Parameters** | `check` – check context for logging and result reporting.
`env` – environment containing the list of Services to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | *`services.GetServiceIPVersion`* – obtains IP version information.
*`testhelper.NewReportObject`* – creates compliance reports.
*`check.LogInfo/LogError`* – logs progress and errors. | +| **Side effects** | Mutates the check result state by appending report objects; writes log entries. | +| **How it fits the package** | Implements the “Dual stack services” test case registered in `LoadChecks`; ensures network services meet IPv6 or dual‑stack requirements for certification compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> ForEachService["Iterate over env.Services"] + ForEachService --> GetIPVersion["services.GetServiceIPVersion(s)"] + GetIPVersion -->|"error"| LogErrorAndRecordFail["Log error & add non‑compliant report"] + GetIPVersion --> CheckVersion["serviceIPVersion == Undefined or IPv4?"] + CheckVersion -->|"yes"| RecordNonCompliant["Add non‑compliant report"] + CheckVersion -->|"no"| RecordCompliant["Add compliant report"] + ForEachService --> End + End --> SetResult["check.SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testDualStackServices --> services.GetServiceIPVersion + func_testDualStackServices --> testhelper.NewReportObject + func_testDualStackServices --> check.LogInfo + func_testDualStackServices --> check.LogError + func_testDualStackServices --> check.SetResult +``` + +#### Functions calling `testDualStackServices` (Mermaid) + +```mermaid +graph TD + checksGroup.Add --> func_testDualStackServices +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testDualStackServices +func runExample(env *provider.TestEnvironment) { + // Create a dummy check object (in real code this comes from the checks framework) + chk := &checksdb.Check{} + testDualStackServices(chk, env) + + // Inspect results + fmt.Printf("Compliant: %v\n", chk.CompliantObjects) + fmt.Printf("Non‑compliant: %v\n", chk.NonCompliantObjects) +} +``` + +--- + +--- + +### testExecProbDenyAtCPUPinning + +**testExecProbDenyAtCPUPinning** - Ensures that every CPU‑pinned pod used for DPDK 
does not contain an exec probe. An exec probe is disallowed in this context because it can interfere with strict CPU pinning and predictable performance. + +#### Signature (Go) + +```go +func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that every CPU‑pinned pod used for DPDK does not contain an exec probe. An exec probe is disallowed in this context because it can interfere with strict CPU pinning and predictable performance. | +| **Parameters** | `check *checksdb.Check` – the check instance to log results and set final status.
`dpdkPods []*provider.Pod` – slice of pods that are CPU‑pinned and use DPDK. | +| **Return value** | None (void). Results are recorded via `SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `cut.HasExecProbes()`
• `testhelper.NewPodReportObject`
• `check.SetResult` | +| **Side effects** | *Logs information and errors to the check’s logger.
* Builds two slices of report objects (`compliantObjects`, `nonCompliantObjects`).
* Calls `SetResult` to store results for the test. No external I/O or state mutation beyond the check instance. | +| **How it fits the package** | Part of the networking test suite, specifically used by the *DPDK CPU pinning exec probe* test case to enforce best‑practice compliance for high‑performance workloads. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"dpdkPods"} + B -->|"empty"| C["End"] + B --> D{"each pod"} + D --> E["Check each container"] + E --> F{"HasExecProbes?"} + F -- yes --> G["Log error & add non‑compliant object"] + F -- no --> H["Set execProbeFound = true"] + E --> I{"execProbeFound?"} + I -- false --> J["Add compliant object"] + D --> K["Continue to next pod"] + K --> B + B --> L["Call SetResult(compliant, nonCompliant)"] + L --> M["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testExecProbDenyAtCPUPinning --> func_LogInfo + func_testExecProbDenyAtCPUPinning --> func_HasExecProbes + func_testExecProbDenyAtCPUPinning --> func_LogError + func_testExecProbDenyAtCPUPinning --> func_append + func_testExecProbDenyAtCPUPinning --> func_NewPodReportObject + func_testExecProbDenyAtCPUPinning --> func_SetResult +``` + +#### Functions calling `testExecProbDenyAtCPUPinning` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testExecProbDenyAtCPUPinning +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testExecProbDenyAtCPUPinning +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Assume we have a check instance and a list of CPU‑pinned DPDK pods + var check *checksdb.Check + var dpdkPods []*provider.Pod + + networking.TestExecProbDenyAtCPUPinning(check, dpdkPods) +} +``` + +--- + +### testNetworkAttachmentDefinitionSRIOVUsingMTU + 
+**testNetworkAttachmentDefinitionSRIOVUsingMTU** - Ensures each SR‑I/O‑V pod declares an MTU; records compliance status. + +#### Signature (Go) + +```go +func testNetworkAttachmentDefinitionSRIOVUsingMTU(check *checksdb.Check, sriovPods []*provider.Pod) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each SR‑I/O‑V pod declares an MTU; records compliance status. | +| **Parameters** | `check` – the test context (`*checksdb.Check`).
`sriovPods` – slice of pods to evaluate (`[]*provider.Pod`). | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | • `pod.IsUsingSRIOVWithMTU()`
• `check.LogError`, `check.LogInfo`
• `testhelper.NewPodReportObject`
• `check.SetResult` | +| **Side effects** | Logs errors or info; aggregates compliant/non‑compliant report objects and stores them via `SetResult`. | +| **How it fits the package** | Used by the networking test suite to validate SR‑I/O‑V pod configuration. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"for each pod"} + B --> C["Call IsUsingSRIOVWithMTU"] + C --> D{"error?"} + D -- yes --> E["Log error, create non‑compliant report"] + D -- no --> F{"result true?"} + F -- yes --> G["Log info, create compliant report"] + F -- no --> H["Log error, create non‑compliant report"] + G & H --> I["Append to respective slice"] + I --> B + B --> J["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNetworkAttachmentDefinitionSRIOVUsingMTU --> func_IsUsingSRIOVWithMTU + func_testNetworkAttachmentDefinitionSRIOVUsingMTU --> func_LogError + func_testNetworkAttachmentDefinitionSRIOVUsingMTU --> func_NewPodReportObject + func_testNetworkAttachmentDefinitionSRIOVUsingMTU --> func_LogInfo + func_testNetworkAttachmentDefinitionSRIOVUsingMTU --> func_SetResult +``` + +#### Functions calling `testNetworkAttachmentDefinitionSRIOVUsingMTU` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testNetworkAttachmentDefinitionSRIOVUsingMTU +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNetworkAttachmentDefinitionSRIOVUsingMTU +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/provider" +) + +func example() { + // Assume `check` and `pods` are already obtained + var check *checksdb.Check + var pods []*provider.Pod + + testNetworkAttachmentDefinitionSRIOVUsingMTU(check, pods) +} +``` + +--- + +### testNetworkConnectivity + +**testNetworkConnectivity** - Orchestrates ICMP connectivity tests for a given IP version and interface type. 
Builds the test context, executes ping checks, and records results in the supplied `Check`. + +#### 1) Signature (Go) + +```go +func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Orchestrates ICMP connectivity tests for a given IP version and interface type. Builds the test context, executes ping checks, and records results in the supplied `Check`. | +| **Parameters** | `env *provider.TestEnvironment` – environment containing pod data;
`aIPVersion netcommons.IPVersion` – IPv4 or IPv6;
`aType netcommons.IFType` – default or multus interface;
`check *checksdb.Check` – test check object to log and store results. | +| **Return value** | None (side‑effect on `check`). | +| **Key dependencies** | • `icmp.BuildNetTestContext` – constructs networks-to-test map.
• `icmp.RunNetworkingTests` – performs ping operations.
• Methods of `checksdb.Check`: `GetLogger`, `LogInfo`, `SetResult`. | +| **Side effects** | *Mutates the supplied `Check` by setting its result objects.
* Logs informational messages via the check’s logger. | +| **How it fits the package** | This helper is invoked from multiple test cases in `LoadChecks`; each case specifies IP version and interface type, enabling reuse of the same connectivity logic across different scenarios. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["testNetworkConnectivity"] --> B["BuildNetTestContext"] + B --> C["RunNetworkingTests"] + C --> D{"skip?"} + D -->|"true"| E["LogInfo “skipping”"] + D -->|"false"| F["SetResult with report data"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNetworkConnectivity --> func_BuildNetTestContext + func_testNetworkConnectivity --> func_RunNetworkingTests + func_testNetworkConnectivity --> func_GetLogger + func_testNetworkConnectivity --> func_LogInfo + func_testNetworkConnectivity --> func_SetResult +``` + +#### 5) Functions calling `testNetworkConnectivity` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testNetworkConnectivity +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testNetworkConnectivity +env := &provider.TestEnvironment{ /* populate Pods, etc. */ } +check := checksdb.NewCheck("example-icmptest") +testNetworkConnectivity(env, netcommons.IPv4, netcommons.DEFAULT, check) + +// The check now contains compliant and non‑compliant results. +``` + +--- + +### testNetworkPolicyDenyAll + +**testNetworkPolicyDenyAll** - Ensures each pod in the environment is protected by a NetworkPolicy that denies all ingress and egress traffic. It logs compliance status and records results for reporting. + +#### Signature (Go) + +```go +func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each pod in the environment is protected by a NetworkPolicy that denies all ingress and egress traffic. 
It logs compliance status and records results for reporting. | +| **Parameters** | `check *checksdb.Check` – test context used for logging and result setting.
`env *provider.TestEnvironment` – contains pods, network policies, and related data. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `policies.LabelsMatch`
• `policies.IsNetworkPolicyCompliant`
• `testhelper.NewPodReportObject`
• `append` (slice operation)
• `check.SetResult` | +| **Side effects** | Generates log entries, creates report objects for compliant/non‑compliant pods, and updates the check result. No external I/O beyond logging. | +| **How it fits the package** | Implements the *Network Policy Deny All* test case registered in `LoadChecks`. It operates on Kubernetes resources exposed by the test environment to validate security posture. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over Pods"} + B --> C["Log pod name"] + C --> D{"Find matching NetworkPolicies in same namespace"} + D -->|"Match"| E["Check if Labels match"] + E --> F{"Egress compliant?"} + F -->|"No"| G["Set egress flag & reason"] + F -->|"Yes"| H["Skip"] + E --> I{"Ingress compliant?"} + I -->|"No"| J["Set ingress flag & reason"] + I -->|"Yes"| K["Skip"] + D --> L["Continue loop over policies"] + B --> M{"All policies processed"} + M --> N{"Compliance status for pod"} + N -->|"Compliant"| O["Create compliant report object"] + N -->|"Non‑compliant"| P["Create non‑compliant report objects"] + O & P --> Q["Append to respective slice"] + M --> R["End loop over pods"] + R --> S["Set final result via check.SetResult"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNetworkPolicyDenyAll --> func_LogInfo + func_testNetworkPolicyDenyAll --> func_LogError + func_testNetworkPolicyDenyAll --> func_LabelsMatch + func_testNetworkPolicyDenyAll --> func_IsNetworkPolicyCompliant + func_testNetworkPolicyDenyAll --> func_NewPodReportObject + func_testNetworkPolicyDenyAll --> func_SetResult +``` + +#### Functions calling `testNetworkPolicyDenyAll` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testNetworkPolicyDenyAll +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNetworkPolicyDenyAll +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + 
"github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + check := checksdb.NewCheck() + env := &provider.TestEnvironment{} // populated elsewhere + networking.testNetworkPolicyDenyAll(check, env) +} +``` + +--- + +### testOCPReservedPortsUsage + +**testOCPReservedPortsUsage** - Verifies that no pod is listening on ports reserved by OpenShift (22623, 22624). + +#### Signature (Go) + +```go +func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that no pod is listening on ports reserved by OpenShift (22623, 22624). | +| **Parameters** | `check` – test metadata and result collector.
`env` – execution context containing cluster state. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | *`netcommons.TestReservedPortsUsage` – core logic for port checking.
* `check.GetLogger()` – logging support.
* `check.SetResult` – result reporting. | +| **Side effects** | Mutates the check’s result state; performs read‑only queries on the environment. No external I/O beyond logging. | +| **How it fits the package** | Part of the networking test suite; invoked by `LoadChecks` as the OCP reserved ports test case. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Define reserved ports"} + B --> C["TestReservedPortsUsage(env, ports, OCP, logger)"] + C --> D["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOCPReservedPortsUsage --> func_TestReservedPortsUsage + func_testOCPReservedPortsUsage --> func_GetLogger + func_testOCPReservedPortsUsage --> func_SetResult +``` + +#### Functions calling `testOCPReservedPortsUsage` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testOCPReservedPortsUsage +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOCPReservedPortsUsage +func runExample(env *provider.TestEnvironment) { + // Create a dummy check object; in real code this comes from checksdb. + check := checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestOCPReservedPortsUsage)) + testOCPReservedPortsUsage(check, env) +} +``` + +--- + +### testPartnerSpecificTCPPorts + +**testPartnerSpecificTCPPorts** - Confirms that the pods in the test environment are not listening on TCP ports reserved by a partner. The function collects all partner‑reserved ports and reports compliance. + +#### 1) Signature (Go) + +```go +func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Confirms that the pods in the test environment are not listening on TCP ports reserved by a partner. The function collects all partner‑reserved ports and reports compliance. | +| **Parameters** | `check *checksdb.Check` – current check context;
`env *provider.TestEnvironment` – environment containing cluster information. | +| **Return value** | None (the result is stored in the check). | +| **Key dependencies** | • `netcommons.TestReservedPortsUsage(env, ReservedPorts, "Partner", check.GetLogger())`
• `check.SetResult(compliantObjects, nonCompliantObjects)` | +| **Side effects** | Updates the check’s result; no external I/O or concurrency. | +| **How it fits the package** | It is a helper invoked by the *Extended partner ports* test case during suite loading to enforce partner‑specific port restrictions. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Define ReservedPorts map"] --> B["TestReservedPortsUsage"] + B --> C["SetResult on check"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPartnerSpecificTCPPorts --> netcommons.TestReservedPortsUsage + func_testPartnerSpecificTCPPorts --> checksdb.Check.SetResult +``` + +#### 5) Functions calling `testPartnerSpecificTCPPorts` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testPartnerSpecificTCPPorts +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testPartnerSpecificTCPPorts +func example(env *provider.TestEnvironment, check *checksdb.Check) { + // The function populates the check with compliance data. + testPartnerSpecificTCPPorts(check, env) +} +``` + +--- + +### testRestartOnRebootLabelOnPodsUsingSriov + +**testRestartOnRebootLabelOnPodsUsingSriov** - Ensures that every pod using SR‑IOV has the `restart-on-reboot` label set to `"true"`. Pods missing the label or with a different value are marked non‑compliant. + +#### Signature (Go) + +```go +func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods []*provider.Pod) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that every pod using SR‑IOV has the `restart-on-reboot` label set to `"true"`. Pods missing the label or with a different value are marked non‑compliant. | +| **Parameters** | `check *checksdb.Check` – context for logging and result reporting.
`sriovPods []*provider.Pod` – list of SR‑IOV pods to evaluate. | +| **Return value** | None (the function records results via the `Check` instance). | +| **Key dependencies** | • `LogInfo`, `LogError` on the check object
• `GetLabels` method of `*provider.Pod`
• `NewPodReportObject` from `testhelper`
• `fmt.Sprintf` for error messages
• `SetResult` on the check object | +| **Side effects** | Writes logs and updates the check result; no external I/O. | +| **How it fits the package** | Implements the “Restart on reboot” test case for SR‑IOV pods within the networking test suite. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over sriovPods"} + B --> C["Get pod labels"] + C --> D{"Label exists?"} + D -- No --> E["Log error, add non‑compliant report"] + D -- Yes --> F{"Value == true?"} + F -- No --> G["Log error, add non‑compliant report"] + F -- Yes --> H["Log info, add compliant report"] + H --> I["End loop"] + I --> J["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testRestartOnRebootLabelOnPodsUsingSriov --> LogInfo + func_testRestartOnRebootLabelOnPodsUsingSriov --> GetLabels + func_testRestartOnRebootLabelOnPodsUsingSriov --> LogError + func_testRestartOnRebootLabelOnPodsUsingSriov --> NewPodReportObject + func_testRestartOnRebootLabelOnPodsUsingSriov --> Sprintf + func_testRestartOnRebootLabelOnPodsUsingSriov --> SetResult +``` + +#### Functions calling `testRestartOnRebootLabelOnPodsUsingSriov` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testRestartOnRebootLabelOnPodsUsingSriov +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testRestartOnRebootLabelOnPodsUsingSriov +func Example() { + // Assume `check` and `pods` are already prepared. + var check *checksdb.Check + var pods []*provider.Pod + + // Run the SR‑IOV label validation. + testRestartOnRebootLabelOnPodsUsingSriov(check, pods) +} +``` + +--- + +--- + +### testUndeclaredContainerPortsUsage + +**testUndeclaredContainerPortsUsage** - For each pod, compares the ports actually listening inside containers with those declared in the pod’s container specifications. Flags pods that expose undeclared ports as non‑compliant. 
+ +#### Signature (Go) + +```go +func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For each pod, compares the ports actually listening inside containers with those declared in the pod’s container specifications. Flags pods that expose undeclared ports as non‑compliant. | +| **Parameters** | `check *checksdb.Check` – test framework helper for logging and result aggregation.
`env *provider.TestEnvironment` – runtime context containing all evaluated pods. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `netutil.GetListeningPorts` (executes container command to list listening sockets)
• `testhelper.NewPodReportObject` (creates result objects)
• `strconv.Itoa`, `fmt.Sprintf` for formatting
• `log` methods (`LogInfo`, `LogError`) | +| **Side effects** | • Logs informational and error messages.
• Appends report objects to internal slices; no global state mutation. | +| **How it fits the package** | Implements the “Undeclared container ports usage” test case registered in `LoadChecks`. It ensures pods do not expose ports that are not explicitly declared, a key compliance requirement for Kubernetes workloads. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["testUndeclaredContainerPortsUsage"] --> B["Iterate over env.Pods"] + B --> C["Build declaredPorts map per pod"] + C --> D["Retrieve listening ports of first container"] + D --> E{"Error?"} + E -- yes --> F["Log error & mark non‑compliant"] + E -- no --> G{"Any listening ports?"} + G -- none --> H["Mark pod compliant (no ports)"] + G -- some --> I["Loop over listeningPorts"] + I --> J{"Port declared?"} + J -- no --> K["Mark pod non‑compliant, log error"] + J -- yes --> L["Mark pod compliant, log info"] + I --> M["After loop: if any failures → add summary non‑compliant; else add summary compliant"] + M --> N["SetResult with compliant & non‑compliant slices"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testUndeclaredContainerPortsUsage --> func_GetListeningPorts + func_testUndeclaredContainerPortsUsage --> func_NewPodReportObject + func_testUndeclaredContainerPortsUsage --> func_LogInfo + func_testUndeclaredContainerPortsUsage --> func_LogError +``` + +#### Functions calling `testUndeclaredContainerPortsUsage` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testUndeclaredContainerPortsUsage +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testUndeclaredContainerPortsUsage +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/provider" +) + +func main() { + // Assume env is prepared with pods to test + var env provider.TestEnvironment + check := checksdb.NewCheck(nil) // placeholder, real init 
omitted + + testUndeclaredContainerPortsUsage(check, &env) +} +``` + +--- diff --git a/docs/tests/networking/policies/policies.md b/docs/tests/networking/policies/policies.md new file mode 100644 index 000000000..15601189a --- /dev/null +++ b/docs/tests/networking/policies/policies.md @@ -0,0 +1,190 @@ +# Package policies + +**Path**: `tests/networking/policies` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [IsNetworkPolicyCompliant](#isnetworkpolicycompliant) + - [LabelsMatch](#labelsmatch) + +## Overview + +Provides helper utilities for testing Kubernetes NetworkPolicy objects, enabling verification of deny‑all compliance and label selector matching. + +### Key Features + +- Determines if a NetworkPolicy enforces a deny‑all rule for Ingress or Egress via IsNetworkPolicyCompliant +- Evaluates whether a pod’s labels satisfy a given LabelSelector with LabelsMatch + +### Design Notes + +- Relies on len checks to interpret empty selectors as matches all; assumes standard Kubernetes API types +- If the selector is non‑empty but the policy does not specify required fields, compliance may be misclassified—users should review the PolicyType passed +- Best practice: use these helpers within unit tests or integration tests where NetworkPolicy objects are constructed and validated + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) (bool, string)](#isnetworkpolicycompliant) | Determines whether a `NetworkPolicy` implements a deny‑all rule for the specified `policyType` (Ingress or Egress). | +| [func LabelsMatch(podSelectorLabels v1.LabelSelector, podLabels map[string]string) bool](#labelsmatch) | Checks if the label selector (`podSelectorLabels`) is satisfied by the supplied pod labels (`podLabels`). 
Returns `true` when all required key‑value pairs are present or when the selector is empty (which matches everything). | + +## Exported Functions + +### IsNetworkPolicyCompliant + +**IsNetworkPolicyCompliant** - Determines whether a `NetworkPolicy` implements a deny‑all rule for the specified `policyType` (Ingress or Egress). + +#### Signature (Go) + +```go +func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) (bool, string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a `NetworkPolicy` implements a deny‑all rule for the specified `policyType` (Ingress or Egress). | +| **Parameters** | *np* (`*networkingv1.NetworkPolicy`) – The policy to evaluate.
*policyType* (`networkingv1.PolicyType`) – The type of rule being checked. | +| **Return value** | *(bool, string)* – `true` if the policy contains a deny‑all rule for the requested type; otherwise `false`. The second return is an explanatory message when non‑compliant. | +| **Key dependencies** | *len* (builtin)
*networkingv1.PolicyType* and related structs from `"k8s.io/api/networking/v1"` | +| **Side effects** | None – purely functional; no state changes or I/O. | +| **How it fits the package** | Provides core logic for compliance checks used by higher‑level test functions such as `testNetworkPolicyDenyAll`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckEmptyTypes{"np.Spec.PolicyTypes empty?"} + CheckEmptyTypes -- Yes --> ReturnFalse["return false, empty policy types"] + CheckEmptyTypes -- No --> CheckType{"policyType == Egress?"} + CheckType -- Yes --> CheckEgressSpec{"np.Spec.Egress nil or len>0?"} + CheckEgressSpec -- True --> ReturnFalseEgress["return false, egress spec not empty for default egress rule"] + CheckEgressSpec -- False --> Continue + CheckType -- No --> CheckIngressSpec{"policyType == Ingress?"} + CheckIngressSpec -- True --> CheckIngressSpecDetail{"np.Spec.Ingress nil or len>0?"} + CheckIngressSpecDetail -- True --> ReturnFalseIngress["return false, ingress spec not empty for default ingress rule"] + CheckIngressSpecDetail -- False --> Continue + Continue --> FindPolicyType{"policyType in np.Spec.PolicyTypes?"} + FindPolicyType -- Yes --> ReturnTrue["return true,"] + FindPolicyType -- No --> ReturnFalseType["return false,"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsNetworkPolicyCompliant --> len +``` + +#### Functions calling `IsNetworkPolicyCompliant` (Mermaid) + +```mermaid +graph TD + testNetworkPolicyDenyAll --> func_IsNetworkPolicyCompliant +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsNetworkPolicyCompliant +import ( + "k8s.io/api/networking/v1" +) + +func main() { + // Example network policy with deny‑all egress + np := &v1.NetworkPolicy{ + Spec: v1.NetworkPolicySpec{ + PolicyTypes: []v1.PolicyType{v1.PolicyTypeEgress}, + Egress: []v1.NetworkPolicyEgressRule{}, + }, + } + + compliant, reason := IsNetworkPolicyCompliant(np, v1.PolicyTypeEgress) + if 
compliant { + fmt.Println("Policy is compliant") + } else { + fmt.Printf("Not compliant: %s\n", reason) + } +} +``` + +--- + +### LabelsMatch + +**LabelsMatch** - Checks if the label selector (`podSelectorLabels`) is satisfied by the supplied pod labels (`podLabels`). Returns `true` when all required key‑value pairs are present or when the selector is empty (which matches everything). + +#### Signature + +```go +func LabelsMatch(podSelectorLabels v1.LabelSelector, podLabels map[string]string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks if the label selector (`podSelectorLabels`) is satisfied by the supplied pod labels (`podLabels`). Returns `true` when all required key‑value pairs are present or when the selector is empty (which matches everything). | +| **Parameters** | *`podSelectorLabels v1.LabelSelector` – The selector to evaluate.
* `podLabels map[string]string` – Labels of the pod being tested. | +| **Return value** | `bool` – `true` if the selector matches the pod labels, otherwise `false`. | +| **Key dependencies** | • Calls `Size()` on `v1.LabelSelector` to determine if it is empty.
• Uses standard map iteration and string comparison. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by network‑policy compliance checks (e.g., `testNetworkPolicyDenyAll`) to verify that a policy’s pod selector applies to a given pod. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Selector empty?"} + B -- Yes --> C["Return true"] + B -- No --> D["Iterate over MatchLabels"] + D --> E{"Key‑value match found?"} + E -- Yes --> F["Set labelMatch = true, break loops"] + E -- No --> G["Continue search"] + F --> H["End loop"] + H --> I["Return labelMatch"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_LabelsMatch --> func_Size +``` + +#### Functions calling `LabelsMatch` + +```mermaid +graph TD + func_testNetworkPolicyDenyAll --> func_LabelsMatch +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LabelsMatch +import ( + "fmt" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func main() { + selector := v1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + } + podLabels := map[string]string{"app": "frontend", "env": "prod"} + + if LabelsMatch(selector, podLabels) { + fmt.Println("Pod matches the selector") + } else { + fmt.Println("Pod does not match the selector") + } +} +``` + +--- diff --git a/docs/tests/networking/services/services.md b/docs/tests/networking/services/services.md new file mode 100644 index 000000000..faed4fe5b --- /dev/null +++ b/docs/tests/networking/services/services.md @@ -0,0 +1,341 @@ +# Package services + +**Path**: `tests/networking/services` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [GetServiceIPVersion](#getserviceipversion) + - [ToString](#tostring) + - [ToStringSlice](#tostringslice) +- [Local Functions](#local-functions) + - [isClusterIPsDualStack](#isclusteripsdualstack) + +## Overview + +Provides helper utilities for determining whether a Kubernetes Service is single‑stack IPv4, single‑stack IPv6, or 
dual‑stack, and for rendering Services as human‑readable strings. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error)](#getserviceipversion) | Inspects a `*corev1.Service` to decide whether it is single‑stack IPv4, single‑stack IPv6, or dual‑stack. Returns the corresponding `netcommons.IPVersion`. | +| [func ToString(aService *corev1.Service) (out string)](#tostring) | Produces a concise textual representation of a `corev1.Service` instance, summarizing its namespace, name, cluster IPs, and all cluster IP addresses. | +| [func ToStringSlice(manyServices []*corev1.Service) (out string)](#tostringslice) | Builds a human‑readable multiline string that lists each service’s namespace, name, ClusterIP, and all ClusterIPs. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func isClusterIPsDualStack(ips []string) (result bool, err error)](#isclusteripsdualstack) | Checks whether the supplied slice of IP address strings includes at least one IPv4 and one IPv6 address, indicating a dual‑stack configuration. | + +## Exported Functions + +### GetServiceIPVersion + +**GetServiceIPVersion** - Inspects a `*corev1.Service` to decide whether it is single‑stack IPv4, single‑stack IPv6, or dual‑stack. Returns the corresponding `netcommons.IPVersion`. + +#### Signature (Go) + +```go +func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Inspects a `*corev1.Service` to decide whether it is single‑stack IPv4, single‑stack IPv6, or dual‑stack. Returns the corresponding `netcommons.IPVersion`. | +| **Parameters** | `aService *corev1.Service` – Service object whose IP policy and cluster IPs are evaluated. | +| **Return value** | `result netcommons.IPVersion` – one of `IPv4`, `IPv6`, `IPv4v6`, or `Undefined` if unsupported.
`err error` – descriptive error on validation failure. | +| **Key dependencies** | • `netcommons.GetIPVersion`
• `fmt.Errorf`
• `ToString(aService)` helper
• `log.Debug` from internal logger
• `isClusterIPsDualStack` helper | +| **Side effects** | Logs debug messages; otherwise pure function. | +| **How it fits the package** | Provides core logic for IP‑family compliance checks used by higher‑level tests such as `testDualStackServices`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get ClusterIP version"} + B -->|"error"| C["Return error"] + B --> D{"Check IPFamilyPolicy"} + D -->|"nil"| E["Error: no policy"] + D -->|"SingleStack && IPv6"| F["Log & return IPv6"] + D -->|"SingleStack && IPv4"| G["Log & return IPv4"] + D -->|"PreferDualStack/RequireDualStack && <2 ClusterIPs"| H["Error: insufficient IPs"] + D -->|"Dual‑stack candidate"| I{"isClusterIPsDualStack"} + I -->|"true"| J["Log & return IPv4v6"] + I -->|"false"| K["Error: non‑compliant"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetServiceIPVersion --> func_GetIPVersion + func_GetServiceIPVersion --> fmt_Errorf + func_GetServiceIPVersion --> ToString + func_GetServiceIPVersion --> log_Debug + func_GetServiceIPVersion --> isClusterIPsDualStack +``` + +#### Functions calling `GetServiceIPVersion` (Mermaid) + +```mermaid +graph TD + testDualStackServices --> GetServiceIPVersion +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetServiceIPVersion +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services" + corev1 "k8s.io/api/core/v1" +) + +func main() { + svc := &corev1.Service{ + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + IPFamilyPolicy: ptrTo(corev1.IPFamilyPolicySingleStack), + }, + } + ipVer, err := services.GetServiceIPVersion(svc) + if err != nil { + log.Fatalf("Failed to determine IP version: %v", err) + } + fmt.Printf("Service supports %s\n", ipVer.String()) +} +``` + +--- + +### ToString + +**ToString** - Produces a concise textual representation of a `corev1.Service` instance, summarizing its namespace, name, cluster IPs, and all cluster IP addresses. 
+ +#### Signature (Go) + +```go +func ToString(aService *corev1.Service) (out string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces a concise textual representation of a `corev1.Service` instance, summarizing its namespace, name, cluster IPs, and all cluster IP addresses. | +| **Parameters** | `aService *corev1.Service` – the Service to describe. | +| **Return value** | `string` – formatted description of the Service. | +| **Key dependencies** | • `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used throughout the networking test suite to log or report service details in a consistent format, especially when constructing error messages or debugging output. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive *corev1.Service"] --> B{"Format fields"} + B --> C["Namespace"] + B --> D["Name"] + B --> E["ClusterIP"] + B --> F["ClusterIPs slice"] + C --> G["String output"] + D --> G + E --> G + F --> G + G["Return formatted string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ToString --> fmt.Sprintf +``` + +#### Functions calling `ToString` (Mermaid) + +```mermaid +graph TD + testNodePort --> ToString + GetServiceIPVersion --> ToString +``` + +#### Usage example (Go) + +```go +// Minimal example invoking ToString +import ( + corev1 "k8s.io/api/core/v1" +) + +// Assume svc is a *corev1.Service that has been populated elsewhere. 
+svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "example-service", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + ClusterIPs: []string{"10.0.0.1", "fd00::1"}, + }, +} + +description := ToString(svc) +fmt.Println(description) +// Output: Service ns: default, name: example-service ClusterIP:10.0.0.1 ClusterIPs: [10.0.0.1 fd00::1] +``` + +--- + +--- + +### ToStringSlice + +**ToStringSlice** - Builds a human‑readable multiline string that lists each service’s namespace, name, ClusterIP, and all ClusterIPs. + +Convert a slice of Kubernetes Service objects into a formatted string representation. + +#### Signature (Go) + +```go +func ToStringSlice(manyServices []*corev1.Service) (out string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a human‑readable multiline string that lists each service’s namespace, name, ClusterIP, and all ClusterIPs. | +| **Parameters** | `manyServices []*corev1.Service` – slice of Service pointers to process. | +| **Return value** | `out string` – concatenated representation of all services. | +| **Key dependencies** | • `fmt.Sprintf` from the standard library. | +| **Side effects** | None; purely functional—no state mutation or I/O. | +| **How it fits the package** | Utility helper used in tests to display service details for debugging and reporting purposes within the `services` testing package. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over manyServices"} + B --> C["Format each Service with fmt.Sprintf"] + C --> D["Append to out string"] + D --> E["Return out"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ToStringSlice --> fmt_Sprintf +``` + +#### Functions calling `ToStringSlice` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. 
+ +#### Usage example (Go) + +```go +// Minimal example invoking ToStringSlice +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/networking/services" + corev1 "k8s.io/api/core/v1" +) + +func main() { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example"}, + Spec: corev1.ServiceSpec{ClusterIP: "10.0.0.1", ClusterIPs: []string{"10.0.0.1"}}, + } + output := services.ToStringSlice([]*corev1.Service{svc}) + fmt.Println(output) +} +``` + +--- + +## Local Functions + +### isClusterIPsDualStack + +**isClusterIPsDualStack** - Checks whether the supplied slice of IP address strings includes at least one IPv4 and one IPv6 address, indicating a dual‑stack configuration. + +#### Signature (Go) + +```go +func isClusterIPsDualStack(ips []string) (result bool, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the supplied slice of IP address strings includes at least one IPv4 and one IPv6 address, indicating a dual‑stack configuration. | +| **Parameters** | `ips []string` – list of ClusterIP addresses to evaluate. | +| **Return value** | `` – `true` if both IPv4 and IPv6 are present; otherwise `false`. `` – non‑nil when an IP cannot be parsed or its version cannot be determined. | +| **Key dependencies** | • `netcommons.GetIPVersion(aIP string) (IPVersion, error)` – to classify each IP.
• `fmt.Errorf` – for error wrapping. | +| **Side effects** | None; pure function with no state mutation or I/O beyond error creation. | +| **How it fits the package** | Used by service‑level utilities (`GetServiceIPVersion`) to validate that a Service’s `ClusterIPs` slice satisfies dual‑stack requirements. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over ips"} + B -->|"For each ip"| C{"GetIPVersion(ip)"} + C --> D{"ipver"} + D -->|"IPv4"| E["hasIPv4 = true"] + D -->|"IPv6"| F["hasIPv6 = true"] + D -->|"Other"| G["Ignore"] + B --> H["End loop"] + H --> I{"hasIPv4 && hasIPv6"} + I -- Yes --> J["Return (true, nil)"] + I -- No --> K["Return (false, nil)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isClusterIPsDualStack --> func_GetIPVersion + func_isClusterIPsDualStack --> func_Errorf +``` + +#### Functions calling `isClusterIPsDualStack` (Mermaid) + +```mermaid +graph TD + func_GetServiceIPVersion --> func_isClusterIPsDualStack +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isClusterIPsDualStack +package main + +import ( + "fmt" +) + +func main() { + ips := []string{"10.0.0.1", "fd00::1"} + ok, err := isClusterIPsDualStack(ips) + if err != nil { + fmt.Printf("error: %v\n", err) + return + } + if ok { + fmt.Println("The ClusterIP list is dual‑stack.") + } else { + fmt.Println("Not a dual‑stack configuration.") + } +} +``` + +--- diff --git a/docs/tests/observability/observability.md b/docs/tests/observability/observability.md new file mode 100644 index 000000000..a909cdd21 --- /dev/null +++ b/docs/tests/observability/observability.md @@ -0,0 +1,853 @@ +# Package observability + +**Path**: `tests/observability` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local Functions](#local-functions) + - [buildServiceAccountToDeprecatedAPIMap](#buildserviceaccounttodeprecatedapimap) + - 
[containerHasLoggingOutput](#containerhasloggingoutput) + - [evaluateAPICompliance](#evaluateapicompliance) + - [extractUniqueServiceAccountNames](#extractuniqueserviceaccountnames) + - [testAPICompatibilityWithNextOCPRelease](#testapicompatibilitywithnextocprelease) + - [testContainersLogging](#testcontainerslogging) + - [testCrds](#testcrds) + - [testPodDisruptionBudgets](#testpoddisruptionbudgets) + - [testTerminationMessagePolicy](#testterminationmessagepolicy) + +## Overview + +The observability package supplies a collection of checks that validate runtime behavior and configuration of workloads in a Kubernetes or OpenShift cluster. It registers these checks with the internal checks database, providing per‑test setup, skip logic, and execution functions. + +### Key Features + +- Populates the checks database with tests for container logging, CRD schema compliance, PodDisruptionBudget validity, termination message policy, and API deprecation safety. +- Implements environment‑aware skip conditions (e.g., no containers or CRDs under test). +- Reports detailed compliance results via structured report objects. + +### Design Notes + +- Checks are registered at package init time through LoadChecks, enabling automatic discovery when the suite starts. +- Each check follows a consistent signature: func(*checksdb.Check,*provider.TestEnvironment)() and records results directly on the Check object. +- Deprecated API detection uses the OpenShift apiserver v1 data to map service accounts to removed APIs, ensuring only relevant APIs are considered. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Populates the internal checks database with all observability‑related tests, attaching per‑test setup/skip logic and execution functions. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func buildServiceAccountToDeprecatedAPIMap([]apiserv1.APIRequestCount, map[string]struct{}) map[string]map[string]string](#buildserviceaccounttodeprecatedapimap) | Creates a nested map where each key is a workload service‑account name and the corresponding value maps API names to their Kubernetes release version in which they will be removed. Only APIs with a non‑empty `status.removedInRelease` field are considered, ensuring that only genuinely deprecated APIs are tracked. | +| [func containerHasLoggingOutput(cut *provider.Container) (bool, error)](#containerhasloggingoutput) | Determines if a container has emitted any stdout/stderr log line by fetching the tail of its pod logs. | +| [func evaluateAPICompliance( serviceAccountToDeprecatedAPIs map[string]map[string]string, kubernetesVersion string, workloadServiceAccountNames map[string]struct{}, ) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject)](#evaluateapicompliance) | Determines whether each service account’s usage of deprecated APIs will remain valid in the next minor Kubernetes release. | +| [func extractUniqueServiceAccountNames(env *provider.TestEnvironment) map[string]struct{}](#extractuniqueserviceaccountnames) | Collects and returns a set of distinct workload‑related service account names found in the supplied test environment. | +| [func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {}](#testapicompatibilitywithnextocprelease) | Determines whether the workload’s service accounts use any APIs that will be removed in the next OpenShift Container Platform (OCP) release and records compliance results. 
| +| [func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainerslogging) | Iterates over all containers under test (CUTs) in the provided environment, verifies that each emits at least one line to its stdout/stderr stream, and records compliance status. | +| [func testCrds(check *checksdb.Check, env *provider.TestEnvironment) {}](#testcrds) | Verifies that each CRD version defines a `status` property in its OpenAPI schema, reporting compliance or non‑compliance. | +| [func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironment)](#testpoddisruptionbudgets) | Ensures each Deployment or StatefulSet in the test environment has an associated PodDisruptionBudget (PDB) that satisfies validation rules. Reports compliant and non‑compliant objects. | +| [func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvironment)](#testterminationmessagepolicy) | Ensures each container in the test environment uses `FallbackToLogsOnError` as its termination message policy. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Populates the internal checks database with all observability‑related tests, attaching per‑test setup/skip logic and execution functions. + +#### Signature (Go) + +```go +func LoadChecks() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Populates the internal checks database with all observability‑related tests, attaching per‑test setup/skip logic and execution functions. | +| **Parameters** | None | +| **Return value** | None (side‑effect only) | +| **Key dependencies** | • `log.Debug` – logs loading action
• `checksdb.NewChecksGroup`, `Add`, `WithBeforeEachFn`, `WithCheckFn`, `WithSkipCheckFn`, `WithSkipModeAll` – builder pattern for test metadata
• `identifiers.GetTestIDAndLabels` – derives ID and tags from a predefined identifier constant
• `testhelper.GetNo…SkipFn` – provides skip predicates based on the current test environment
• `testContainersLogging`, `testCrds`, `testTerminationMessagePolicy`, `testPodDisruptionBudgets`, `testAPICompatibilityWithNextOCPRelease` – actual check implementations | +| **Side effects** | • Emits a debug log.
• Creates/updates a checks group named by `common.ObservabilityTestKey`.
• Registers five checks, each with its own skip conditions and execution closure that operates on the shared test environment (`env`). | +| **How it fits the package** | This function is called from `pkg/certsuite.LoadInternalChecksDB`, ensuring that all observability tests are available before a test run begins. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start LoadChecks"] --> B["log.Debug(Loading ...)"] + B --> C["Create checksGroup = NewChecksGroup(ObservabilityKey)"] + C --> D["Apply WithBeforeEachFn(beforeEachFn)"] + D --> E["Add Check: Logging"] + E --> F["WithSkipCheckFn(GetNoContainersUnderTestSkipFn(&env))"] + F --> G["WithCheckFn(testContainersLogging)"] + E --> H["Add Check: CRDs Status Subresource"] + H --> I["WithSkipCheckFn(GetNoCrdsUnderTestSkipFn(&env))"] + I --> J["WithCheckFn(testCrds)"] + E --> K["Add Check: TerminationMessagePolicy"] + K --> L["WithSkipCheckFn(GetNoContainersUnderTestSkipFn(&env))"] + L --> M["WithCheckFn(testTerminationMessagePolicy)"] + E --> N["Add Check: PodDisruptionBudgets"] + N --> O["WithSkipCheckFn(GetNoDeploymentsUnderTestSkipFn(&env), GetNoStatefulSetsUnderTestSkipFn(&env))"] + O --> P["WithSkipModeAll()"] + P --> Q["WithCheckFn(testPodDisruptionBudgets)"] + E --> R["Add Check: API Compatibility"] + R --> S["WithCheckFn(testAPICompatibilityWithNextOCPRelease)"] + S --> T["End LoadChecks"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_LoadChecks --> func_log.Debug + func_LoadChecks --> func_checksdb.NewChecksGroup + func_LoadChecks --> func_Identifers.GetTestIDAndLabels + func_LoadChecks --> func_testhelper.GetNoContainersUnderTestSkipFn + func_LoadChecks --> func_testhelper.GetNoCrdsUnderTestSkipFn + func_LoadChecks --> func_testhelper.GetNoDeploymentsUnderTestSkipFn + func_LoadChecks --> func_testhelper.GetNoStatefulSetsUnderTestSkipFn + func_LoadChecks --> func_testContainersLogging + func_LoadChecks --> func_testCrds + func_LoadChecks --> func_testTerminationMessagePolicy + 
func_LoadChecks --> func_testPodDisruptionBudgets + func_LoadChecks --> func_testAPICompatibilityWithNextOCPRelease +``` + +#### Functions calling `LoadChecks` + +```mermaid +graph TD + func_pkg/certsuite.LoadInternalChecksDB --> func_LoadChecks +``` + +#### Usage example (Go) + +```go +// During package initialization or before a test run: +func init() { + // Register all observability checks once the environment is ready. + LoadChecks() +} +``` + +--- + +## Local Functions + +### buildServiceAccountToDeprecatedAPIMap + +**buildServiceAccountToDeprecatedAPIMap** - Creates a nested map where each key is a workload service‑account name and the corresponding value maps API names to their Kubernetes release version in which they will be removed. Only APIs with a non‑empty `status.removedInRelease` field are considered, ensuring that only genuinely deprecated APIs are tracked. + +#### Signature (Go) + +```go +func buildServiceAccountToDeprecatedAPIMap([]apiserv1.APIRequestCount, map[string]struct{}) map[string]map[string]string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a nested map where each key is a workload service‑account name and the corresponding value maps API names to their Kubernetes release version in which they will be removed. Only APIs with a non‑empty `status.removedInRelease` field are considered, ensuring that only genuinely deprecated APIs are tracked. | +| **Parameters** | `apiRequestCounts []apiserv1.APIRequestCount` – collection of API usage metrics.
`workloadServiceAccountNames map[string]struct{}` – set of service‑account names relevant to the current workload (extracted from environment). | +| **Return value** | `map[string]map[string]string` – outer key is a service‑account name; inner key/value pair is API name → removal release version. | +| **Key dependencies** | • `strings.Split`
• `make` for initializing maps
• Iteration over nested structs in `apiRequestCounts` (`Status.Last24h`, `ByNode.ByUser`) | +| **Side effects** | No global state mutation; purely functional. The only I/O is reading the passed slice and map. | +| **How it fits the package** | Used by the API‑compatibility test to determine which APIs a workload’s service accounts are calling that will be removed in future Kubernetes releases, enabling compliance checks against the target OCP/K8s version. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over APIRequestCounts"} + B -->|"removedInRelease empty?"| C["Skip"] + B --> D{"Iterate over Last24h"} + D --> E{"Iterate over ByNode"} + E --> F{"Iterate over ByUser"} + F --> G["Split UserName by :"] + G --> H["Take last segment → serviceAccount"] + H --> I{"Is SA in workload list?"} + I -->|"No"| J["Skip"] + I -->|"Yes"| K["Ensure inner map exists"] + K --> L["Add API → removedInRelease to inner map"] + L --> M["Continue loops"] + M --> N["Return serviceAccountToDeprecatedAPIs"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_buildServiceAccountToDeprecatedAPIMap --> make + func_buildServiceAccountToDeprecatedAPIMap --> strings.Split + func_buildServiceAccountToDeprecatedAPIMap --> len +``` + +#### Functions calling `buildServiceAccountToDeprecatedAPIMap` (Mermaid) + +```mermaid +graph TD + testAPICompatibilityWithNextOCPRelease --> buildServiceAccountToDeprecatedAPIMap +``` + +#### Usage example (Go) + +```go +// Minimal example invoking buildServiceAccountToDeprecatedAPIMap +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability" + apiserv1 "github.com/kubernetes-sigs/api-server-metrics/pkg/apis/apiserver/v1" +) + +func example() { + // Assume apiRequestCounts and workloadServiceAccountNames are already populated + var apiRequestCounts []apiserv1.APIRequestCount + workloadServiceAccountNames := map[string]struct{}{ + "eventtest-operator-service-account": {}, + 
"other-sa": {}, + } + + deprecatedMap := observability.BuildServiceAccountToDeprecatedAPIMap( + apiRequestCounts, + workloadServiceAccountNames, + ) + + // deprecatedMap now contains, for each SA, the APIs that will be removed +} +``` + +--- + +### containerHasLoggingOutput + +**containerHasLoggingOutput** - Determines if a container has emitted any stdout/stderr log line by fetching the tail of its pod logs. + +Checks whether a Kubernetes container has produced any log output by retrieving the last few lines of its logs. + +--- + +#### Signature (Go) + +```go +func containerHasLoggingOutput(cut *provider.Container) (bool, error) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a container has emitted any stdout/stderr log line by fetching the tail of its pod logs. | +| **Parameters** | `cut *provider.Container` – the container whose logs are examined. | +| **Return value** | `(bool, error)` – `true` if at least one log line exists; `false` otherwise. An error is returned if any step in retrieving or reading the logs fails. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• Kubernetes client `CoreV1().Pods(...).GetLogs(...)`
• `context.TODO()`
• `io.Copy` to read stream
• `bytes.Buffer` to accumulate output | +| **Side effects** | No state mutation; only performs I/O against the Kubernetes API and reads from a network stream. | +| **How it fits the package** | Utility used by observability tests (e.g., `testContainersLogging`) to verify that containers emit log lines, forming part of compliance checks. | + +--- + +#### Internal workflow + +```mermaid +flowchart TD + A["Get Kubernetes client"] --> B["Build PodLogOptions with TailLines=2"] + B --> C["Request pod logs stream"] + C --> D{"Stream error?"} + D -- yes --> E["Return (false, err)"] + D -- no --> F["Read stream into buffer"] + F --> G{"Read error?"} + G -- yes --> H["Return (false, err)"] + G -- no --> I["Check if buffer is non‑empty"] + I --> J["Return (true/false, nil)"] +``` + +--- + +#### Function dependencies + +```mermaid +graph TD + func_containerHasLoggingOutput --> func_GetClientsHolder + func_containerHasLoggingOutput --> func_CoreV1 + func_containerHasLoggingOutput --> func_Pods + func_containerHasLoggingOutput --> func_GetLogs + func_containerHasLoggingOutput --> func_Stream + func_containerHasLoggingOutput --> func_TODO + func_containerHasLoggingOutput --> func_Errorf + func_containerHasLoggingOutput --> func_Close + func_containerHasLoggingOutput --> func_new + func_containerHasLoggingOutput --> func_Copy + func_containerHasLoggingOutput --> func_String +``` + +--- + +#### Functions calling `containerHasLoggingOutput` + +```mermaid +graph TD + func_testContainersLogging --> func_containerHasLoggingOutput +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking containerHasLoggingOutput +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + // Assume cut is a populated provider.Container instance. 
+ var cut *provider.Container + hasLogs, err := observability.containerHasLoggingOutput(cut) + if err != nil { + log.Fatalf("Error checking logs: %v", err) + } + fmt.Printf("Container has logging output? %t\n", hasLogs) +} +``` + +--- + +### evaluateAPICompliance + +**evaluateAPICompliance** - Determines whether each service account’s usage of deprecated APIs will remain valid in the next minor Kubernetes release. + +#### Signature (Go) + +```go +func evaluateAPICompliance( + serviceAccountToDeprecatedAPIs map[string]map[string]string, + kubernetesVersion string, + workloadServiceAccountNames map[string]struct{}, +) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether each service account’s usage of deprecated APIs will remain valid in the next minor Kubernetes release. | +| **Parameters** | `serviceAccountToDeprecatedAPIs` – map from SA name to API → removal‑release string.
`kubernetesVersion` – current cluster version (e.g., `"4.12.0"`).
`workloadServiceAccountNames` – set of service accounts belonging to the workload under test. | +| **Return value** | Two slices of `*testhelper.ReportObject`: compliant and non‑compliant findings, each containing descriptive fields. | +| **Key dependencies** | • `github.com/Masterminds/semver` for parsing and incrementing versions.
• `fmt` for formatted messages.
• `github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper.NewReportObject`. | +| **Side effects** | No global state changes; only I/O via `fmt.Printf` on error. Generates new report objects. | +| **How it fits the package** | Used by the OpenShift API‑compatibility test to surface which APIs will be removed in the next release and whether the workload remains functional. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Parse current K8s version"] --> B{"Success?"} + B -- No --> C["Print error, return nil"] + B -- Yes --> D["Increment to next minor release"] + D --> E["Iterate over service accounts"] + E --> F["Iterate over deprecated APIs"] + F --> G["Parse removal version"] + G -- Error --> H["Skip API"] + G -- Success --> I{"RemovedMinor > NextMinor?"} + I -- Yes --> J["Create compliant report"] + I -- No --> K["Create non‑compliant report"] + J & K --> L["Add common fields (APIName, ServiceAccount)"] + L --> M["Append to respective slice"] + M --> N["After loops: check if both slices empty"] + N -- Yes --> O["Generate dummy compliant reports for each SA"] + O --> P["Return slices"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_evaluateAPICompliance --> semver.NewVersion + func_evaluateAPICompliance --> fmt.Printf + func_evaluateAPICompliance --> semver.Version.IncMinor + func_evaluateAPICompliance --> fmt.Sprintf + func_evaluateAPICompliance --> testhelper.NewReportObject + func_evaluateAPICompliance --> testhelper.ReportObject.AddField +``` + +#### Functions calling `evaluateAPICompliance` + +```mermaid +graph TD + func_testAPICompatibilityWithNextOCPRelease --> func_evaluateAPICompliance +``` + +#### Usage example (Go) + +```go +// Minimal example invoking evaluateAPICompliance +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func main() { + deprecated := map[string]map[string]string{ + "serviceA": {"deployment.kubernetes.io/revision": "4.13.0"}, + "serviceB": 
{"apps/v1beta2/Deployment": "4.12.0"}, + } + saSet := map[string]struct{}{ + "serviceA": {}, + "serviceB": {}, + } + + compliant, nonCompliant := evaluateAPICompliance(deprecated, "4.12.0", saSet) + + // Process results + for _, r := range compliant { + fmt.Println("COMPLIANT:", r.ObjectType, r.Fields) + } + for _, r := range nonCompliant { + fmt.Println("NON‑COMPLIANT:", r.ObjectType, r.Fields) + } +} +``` + +--- + +### extractUniqueServiceAccountNames + +**extractUniqueServiceAccountNames** - Collects and returns a set of distinct workload‑related service account names found in the supplied test environment. + +#### Signature (Go) + +```go +func extractUniqueServiceAccountNames(env *provider.TestEnvironment) map[string]struct{} +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects and returns a set of distinct workload‑related service account names found in the supplied test environment. | +| **Parameters** | `env *provider.TestEnvironment` – The test environment containing a slice of service accounts. | +| **Return value** | `map[string]struct{}` – A map where keys are unique service account names; values are empty structs to save memory. | +| **Key dependencies** | • `make` (to create the map) | +| **Side effects** | None – purely functional; no state mutation or I/O. | +| **How it fits the package** | Used by API‑compatibility tests to identify which service accounts should be evaluated against deprecated APIs. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Initialize empty map"] + B --> C{"Iterate over env.ServiceAccounts"} + C -->|"For each SA"| D["Insert sa.Name into map"] + D --> C + C --> E["Return the map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_extractUniqueServiceAccountNames --> func_make +``` + +#### Functions calling `extractUniqueServiceAccountNames` (Mermaid) + +```mermaid +graph TD + func_testAPICompatibilityWithNextOCPRelease --> func_extractUniqueServiceAccountNames +``` + +#### Usage example (Go) + +```go +// Minimal example invoking extractUniqueServiceAccountNames +env := &provider.TestEnvironment{ + ServiceAccounts: []struct{ Name string }{ + {Name: "sa-frontend"}, + {Name: "sa-backend"}, + {Name: "sa-frontend"}, // duplicate, will be deduped + }, +} +uniqueSAs := extractUniqueServiceAccountNames(env) +fmt.Printf("Found %d unique service accounts\n", len(uniqueSAs)) +``` + +--- + +### testAPICompatibilityWithNextOCPRelease + +**testAPICompatibilityWithNextOCPRelease** - Determines whether the workload’s service accounts use any APIs that will be removed in the next OpenShift Container Platform (OCP) release and records compliance results. + +#### 1) Signature (Go) + +```go +func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) {} +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the workload’s service accounts use any APIs that will be removed in the next OpenShift Container Platform (OCP) release and records compliance results. | +| **Parameters** | `check *checksdb.Check` – the test check instance; `env *provider.TestEnvironment` – runtime environment containing cluster info, service accounts, and Kubernetes version. | +| **Return value** | None – results are stored via `check.SetResult`. | +| **Key dependencies** | • `provider.IsOCPCluster()`
• `clientsholder.GetClientsHolder()`
• `oc.ApiserverClient.ApiserverV1().APIRequestCounts().List(...)`
• `extractUniqueServiceAccountNames(env)`
• `buildServiceAccountToDeprecatedAPIMap(apiRequestCounts.Items, workloadServiceAccountNames)`
• `evaluateAPICompliance(serviceAccountToDeprecatedAPIs, env.K8sVersion, workloadServiceAccountNames)`
• `check.SetResult(compliantObjects, nonCompliantObjects)` | +| **Side effects** | • Logs informational and error messages via `check.LogInfo`/`check.LogError`.
• Mutates the check result state through `SetResult`. No external I/O beyond API requests. | +| **How it fits the package** | This function is a core test in the *observability* suite, specifically verifying API deprecation compliance for workloads running on OpenShift clusters. + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Test start"] --> B["Check if cluster is OCP"] + B -- No --> C["Log skip & return"] + B -- Yes --> D["Retrieve APIRequestCounts"] + D --> E{"Error?"} + E -- Yes --> F["Log error & return"] + E -- No --> G["Extract unique service accounts"] + G --> H["Build SA → deprecated API map"] + H --> I["Evaluate compliance against next K8s version"] + I --> J["Set test result on check"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testAPICompatibilityWithNextOCPRelease --> func_IsOCPCluster + func_testAPICompatibilityWithNextOCPRelease --> func_GetClientsHolder + func_testAPICompatibilityWithNextOCPRelease --> func_List + func_testAPICompatibilityWithNextOCPRelease --> func_extractUniqueServiceAccountNames + func_testAPICompatibilityWithNextOCPRelease --> func_buildServiceAccountToDeprecatedAPIMap + func_testAPICompatibilityWithNextOCPRelease --> func_evaluateAPICompliance + func_testAPICompatibilityWithNextOCPRelease --> func_SetResult +``` + +#### 5) Functions calling `testAPICompatibilityWithNextOCPRelease` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testAPICompatibilityWithNextOCPRelease +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testAPICompatibilityWithNextOCPRelease +check := checksdb.NewCheck("example-check-id") +env := &provider.TestEnvironment{ + ServiceAccounts: []corev1.ServiceAccount{ /* … */ }, + K8sVersion: "4.12", +} +testAPICompatibilityWithNextOCPRelease(check, env) +``` + +--- + +### testContainersLogging + +**testContainersLogging** - Iterates over all containers under test (CUTs) in the provided environment, verifies that each emits 
at least one line to its stdout/stderr stream, and records compliance status. + +#### Signature (Go) + +```go +func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all containers under test (CUTs) in the provided environment, verifies that each emits at least one line to its stdout/stderr stream, and records compliance status. | +| **Parameters** | `check *checksdb.Check` – the current check instance for logging;
`env *provider.TestEnvironment` – context containing all CUTs. | +| **Return value** | None (updates the check result directly). | +| **Key dependencies** | • `LogInfo`, `LogError` on the `check` object.
• `containerHasLoggingOutput(cut)` to fetch log presence.
• `testhelper.NewContainerReportObject` for reporting objects.
• `SetResult` on the `check`. | +| **Side effects** | • Emits informational and error logs via the check.
• Appends report objects to internal slices.
• Calls `check.SetResult`, thereby mutating the check state. No external I/O beyond log retrieval. | +| **How it fits the package** | Part of the Observability test suite; invoked by `LoadChecks` as a check function for the *Logging* test case, ensuring containers produce observable logs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"For each CUT"} + B --> C["LogInfo “Testing Container”"] + C --> D{"containerHasLoggingOutput?"} + D -- Error --> E["LogError “Failed to get log output”"] + E --> F["Append non‑compliant object"] + D -- No Output --> G["LogError “No log line …”"] + G --> H["Append non‑compliant object"] + D -- Has Output --> I["LogInfo “Container has some logging output”"] + I --> J["Append compliant object"] + B --> K["SetResult(compliant, nonCompliant)"] + K --> L["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testContainersLogging --> func_LogInfo + func_testContainersLogging --> func_containerHasLoggingOutput + func_testContainersLogging --> func_LogError + func_testContainersLogging --> func_NewContainerReportObject + func_testContainersLogging --> func_SetResult +``` + +#### Functions calling `testContainersLogging` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testContainersLogging +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersLogging +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func example() { + env := &provider.TestEnvironment{ /* populate Containers */ } + check := checksdb.NewCheck("logging-test") + observability.testContainersLogging(check, env) +} +``` + +--- + +### testCrds + +**testCrds** - Verifies that each CRD version defines a `status` property in its OpenAPI schema, reporting compliance or non‑compliance. 
+ +#### Signature (Go) + +```go +func testCrds(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that each CRD version defines a `status` property in its OpenAPI schema, reporting compliance or non‑compliance. | +| **Parameters** | `check *checksdb.Check` – the check context used for logging and result setting.
`env *provider.TestEnvironment` – environment containing the list of CRDs to evaluate. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `testhelper.NewReportObject`, `AddField`
• `check.SetResult` | +| **Side effects** | Generates log entries, creates report objects, and updates the check result. No external I/O beyond logging. | +| **How it fits the package** | Implements one of the observability suite checks; invoked by `LoadChecks` to validate CRD status sub‑resource compliance across deployments. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Crds"} + B --> C["Log CRD name"] + C --> D{"For each version in crd.Spec.Versions"} + D --> E{"Check status property"} + E -- missing --> F["Log error; add non‑compliant object"] + E -- present --> G["Log info; add compliant object"] + F & G --> H["End version loop"] + H --> I["End CRD loop"] + I --> J["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testCrds --> func_LogInfo + func_testCrds --> func_LogError + func_testCrds --> func_append + func_testCrds --> func_AddField + func_testCrds --> func_NewReportObject + func_testCrds --> func_SetResult +``` + +#### Functions calling `testCrds` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testCrds +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testCrds +env := &provider.TestEnvironment{ /* populate env.Crds */ } +check := checksdb.NewCheck("TestCRDsStatusSubresource") +testCrds(check, env) +// check now contains compliant/non‑compliant results +``` + +--- + +### testPodDisruptionBudgets + +**testPodDisruptionBudgets** - Ensures each Deployment or StatefulSet in the test environment has an associated PodDisruptionBudget (PDB) that satisfies validation rules. Reports compliant and non‑compliant objects. 
+ +#### Signature (Go) + +```go +func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each Deployment or StatefulSet in the test environment has an associated PodDisruptionBudget (PDB) that satisfies validation rules. Reports compliant and non‑compliant objects. | +| **Parameters** | `check *checksdb.Check` – object for logging and result storage.
`env *provider.TestEnvironment` – holds collections of Deployments, StatefulSets, and PDBs to evaluate. | +| **Return value** | None (results are set via `check.SetResult`). | +| **Key dependencies** | • `log.Info`, `log.Error`
• `labels.Set`, `metav1.LabelSelectorAsSelector`, `Matches`
• `pdbv1.CheckPDBIsValid` (validation logic)
• `testhelper.NewReportObject`, `AddField` | +| **Side effects** | Writes log messages; creates report objects for each object; updates the check result. No external I/O beyond logging. | +| **How it fits the package** | Part of the observability test suite; invoked by `LoadChecks` to verify PodDisruptionBudget compliance in a cluster snapshot. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate Deployments"} + B --> C["Log deployment name"] + C --> D["Build selector from Deployment labels"] + D --> E{"Find matching PDBs"} + E --> F["Convert PDB selector to matcher"] + F --> G{"Selector matches?"} + G -- Yes --> H["Validate PDB via CheckPDBIsValid"] + H -- Valid --> I["Record compliant report"] + H -- Invalid --> J["Record non‑compliant report with error"] + G -- No --> K["Continue search"] + E -- Not found --> L["Log missing PDB, record non‑compliant"] + + B --> M{"Iterate StatefulSets"} + M --> N["Log stateful set name"] + N --> O["Build selector from StatefulSet labels"] + O --> P{"Find matching PDBs"} + P --> Q["Convert PDB selector to matcher"] + Q --> R{"Selector matches?"} + R -- Yes --> S["Validate PDB via CheckPDBIsValid"] + S -- Valid --> T["Record compliant report"] + S -- Invalid --> U["Record non‑compliant report with error"] + R -- No --> V["Continue search"] + P -- Not found --> W["Log missing PDB, record non‑compliant"] + + L & W --> X["Set check result"] --> Y["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testPodDisruptionBudgets --> LogInfo + func_testPodDisruptionBudgets --> ToString + func_testPodDisruptionBudgets --> Set + func_testPodDisruptionBudgets --> LabelSelectorAsSelector + func_testPodDisruptionBudgets --> LogError + func_testPodDisruptionBudgets --> Matches + func_testPodDisruptionBudgets --> CheckPDBIsValid + func_testPodDisruptionBudgets --> NewReportObject + func_testPodDisruptionBudgets --> fmt.Sprintf + func_testPodDisruptionBudgets --> AddField + 
func_testPodDisruptionBudgets --> SetResult +``` + +#### Functions calling `testPodDisruptionBudgets` + +```mermaid +graph TD + LoadChecks --> testPodDisruptionBudgets +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodDisruptionBudgets +check := checksdb.NewCheck("example") +env := provider.TestEnvironment{ + Deployments: []appsv1.Deployment{ /* … */ }, + StatefulSets: []appsv1.StatefulSet{ /* … */ }, + PodDisruptionBudgets: []policyv1.PodDisruptionBudget{ /* … */ }, +} +testPodDisruptionBudgets(check, &env) +``` + +--- + +### testTerminationMessagePolicy + +**testTerminationMessagePolicy** - Ensures each container in the test environment uses `FallbackToLogsOnError` as its termination message policy. + +#### 1) Signature (Go) + +```go +func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures each container in the test environment uses `FallbackToLogsOnError` as its termination message policy. | +| **Parameters** | `check *checksdb.Check` – check instance for logging and result handling.
`env *provider.TestEnvironment` – holds a slice of containers under test (`Containers`). | +| **Return value** | None; the function records results via `check.SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `testhelper.NewContainerReportObject`
• `check.SetResult` | +| **Side effects** | Logs information, constructs report objects, and sets the check result; no external I/O. | +| **How it fits the package** | Part of the observability test suite; called by `LoadChecks` to validate container policies during test execution. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Containers"} + B -->|"For each container"| C["Log info about the container"] + C --> D{"Check TerminationMessagePolicy"} + D -- != FallbackToLogsOnError --> E["Log error, create non‑compliant report object"] + D -- == FallbackToLogsOnError --> F["Log success, create compliant report object"] + E & F --> G["Append to respective list"] + G --> B + B --> H["SetResult(compliantObjects, nonCompliantObjects)"] + H --> I["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testTerminationMessagePolicy --> func_LogInfo + func_testTerminationMessagePolicy --> func_LogError + func_testTerminationMessagePolicy --> func_NewContainerReportObject + func_testTerminationMessagePolicy --> func_SetResult +``` + +#### 5) Functions calling `testTerminationMessagePolicy` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testTerminationMessagePolicy +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testTerminationMessagePolicy +func Example() { + // Assume check and env are initialized appropriately. 
+ var check *checksdb.Check // e.g., from a checks group + var env *provider.TestEnvironment // contains Containers slice + + testTerminationMessagePolicy(check, env) +} +``` + +--- diff --git a/docs/tests/observability/pdb/pdb.md b/docs/tests/observability/pdb/pdb.md new file mode 100644 index 000000000..615c21913 --- /dev/null +++ b/docs/tests/observability/pdb/pdb.md @@ -0,0 +1,274 @@ +# Package pdb + +**Path**: `tests/observability/pdb` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [CheckPDBIsValid](#checkpdbisvalid) +- [Local Functions](#local-functions) + - [intOrStringToValue](#intorstringtovalue) + - [percentageToFloat](#percentagetofloat) + +## Overview + +The pdb package validates Kubernetes PodDisruptionBudget objects against a given replica count, ensuring that the budget’s minAvailable and maxUnavailable values are logically consistent. + +### Key Features + +- Converts IntOrString fields to concrete integers, interpreting percentage strings relative to replica counts +- Checks minAvailable and maxUnavailable constraints for validity +- Provides error feedback when parsing or validation fails + +### Design Notes + +- Assumes replica count is non‑nil; if nil the function returns false with an error +- Handles percentages by converting them to float values between 0 and 1 before rounding +- Best practice: use CheckPDBIsValid in tests or controllers to pre‑validate PDB objects + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, error)](#checkpdbisvalid) | Determines whether the given `PodDisruptionBudget` (`pdb`) is logically consistent with the supplied replica count. It checks that `minAvailable` and `maxUnavailable` values are within valid ranges. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func(*intstr.IntOrString, int32)(int, error)](#intorstringtovalue) | Transforms a Kubernetes `IntOrString` field into an absolute integer, interpreting strings as percentages of a replica count. | +| [func percentageToFloat(percentage string) (float64, error)](#percentagetofloat) | Parses a string representing a percentage (e.g., `"25%"`) and returns its numeric value as a `float64` between 0 and 1. | + +## Exported Functions + +### CheckPDBIsValid + +**CheckPDBIsValid** - Determines whether the given `PodDisruptionBudget` (`pdb`) is logically consistent with the supplied replica count. It checks that `minAvailable` and `maxUnavailable` values are within valid ranges. + +#### Signature (Go) + +```go +func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether the given `PodDisruptionBudget` (`pdb`) is logically consistent with the supplied replica count. It checks that `minAvailable` and `maxUnavailable` values are within valid ranges. | +| **Parameters** | `pdb *policyv1.PodDisruptionBudget` – PDB to validate.
`replicas *int32` – Number of replicas; if nil, defaults to 1. | +| **Return value** | `(bool, error)` – Returns `true` and a `nil` error when the PDB is valid; otherwise returns `false` with an explanatory error. | +| **Key dependencies** | • `intOrStringToValue` – Converts `IntOrString` fields to integer values.
• `fmt.Errorf` – Formats validation errors. | +| **Side effects** | None. The function only reads inputs and performs calculations; no state mutation or I/O occurs. | +| **How it fits the package** | Provides core validation logic used by higher‑level tests that verify PodDisruptionBudget configurations for Deployments and StatefulSets. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"replicas nil?"} + B -- yes --> C["replicaCount = 1"] + B -- no --> D["replicaCount = *replicas"] + C & D --> E{"pdb.Spec.MinAvailable != nil?"} + E -- yes --> F["convert MinAvailable to int"] + F --> G{"error?"} + G -- yes --> H["return false, err"] + G -- no --> I{"minAvailable == 0?"} + I -- yes --> J["return false, error"] + I -- no --> K{"minAvailable > replicaCount?"} + K -- yes --> L["return false, error"] + K -- no --> M{"pdb.Spec.MaxUnavailable != nil?"} + E -- no --> M + M -- yes --> N["convert MaxUnavailable to int"] + N --> O{"error?"} + O -- yes --> P["return false, err"] + O -- no --> Q{"maxUnavailable >= replicaCount?"} + Q -- yes --> R["return false, error"] + Q -- no --> S["return true, nil"] + M -- no --> S +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CheckPDBIsValid --> func_intOrStringToValue + func_CheckPDBIsValid --> fmt_Errorf +``` + +#### Functions calling `CheckPDBIsValid` (Mermaid) + +```mermaid +graph TD + testPodDisruptionBudgets --> CheckPDBIsValid +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CheckPDBIsValid +import ( + policyv1 "k8s.io/api/policy/v1" +) + +// Assume pdb is a valid *policyv1.PodDisruptionBudget instance +var replicas int32 = 3 + +valid, err := CheckPDBIsValid(pdb, &replicas) +if err != nil { + fmt.Printf("Invalid PDB: %v\n", err) +} else if valid { + fmt.Println("PodDisruptionBudget is valid") +} +``` + +--- + +## Local Functions + +### intOrStringToValue + +**intOrStringToValue** - Transforms a Kubernetes `IntOrString` field into an absolute 
integer, interpreting strings as percentages of a replica count. + +#### 1) Signature (Go) + +```go +func(*intstr.IntOrString, int32)(int, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Transforms a Kubernetes `IntOrString` field into an absolute integer, interpreting strings as percentages of a replica count. | +| **Parameters** | *`intOrStr`* (`*intstr.IntOrString`) – the value to convert.
*`replicas`* (`int32`) – reference count for percentage calculations. | +| **Return value** | *`int`* – resolved integer value.
*`error`* – non‑nil if the input is invalid or of an unsupported type. | +| **Key dependencies** | `IntValue()` (from `k8s.io/apimachinery/pkg/util/intstr`)
`percentageToFloat()`
`math.RoundToEven()`
`fmt.Errorf()` | +| **Side effects** | None – purely functional; no global state mutation or I/O. | +| **How it fits the package** | Used by PDB validation logic to interpret `.spec.minAvailable` and `.spec.maxUnavailable` fields, which may be expressed as integers or percentage strings. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"intOrStr.Type"} + B -->|"Int"| C["Return intOrStr.IntValue()"] + B -->|"String"| D["Call percentageToFloat(intOrStr.StrVal)"] + D -->|"Err"| E["Return error"] + D -->|"OK"| F["Compute v * replicas"] + F --> G["Round to even using math.RoundToEven"] + G --> H["Return rounded int"] + B -->|"Other"| I["Return error “invalid type”"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_intOrStringToValue --> func_IntValue + func_intOrStringToValue --> func_percentageToFloat + func_intOrStringToValue --> fmt_Errorf + func_intOrStringToValue --> math_RoundToEven +``` + +#### 5) Functions calling `intOrStringToValue` (Mermaid) + +```mermaid +graph TD + func_CheckPDBIsValid --> func_intOrStringToValue +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking intOrStringToValue +import ( + "k8s.io/apimachinery/pkg/util/intstr" + pdbpkg "github.com/redhat-best-practices-for-k8s/certsuite/tests/observability/pdb" +) + +func main() { + // Example: .spec.minAvailable set to 30% + minAvail := intstr.FromString("30%") + replicas := int32(10) + + value, err := pdbpkg.intOrStringToValue(&minAvail, replicas) + if err != nil { + panic(err) + } + fmt.Printf("Resolved minAvailable: %d pods\n", value) // prints 3 +} +``` + +--- + +--- + +### percentageToFloat + +**percentageToFloat** - Parses a string representing a percentage (e.g., `"25%"`) and returns its numeric value as a `float64` between 0 and 1. 
 + +#### Signature (Go) + +```go +func percentageToFloat(percentage string) (float64, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a string representing a percentage (e.g., `"25%"`) and returns its numeric value as a `float64` between 0 and 1. | +| **Parameters** | `percentage string` – the input text containing a numeric value followed by `%`. | +| **Return value** | `float64` – the parsed fraction; `error` – non‑nil if parsing fails. | +| **Key dependencies** | • `fmt.Sscanf` for format‑based extraction.
• Constant `percentageDivisor` (value 100) to convert percent to a fraction. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by `intOrStringToValue` in the PDB tests to interpret string values that represent replica percentages. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Parse input"} + B -->|"Success"| C["Divide by 100"] + B -->|"Failure"| D["Return error"] + C --> E["Return float64"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_percentageToFloat --> fmt.Sscanf + func_percentageToFloat --> percentageDivisor +``` + +#### Functions calling `percentageToFloat` (Mermaid) + +```mermaid +graph TD + func_intOrStringToValue --> func_percentageToFloat +``` + +#### Usage example (Go) + +```go +// Minimal example invoking percentageToFloat +package main + +import ( + "fmt" +) + +func main() { + value, err := percentageToFloat("42%") + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Printf("Fraction: %f\n", value) // prints 0.420000 +} +``` + +--- diff --git a/docs/tests/operator/access/access.md b/docs/tests/operator/access/access.md new file mode 100644 index 000000000..497ff82ca --- /dev/null +++ b/docs/tests/operator/access/access.md @@ -0,0 +1,108 @@ +# Package access + +**Path**: `tests/operator/access` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [PermissionsHaveBadRule](#permissionshavebadrule) + +## Overview + +Provides utilities for validating that an Operator's deployment permissions do not grant service accounts access to OpenShift security context constraints, which is considered insecure. 
+ +### Key Features + +- Detects if any StrategyDeploymentPermissions rule allows 'security.openshift.io' API group access to the '*securitycontextconstraints*' resource +- Iterates over a slice of v1alpha1.StrategyDeploymentPermissions and returns a boolean flag +- Designed for use in tests or CI pipelines to enforce security best practices + +### Design Notes + +- Assumes that permissions are represented by operator-framework's v1alpha1.StrategyDeploymentPermissions type +- Only checks the specific insecure rule; other potential misconfigurations are not reported +- Best used as part of pre‑deployment validation or automated test suites + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func PermissionsHaveBadRule(clusterPermissions []v1alpha1.StrategyDeploymentPermissions) bool](#permissionshavebadrule) | Determines if any `StrategyDeploymentPermissions` contains a rule that allows a service account to access the *securitycontextconstraints* resource in the *security.openshift.io* API group, which is considered insecure. | + +## Exported Functions + +### PermissionsHaveBadRule + +**PermissionsHaveBadRule** - Determines if any `StrategyDeploymentPermissions` contains a rule that allows a service account to access the *securitycontextconstraints* resource in the *security.openshift.io* API group, which is considered insecure. + +#### Signature (Go) + +```go +func PermissionsHaveBadRule(clusterPermissions []v1alpha1.StrategyDeploymentPermissions) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if any `StrategyDeploymentPermissions` contains a rule that allows a service account to access the *securitycontextconstraints* resource in the *security.openshift.io* API group, which is considered insecure. | +| **Parameters** | - `clusterPermissions []v1alpha1.StrategyDeploymentPermissions` – slice of permissions defined in an operator’s CSV. 
| +| **Return value** | `bool` – `true` if at least one rule grants such access; otherwise `false`. | +| **Key dependencies** | • Iterates over `StrategyDeploymentPermissions`, their `Rules`, and checks the `APIGroups` and `Resources` fields. | +| **Side effects** | None – purely functional, no state mutation or I/O. | +| **How it fits the package** | Used by tests to flag operators that expose cluster‑level SCC permissions, contributing to compliance validation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"For each permission"} + B --> C{"Check rule API groups"} + C -->|"Match security group"| D{"Check resources"} + D -->|"Match SCC resource"| E["Mark bad rule"] + D -->|"No match"| F["Continue to next rule"] + C -->|"No match"| G["Skip rule"] + B --> H["Return result"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `PermissionsHaveBadRule` (Mermaid) + +```mermaid +graph TD + func_testOperatorInstallationAccessToSCC --> func_PermissionsHaveBadRule +``` + +#### Usage example (Go) + +```go +// Minimal example invoking PermissionsHaveBadRule +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/access" + v1alpha1 "github.com/openshift/api/apps/v1alpha1" +) + +func main() { + perms := []v1alpha1.StrategyDeploymentPermissions{ + { + Rules: []v1alpha1.PolicyRule{ + { + APIGroups: []string{"security.openshift.io"}, + Resources: []string{"securitycontextconstraints"}, + }, + }, + }, + } + if access.PermissionsHaveBadRule(perms) { + fmt.Println("Operator has insecure SCC permissions") + } else { + fmt.Println("Operator is compliant with SCC rules") + } +} +``` + +--- diff --git a/docs/tests/operator/catalogsource/catalogsource.md b/docs/tests/operator/catalogsource/catalogsource.md new file mode 100644 index 000000000..07e0635c7 --- /dev/null +++ b/docs/tests/operator/catalogsource/catalogsource.md @@ -0,0 
+1,113 @@ +# Package catalogsource + +**Path**: `tests/operator/catalogsource` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [SkipPMBasedOnChannel](#skippmbasedonchannel) + +## Overview + +The catalogsource package provides utilities for examining Operator Lifecycle Manager (OLM) catalog sources during tests, particularly to determine whether a PackageManifest should be processed based on its channel entries. + +### Key Features + +- Checks if a desired ClusterServiceVersion is already referenced in any of a PackageChannel's entries, allowing tests to skip redundant processing. + +### Design Notes + +- Assumes the input slice contains all channels for a PackageManifest; it only inspects the 'CurrentCSV' field. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func SkipPMBasedOnChannel(channels []olmpkgv1.PackageChannel, csvName string) bool](#skippmbasedonchannel) | Determines if a `PackageManifest` should be excluded from further processing because the desired CSV is already referenced in one of its channel entries. | + +## Exported Functions + +### SkipPMBasedOnChannel + +**SkipPMBasedOnChannel** - Determines if a `PackageManifest` should be excluded from further processing because the desired CSV is already referenced in one of its channel entries. + +#### 1) Signature (Go) + +```go +func SkipPMBasedOnChannel(channels []olmpkgv1.PackageChannel, csvName string) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if a `PackageManifest` should be excluded from further processing because the desired CSV is already referenced in one of its channel entries. | +| **Parameters** | - `channels []olmpkgv1.PackageChannel` – list of channels defined for the package.
- `csvName string` – name of the CSV currently being examined. | +| **Return value** | `bool`: `true` if the manifest should be skipped; `false` otherwise. | +| **Key dependencies** | • `log.Debug` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`
• `len` built‑in function | +| **Side effects** | Emits debug logs only; no state mutation or I/O. | +| **How it fits the package** | Used by catalog source tests to avoid double counting bundle references when an operator’s CSV is already listed in a channel entry of a `PackageManifest`. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over channels"} + B -->|"for each channel"| C["Log currentCSV and number of entries"] + C --> D{"Iterate over entries"} + D -->|"if entry.Name == csvName"| E["Set skipPMBasedOnChannel=false, break inner loop"] + D -->|"else"| F["continue"] + E --> G["break outer loop"] + F --> H["continue to next channel"] + G --> I["Return skipPMBasedOnChannel"] + H --> B + I --> J["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_SkipPMBasedOnChannel --> func_Log.Debug + func_SkipPMBasedOnChannel --> len +``` + +#### 5) Functions calling `SkipPMBasedOnChannel` (Mermaid) + +```mermaid +graph TD + testOperatorCatalogSourceBundleCount --> func_SkipPMBasedOnChannel +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking SkipPMBasedOnChannel +package main + +import ( + "fmt" + catalogsource "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/catalogsource" + olmpkgv1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + channels := []olmpkgv1.PackageChannel{ + { + CurrentCSV: "my-operator.v2", + Entries: []olmpkgv1.ChannelEntry{ + {Name: "my-operator.v1"}, + {Name: "my-operator.v2"}, + }, + }, + } + + skip := catalogsource.SkipPMBasedOnChannel(channels, "my-operator.v2") + fmt.Printf("Should skip package manifest? 
%t\n", skip) +} +``` + +--- + +--- diff --git a/docs/tests/operator/openapi/openapi.md b/docs/tests/operator/openapi/openapi.md new file mode 100644 index 000000000..6fcd0e2f6 --- /dev/null +++ b/docs/tests/operator/openapi/openapi.md @@ -0,0 +1,103 @@ +# Package openapi + +**Path**: `tests/operator/openapi` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [IsCRDDefinedWithOpenAPI3Schema](#iscrddefinedwithopenapi3schema) + +## Overview + +The package provides utilities for inspecting Custom Resource Definitions (CRDs) to determine if any of their versions declare an OpenAPI v3 schema. It is used in tests that need to verify CRD compliance with API‑extension specifications. + +### Key Features + +- Checks all versions of a CRD for the presence of an `OpenAPIV3Schema` field +- Returns a boolean indicating existence, simplifying test assertions +- Utilizes standard string operations to handle case‑insensitive matching + +### Design Notes + +- Assumes that the CRD’s `Spec.Versions` slice is populated; if empty the function returns false +- The check stops at the first detected schema, not validating its correctness +- Best practice: call after loading a fully‑resolved CRD from the API server to ensure accurate results + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func IsCRDDefinedWithOpenAPI3Schema(crd *apiextv1.CustomResourceDefinition) bool](#iscrddefinedwithopenapi3schema) | Determines whether any version of a Custom Resource Definition includes an OpenAPI v3 schema declaration. | + +## Exported Functions + +### IsCRDDefinedWithOpenAPI3Schema + +**IsCRDDefinedWithOpenAPI3Schema** - Determines whether any version of a Custom Resource Definition includes an OpenAPI v3 schema declaration. 
+ +#### Signature (Go) + +```go +func IsCRDDefinedWithOpenAPI3Schema(crd *apiextv1.CustomResourceDefinition) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether any version of a Custom Resource Definition includes an OpenAPI v3 schema declaration. | +| **Parameters** | `crd` – pointer to `apiextv1.CustomResourceDefinition`; the CRD to inspect. | +| **Return value** | `bool`: `true` if at least one version contains the substring “openapiv3schema” (case‑insensitive); otherwise `false`. | +| **Key dependencies** | • `strings.Contains`
• `strings.ToLower` (twice) | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a helper for tests that need to filter CRDs by schema definition, enabling compliance checks on operator‑provided CRDs. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over crd.Spec.Versions"} + B -->|"For each version"| C["Get version.Schema.String()"] + C --> D["Convert to lower case"] + D --> E["Check if contains “openapiv3schema” (lower case)"] + E -->|"Found"| F["Return true"] + E -->|"Not found"| G["Continue loop"] + B -->|"All versions checked"| H["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_IsCRDDefinedWithOpenAPI3Schema --> func_Contains + func_IsCRDDefinedWithOpenAPI3Schema --> func_ToLower + func_IsCRDDefinedWithOpenAPI3Schema --> func_ToLower +``` + +#### Functions calling `IsCRDDefinedWithOpenAPI3Schema` + +```mermaid +graph TD + func_testOperatorCrdOpenAPISpec --> func_IsCRDDefinedWithOpenAPI3Schema +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsCRDDefinedWithOpenAPI3Schema +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/openapi" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +func checkCRDSchema(crd *apiextv1.CustomResourceDefinition) { + if openapi.IsCRDDefinedWithOpenAPI3Schema(crd) { + fmt.Println("CRD uses OpenAPI v3 schema") + } else { + fmt.Println("CRD does not use OpenAPI v3 schema") + } +} +``` + +--- diff --git a/docs/tests/operator/operator.md b/docs/tests/operator/operator.md new file mode 100644 index 000000000..b505b6ee0 --- /dev/null +++ b/docs/tests/operator/operator.md @@ -0,0 +1,1753 @@ +# Package operator + +**Path**: `tests/operator` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [CsvResult](#csvresult) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) + - 
[OperatorInstalledMoreThanOnce](#operatorinstalledmorethanonce) + - [SplitCsv](#splitcsv) +- [Local Functions](#local-functions) + - [checkIfCsvUnderTest](#checkifcsvundertest) + - [checkValidOperatorInstallation](#checkvalidoperatorinstallation) + - [findPodsNotBelongingToOperators](#findpodsnotbelongingtooperators) + - [getAllPodsBy](#getallpodsby) + - [getCsvsBy](#getcsvsby) + - [isCsvInNamespaceClusterWide](#iscsvinnamespaceclusterwide) + - [isMultiNamespacedOperator](#ismultinamespacedoperator) + - [isSingleNamespacedOperator](#issinglenamespacedoperator) + - [testMultipleSameOperators](#testmultiplesameoperators) + - [testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces](#testonlysingleormultinamespacedoperatorsallowedintenantnamespaces) + - [testOperatorCatalogSourceBundleCount](#testoperatorcatalogsourcebundlecount) + - [testOperatorCrdOpenAPISpec](#testoperatorcrdopenapispec) + - [testOperatorCrdVersioning](#testoperatorcrdversioning) + - [testOperatorInstallationAccessToSCC](#testoperatorinstallationaccesstoscc) + - [testOperatorInstallationPhaseSucceeded](#testoperatorinstallationphasesucceeded) + - [testOperatorOlmSkipRange](#testoperatorolmskiprange) + - [testOperatorOlmSubscription](#testoperatorolmsubscription) + - [testOperatorPodsNoHugepages](#testoperatorpodsnohugepages) + - [testOperatorSemanticVersioning](#testoperatorsemanticversioning) + - [testOperatorSingleCrdOwner](#testoperatorsinglecrdowner) + +## Overview + +The operator test suite registers checks that validate Operator Lifecycle Manager (OLM) installations, configuration and runtime behavior in a Kubernetes cluster. 
+ +### Key Features + +- Provides helpers to parse CSV strings and determine namespace relationships +- Implements validation for OLM annotations, CRD ownership, SCC permissions, versioning and resource limits +- Generates detailed report objects for operators, catalog sources, pods and CRDs + +### Design Notes + +- Assumes a global TestEnvironment with operator and pod data +- Checks are registered via LoadChecks which constructs checksdb groups +- Functions return simple booleans or slice of error strings to allow aggregation + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**CsvResult**](#csvresult) | Holds the outcome of parsing a CSV string | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Populates the *operator* test group with a series of checks that validate operator installation, configuration, and runtime behaviour. | +| [func OperatorInstalledMoreThanOnce(*provider.Operator, *provider.Operator)(bool)](#operatorinstalledmorethanonce) | Function implementation | +| [func SplitCsv(csv string) CsvResult](#splitcsv) | Extracts the CSV name (`NameCsv`) and optional namespace (`Namespace`) from a comma‑separated input string. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (*v1alpha1.ClusterServiceVersion) bool](#checkifcsvundertest) | Returns `true` if the supplied CSV matches any operator defined in the global `env.Operators` slice, indicating it is being tested. 
| +| [func checkValidOperatorInstallation(namespace string) ( isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators []string, nonSingleOrMultiNamespaceOperators []string, csvsTargetingNamespace []string, operatorsFoundButNotUnderTest []string, podsNotBelongingToOperators []string, err error)](#checkvalidoperatorinstallation) | Determines whether a namespace is dedicated to single‑ or multi‑namespaced operators, gathers lists of operator CSVs and pods that violate the expected installation rules, and reports any errors encountered. | +| [func findPodsNotBelongingToOperators(namespace string) ([]string, error)](#findpodsnotbelongingtooperators) | Returns the names of all pods within *namespace* that are not owned by any operator (ClusterServiceVersion) in that same namespace. | +| [func getAllPodsBy(namespace string, allPods []*provider.Pod) (podsInNamespace []*provider.Pod)](#getallpodsby) | Filters a slice of pod objects, returning only those whose `Namespace` field matches the supplied namespace. | +| [func getCsvsBy(namespace string, allCsvs []*v1alpha1.ClusterServiceVersion) (csvsInNamespace []*v1alpha1.ClusterServiceVersion)](#getcsvsby) | Filters a slice of `ClusterServiceVersion` objects, returning only those whose `Namespace` field matches the supplied namespace. | +| [func isCsvInNamespaceClusterWide(csvName string, allCsvs []*v1alpha1.ClusterServiceVersion) bool](#iscsvinnamespaceclusterwide) | Checks whether the ClusterServiceVersion (CSV) identified by `csvName` targets every namespace in the cluster. It returns `true` if no specific target namespaces are annotated, indicating a cluster‑wide operator. | +| [func isMultiNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool](#ismultinamespacedoperator) | Checks whether a given operator runs across more than one namespace while *excluding* its own namespace from the list of targets. 
| +| [func isSingleNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool](#issinglenamespacedoperator) | Determines if the operator’s *olm.targetNamespaces* annotation specifies exactly one namespace that is different from the operator’s own namespace. | +| [func testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironment)](#testmultiplesameoperators) | Validates that each operator’s CSV name appears only once across the cluster; reports compliance or non‑compliance. | +| [func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *checksdb.Check, env *provider.TestEnvironment)](#testonlysingleormultinamespacedoperatorsallowedintenantnamespaces) | Validates that a namespace dedicated to operator installation contains only single or multi‑namespaced operators and no other operator artifacts (e.g., cluster‑wide CSVs, pods outside the operator’s scope). | +| [func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorcatalogsourcebundlecount) | Validates that every catalog source used by an operator has a bundle count below 1,000. It logs results and records compliant/non‑compliant objects for reporting. | +| [func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorcrdopenapispec) | Validates that each Custom Resource Definition (CRD) managed by an operator declares an OpenAPI v3 schema in its spec. | +| [func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorcrdversioning) | Ensures every Custom Resource Definition (CRD) provided by an Operator follows Kubernetes‑style version naming (`v[alpha/beta]…`). | +| [func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorinstallationaccesstoscc) | Verifies that none of an operator’s cluster permissions grant access to Security Context Constraints (SCCs). 
Operators with such rules are flagged as non‑compliant. | +| [func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorinstallationphasesucceeded) | Verifies every operator in the test environment has reached the *Succeeded* status and records compliance results. | +| [func testOperatorOlmSkipRange(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorolmskiprange) | Confirms every operator in the test environment includes an `olm.skipRange` annotation on its ClusterServiceVersion (CSV). | +| [func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnvironment) {}](#testoperatorolmsubscription) | Checks whether every operator in the environment has an OLM subscription; records compliant and non‑compliant results. | +| [func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorpodsnohugepages) | Verifies that none of the operator‑managed pods request huge page memory. If a pod requests huge pages it is marked non‑compliant; otherwise it is compliant. | +| [func (*checksdb.Check, *provider.TestEnvironment)()](#testoperatorsemanticversioning) | Validates that every operator in the test environment has a version string that conforms to [Semantic Versioning](https://semver.org/). | +| [func testOperatorSingleCrdOwner(check *checksdb.Check, env *provider.TestEnvironment)](#testoperatorsinglecrdowner) | Ensures every CRD declared in the environment’s operators is owned by a single operator. If multiple operators own the same CRD name, the check flags it as non‑compliant. | + +## Structs + +### CsvResult + +#### Fields + +| Field | Type | Description | +|-----------|--------|-------------| +| `NameCsv` | `string` | The comma‑separated list of names extracted from the input CSV, after trimming whitespace. | +| `Namespace` | `string` | The namespace value prefixed with `ns=` in the input; if none is present it remains an empty string. 
| + +#### Purpose + +`CsvResult` aggregates the pieces returned by the `SplitCsv` helper: a cleaned list of names and an optional namespace extracted from a CSV‑formatted string. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `SplitCsv` | Parses a CSV string, separates components by commas, trims spaces, assigns values to `NameCsv` or `Namespace`, and returns the populated `CsvResult`. | + +--- + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Populates the *operator* test group with a series of checks that validate operator installation, configuration, and runtime behaviour. + +#### 1) Signature (Go) + +```go +func LoadChecks() +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Populates the *operator* test group with a series of checks that validate operator installation, configuration, and runtime behaviour. | +| **Parameters** | None | +| **Return value** | None | +| **Key dependencies** | `log.Debug` – logs the loading action.
`checksdb.NewChecksGroup` – creates a check group for operators.
`WithBeforeEachFn`, `Add`, `WithCheckFn`, `WithSkipCheckFn` – configure each individual check.
Test helpers such as `GetNoOperatorsSkipFn`, `GetNoOperatorCrdsSkipFn`, `GetNoCatalogSourcesSkipFn`, etc., provide skip logic.
Specific test functions (`testOperatorInstallationPhaseSucceeded`, `testOperatorInstallationAccessToSCC`, …) perform the actual validation. | +| **Side effects** | Mutates the global checks database by adding a new group and its checks; emits debug log entries. No external I/O is performed directly. | +| **How it fits the package** | Called from `pkg/certsuite.LoadInternalChecksDB` to ensure operator‑related tests are available when the test suite runs. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start LoadChecks"] --> B["Log debug message"] + B --> C["Create checks group “operator”"] + C --> D{"Add individual checks"} + D --> E1["Check: Install Status Succeeded"] + D --> E2["Check: No SCC Access"] + D --> E3["Check: Installed via OLM"] + D --> E4["Check: Semantic Versioning"] + D --> E5["Check: CRD Versioning"] + D --> E6["Check: CRD Schema"] + D --> E7["Check: Single CRD Owner"] + D --> E8["Check: Pods No Hugepages"] + D --> E9["Check: OLM Skip Range"] + D --> E10["Check: Multiple Same Operators"] + D --> E11["Check: Catalog Source Bundle Count"] + D --> E12["Check: Tenant Namespace Operator Installation"] + E1 -.->|"skip if no operators"| F["GetNoOperatorsSkipFn"] + E2 -.->|"skip if no operators"| F + E3 -.->|"skip if no operators"| F + E4 -.->|"skip if no operators"| F + E5 -.->|"skip if no CRDs"| G["GetNoOperatorCrdsSkipFn"] + E6 -.->|"skip if no CRDs"| G + E7 -.->|"skip if no operators"| F + E8 -.->|"skip if no operators"| F + E8 -.->|"skip if no pods"| H["GetNoOperatorPodsSkipFn"] + E9 -.->|"skip if no operators"| F + E10 -.->|"skip if no operators"| F + E11 -.->|"skip if no catalog sources"| I["GetNoCatalogSourcesSkipFn"] + E12 -.->|"skip if no operators"| F + D --> J["End LoadChecks"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> log_Debug + func_LoadChecks --> checksdb_NewChecksGroup + func_LoadChecks --> WithBeforeEachFn + func_LoadChecks --> Add + func_LoadChecks --> 
WithCheckFn + func_LoadChecks --> WithSkipCheckFn + func_LoadChecks --> testOperatorInstallationPhaseSucceeded + func_LoadChecks --> testOperatorInstallationAccessToSCC + func_LoadChecks --> testOperatorOlmSubscription + func_LoadChecks --> testOperatorSemanticVersioning + func_LoadChecks --> testOperatorCrdVersioning + func_LoadChecks --> testOperatorCrdOpenAPISpec + func_LoadChecks --> testOperatorSingleCrdOwner + func_LoadChecks --> testOperatorPodsNoHugepages + func_LoadChecks --> testOperatorOlmSkipRange + func_LoadChecks --> testMultipleSameOperators + func_LoadChecks --> testOperatorCatalogSourceBundleCount + func_LoadChecks --> testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces +``` + +#### 5) Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + func_LoadInternalChecksDB --> func_LoadChecks +``` + +#### 6) Usage example (Go) + +```go +// Within the package initialization, ensure operator checks are registered: +func init() { + // Load all internal check groups, including operators. + certsuite.LoadInternalChecksDB() +} +``` + +--- + +### OperatorInstalledMoreThanOnce + + +**Signature**: `func(*provider.Operator, *provider.Operator)(bool)` + +**Purpose**: + +--- + +### SplitCsv + +**SplitCsv** - Extracts the CSV name (`NameCsv`) and optional namespace (`Namespace`) from a comma‑separated input string. + +Splits a CSV‑style string into its component name and namespace values. + +--- + +#### Signature (Go) + +```go +func SplitCsv(csv string) CsvResult +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Extracts the CSV name (`NameCsv`) and optional namespace (`Namespace`) from a comma‑separated input string. | +| **Parameters** | `csv string` – The raw CSV representation (e.g., `"name, ns=namespace"`). | +| **Return value** | `CsvResult` – A struct containing `NameCsv` and `Namespace`. | +| **Key dependencies** | • `strings.Split`
• `strings.TrimSpace`
• `strings.HasPrefix`
• `strings.TrimPrefix` | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Provides a helper for tests that need to parse CSV identifiers before performing checks on operator pods. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + input_csv --> split_parts["Split by ,"] + split_parts --> trim_parts["Trim spaces"] + trim_parts --> check_prefix{"Has ns= prefix"} + check_prefix -- Yes --> set_namespace["Set Namespace"] + check_prefix -- No --> set_namecsv["Set NameCsv"] + set_namespace & set_namecsv --> return_result["Return CsvResult"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_SplitCsv --> strings.Split + func_SplitCsv --> strings.TrimSpace + func_SplitCsv --> strings.HasPrefix + func_SplitCsv --> strings.TrimPrefix +``` + +--- + +#### Functions calling `SplitCsv` (Mermaid) + +```mermaid +graph TD + testOperatorPodsNoHugepages --> func_SplitCsv +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking SplitCsv +csv := "my-operator, ns=my-namespace" +result := SplitCsv(csv) +fmt.Printf("Name: %s, Namespace: %s\n", result.NameCsv, result.Namespace) +// Output: Name: my-operator, Namespace: my-namespace +``` + +--- + +## Local Functions + +### checkIfCsvUnderTest + +**checkIfCsvUnderTest** - Returns `true` if the supplied CSV matches any operator defined in the global `env.Operators` slice, indicating it is being tested. + +Determines whether a given CSV is part of the current test operator set. + +```go +func (*v1alpha1.ClusterServiceVersion) bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if the supplied CSV matches any operator defined in the global `env.Operators` slice, indicating it is being tested. | +| **Parameters** | `csv *v1alpha1.ClusterServiceVersion` – the CSV to evaluate | +| **Return value** | `bool` – `true` when the CSV belongs to a test operator; otherwise `false`. 
| +| **Key dependencies** | • `env.Operators` (global slice of test operators)
• Comparison of `csv.Name` with each `testOperator.Csv.Name` |
+| **Side effects** | None; purely functional. |
+| **How it fits the package** | Used by higher‑level validation logic to filter out CSVs that are not relevant to the current operator installation test. |
+
+#### Internal workflow
+
+```mermaid
+flowchart TD
+    subgraph IterateOverOperators["Iterate over env.Operators"]
+        iterate --> compareName
+    end
+    compareName -- if equal --> returnTrue
+    compareName -- else --> nextOperator
+    nextOperator --> iterate
+    iterate -- finished --> returnFalse
+```
+
+#### Function dependencies
+
+None – `checkIfCsvUnderTest` calls no other functions; it only compares `csv.Name` against the entries of the global `env.Operators` slice.
+
+#### Functions calling `checkIfCsvUnderTest`
+
+```mermaid
+graph TD
+    func_checkValidOperatorInstallation --> func_checkIfCsvUnderTest
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking checkIfCsvUnderTest.
+// Note: the function is unexported, so this snippet must live inside the
+// operator package itself (e.g., in a test file under tests/operator).
+import (
+    v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Assume csv is a previously retrieved ClusterServiceVersion instance.
+// Name is part of the embedded ObjectMeta, not a direct struct field.
+csv := &v1alpha1.ClusterServiceVersion{
+    ObjectMeta: metav1.ObjectMeta{Name: "my-operator.v1.0.0"},
+}
+if checkIfCsvUnderTest(csv) {
+    fmt.Println("CSV is part of the test set.")
+} else {
+    fmt.Println("CSV is not under test.")
+}
+```
+
+---
+
+### checkValidOperatorInstallation
+
+**checkValidOperatorInstallation** - Determines whether a namespace is dedicated to single‑ or multi‑namespaced operators, gathers lists of operator CSVs and pods that violate the expected installation rules, and reports any errors encountered. 
+ +#### Signature (Go) + +```go +func checkValidOperatorInstallation(namespace string) ( + isDedicatedOperatorNamespace bool, + singleOrMultiNamespaceOperators []string, + nonSingleOrMultiNamespaceOperators []string, + csvsTargetingNamespace []string, + operatorsFoundButNotUnderTest []string, + podsNotBelongingToOperators []string, + err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether a namespace is dedicated to single‑ or multi‑namespaced operators, gathers lists of operator CSVs and pods that violate the expected installation rules, and reports any errors encountered. | +| **Parameters** | `namespace string` – the Kubernetes namespace under evaluation. | +| **Return value** | 1) `isDedicatedOperatorNamespace bool`: true if at least one single‑ or multi‑namespaced operator is installed in this namespace.
2) `singleOrMultiNamespaceOperators []string`: names of operators that satisfy single/multi installation criteria.
3) `nonSingleOrMultiNamespaceOperators []string`: names of operators whose install mode differs from the above.
4) `csvsTargetingNamespace []string`: CSVs targeting this namespace but installed elsewhere.
5) `operatorsFoundButNotUnderTest []string`: operator CSVs found in the namespace that are not part of the current test environment.
6) `podsNotBelongingToOperators []string`: pods present in the namespace that lack a valid operator owner.
7) `err error`: any error encountered during processing. | +| **Key dependencies** | • `getCsvsBy` – filters all CSVs to those in the target namespace.
• `strings.Split` – parses comma‑separated target namespaces.
• `checkIfCsvUnderTest` – checks if a CSV is part of the test environment.
• `isSingleNamespacedOperator`, `isMultiNamespacedOperator` – determine installation scope.
• `isCsvInNamespaceClusterWide` – identifies cluster‑wide operators.
• `findPodsNotBelongingToOperators` – lists pods without operator ownership. | +| **Side effects** | No global state mutation; only reads from the test environment (`env`). Produces error values but does not perform I/O or spawn goroutines. | +| **How it fits the package** | This helper supports tests that validate tenant namespace isolation for operators, ensuring that each namespace hosts only approved operator configurations and no stray pods. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Get CSVs in namespace"} + B --> C["Iterate over CSVs"] + C --> D{"Namespace matches operators own namespace?"} + D -- Yes --> E{"CSV under test?"} + E -- Yes --> F{"Is single/multi‑namespaced?"} + F -- Yes --> G["Add to singleOrMulti list"] + F -- No --> H["Add to nonSingleOrMulti list"] + E -- No --> I["Add to operatorsFoundButNotUnderTest"] + D -- No --> J{"Is CSV cluster‑wide?"} + J -- No --> K["Add to csvsTargetingNamespace"] + C --> L["End iteration"] + L --> M["Find pods not belonging to operators"] + M --> N{"Error?"} + N -- Yes --> O["Return with error"] + N -- No --> P{"Check validity"} + P --> Q{"Any violations?"} + Q -- Yes --> R["isValid = false"] + Q -- No --> S["isValid = true"] + R & S --> T["Return results"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_checkValidOperatorInstallation --> func_getCsvsBy + func_checkValidOperatorInstallation --> strings.Split + func_checkValidOperatorInstallation --> func_checkIfCsvUnderTest + func_checkValidOperatorInstallation --> func_isSingleNamespacedOperator + func_checkValidOperatorInstallation --> func_isMultiNamespacedOperator + func_checkValidOperatorInstallation --> func_isCsvInNamespaceClusterWide + func_checkValidOperatorInstallation --> func_findPodsNotBelongingToOperators +``` + +#### Functions calling `checkValidOperatorInstallation` (Mermaid) + +```mermaid +graph TD + func_testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces --> 
func_checkValidOperatorInstallation +``` + +#### Usage example (Go) + +```go +// Minimal example invoking checkValidOperatorInstallation +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator" +) + +func main() { + namespace := "tenant-namespace" + + isDedicated, singleOps, nonSingleOps, + targetedCSVs, othersNotUnderTest, orphanPods, err := + operator.checkValidOperatorInstallation(namespace) + + if err != nil { + fmt.Printf("Error checking namespace %s: %v\n", namespace, err) + return + } + + fmt.Printf("Namespace %s dedicated? %t\n", namespace, isDedicated) + fmt.Printf("Single/multi‑namespaced ops: %v\n", singleOps) + fmt.Printf("Non‑single/multi ops: %v\n", nonSingleOps) + fmt.Printf("CSVs targeting namespace: %v\n", targetedCSVs) + fmt.Printf("Operators not under test: %v\n", othersNotUnderTest) + fmt.Printf("Orphan pods: %v\n", orphanPods) +} +``` + +--- + +### findPodsNotBelongingToOperators + +**findPodsNotBelongingToOperators** - Returns the names of all pods within *namespace* that are not owned by any operator (ClusterServiceVersion) in that same namespace. + +#### Signature (Go) + +```go +func findPodsNotBelongingToOperators(namespace string) ([]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns the names of all pods within *namespace* that are not owned by any operator (ClusterServiceVersion) in that same namespace. | +| **Parameters** | `namespace` (string) – the Kubernetes namespace to inspect. | +| **Return value** | `[]string` – list of pod names lacking an operator owner; `error` – error from owner resolution or nil. | +| **Key dependencies** | • `getAllPodsBy(namespace, env.AllPods)` – filters all known pods to those in *namespace*.
• `podhelper.GetPodTopOwner(pod.Namespace, pod.OwnerReferences)` – resolves the top‑level owners of a pod.
• Kubernetes API types (`v1alpha1.ClusterServiceVersionKind`). | +| **Side effects** | None; purely functional. No state mutation or I/O beyond reading global `env.AllPods`. | +| **How it fits the package** | Used by operator validation routines to ensure that every pod in a namespace is controlled by an operator, helping detect orphaned workloads. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get all pods in namespace"] --> B{"Iterate over each pod"} + B --> C["Resolve top owners via GetPodTopOwner"] + C --> D{"Any owner is a CSV in the same namespace?"} + D -- Yes --> E["Skip pod"] + D -- No --> F["Add pod name to result list"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_findPodsNotBelongingToOperators --> func_getAllPodsBy + func_findPodsNotBelongingToOperators --> func_GetPodTopOwner + func_findPodsNotBelongingToOperators --> func_append +``` + +#### Functions calling `findPodsNotBelongingToOperators` (Mermaid) + +```mermaid +graph TD + func_checkValidOperatorInstallation --> func_findPodsNotBelongingToOperators +``` + +#### Usage example (Go) + +```go +// Minimal example invoking findPodsNotBelongingToOperators +namespace := "my-operator-ns" +orphanedPods, err := findPodsNotBelongingToOperators(namespace) +if err != nil { + log.Fatalf("error finding orphaned pods: %v", err) +} +fmt.Printf("Pods not owned by an operator in %s: %v\n", namespace, orphanedPods) +``` + +--- + +--- + +### getAllPodsBy + +**getAllPodsBy** - Filters a slice of pod objects, returning only those whose `Namespace` field matches the supplied namespace. + +#### Signature (Go) + +```go +func getAllPodsBy(namespace string, allPods []*provider.Pod) (podsInNamespace []*provider.Pod) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters a slice of pod objects, returning only those whose `Namespace` field matches the supplied namespace. 
| +| **Parameters** | *`namespace`* – string; target namespace.
*`allPods`* – slice of pointers to `provider.Pod`; complete pod collection. | +| **Return value** | *`podsInNamespace`* – slice of pointers to `provider.Pod` that belong to the specified namespace. | +| **Key dependencies** | • `append` (built‑in) for accumulating results.
• Accesses `pod.Namespace`. | +| **Side effects** | None; pure function with no external state changes or I/O. | +| **How it fits the package** | Utility helper used by higher‑level functions to isolate pods of a given namespace before further processing (e.g., ownership checks). | + +#### Internal workflow + +```mermaid +flowchart TD + Start --> ForEachPod["For each pod in allPods"] + ForEachPod --> CheckNamespace{"pod.Namespace == namespace"} + CheckNamespace -- Yes --> Append["podsInNamespace = append(podsInNamespace, pod)"] + Append --> NextIteration + CheckNamespace -- No --> NextIteration + NextIteration --> End +``` + +#### Function dependencies + +```mermaid +graph TD + func_getAllPodsBy --> func_append +``` + +#### Functions calling `getAllPodsBy` + +```mermaid +graph TD + func_findPodsNotBelongingToOperators --> func_getAllPodsBy +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getAllPodsBy +pods := []*provider.Pod{ + {Name: "pod1", Namespace: "dev"}, + {Name: "pod2", Namespace: "prod"}, +} +namespace := "dev" +filtered := getAllPodsBy(namespace, pods) +// filtered now contains only pod1 +``` + +--- + +### getCsvsBy + +**getCsvsBy** - Filters a slice of `ClusterServiceVersion` objects, returning only those whose `Namespace` field matches the supplied namespace. + +```go +func getCsvsBy(namespace string, allCsvs []*v1alpha1.ClusterServiceVersion) (csvsInNamespace []*v1alpha1.ClusterServiceVersion) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Filters a slice of `ClusterServiceVersion` objects, returning only those whose `Namespace` field matches the supplied namespace. | +| **Parameters** | - `namespace string` – Target namespace to filter by.
- `allCsvs []*v1alpha1.ClusterServiceVersion` – Complete list of CSVs to search through. | +| **Return value** | `csvsInNamespace []*v1alpha1.ClusterServiceVersion` – Slice containing all CSVs that belong to the specified namespace. | +| **Key dependencies** | • Calls the built‑in `append` function.
• Relies on the `ClusterServiceVersion.Namespace` field from the `github.com/operator-framework/api/pkg/operators/v1alpha1` package. |
+| **Side effects** | None; purely functional, no mutation of input slices or external state. |
+| **How it fits the package** | Supports operator validation by isolating CSVs relevant to a particular namespace before performing further checks. |
+
+#### Internal workflow
+
+```mermaid
+flowchart TD
+    A["Start"] --> B{"Iterate over allCsvs"}
+    B -->|"csv.Namespace == namespace"| C["Append csv to result"]
+    B -->|"else"| D["Skip"]
+    C --> B
+    D --> B
+    B --> E["Return csvsInNamespace"]
+```
+
+#### Function dependencies
+
+```mermaid
+graph TD
+    func_getCsvsBy --> func_append
+```
+
+#### Functions calling `getCsvsBy`
+
+```mermaid
+graph TD
+    func_checkValidOperatorInstallation --> func_getCsvsBy
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking getCsvsBy
+package main
+
+import (
+    v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func main() {
+    // Example CSV objects (normally obtained from the cluster).
+    // Namespace is part of the embedded ObjectMeta, not a direct struct field.
+    csv1 := &v1alpha1.ClusterServiceVersion{ObjectMeta: metav1.ObjectMeta{Namespace: "dev"}}
+    csv2 := &v1alpha1.ClusterServiceVersion{ObjectMeta: metav1.ObjectMeta{Namespace: "prod"}}
+    allCsvs := []*v1alpha1.ClusterServiceVersion{csv1, csv2}
+
+    // Retrieve CSVs belonging to the "dev" namespace
+    devCsvs := getCsvsBy("dev", allCsvs)
+
+    // devCsvs now contains only csv1
+    _ = devCsvs
+}
+```
+
+---
+
+### isCsvInNamespaceClusterWide
+
+**isCsvInNamespaceClusterWide** - Checks whether the ClusterServiceVersion (CSV) identified by `csvName` targets every namespace in the cluster. It returns `true` if no specific target namespaces are annotated, indicating a cluster‑wide operator.
+
+#### Signature (Go)
+
+```go
+func isCsvInNamespaceClusterWide(csvName string, allCsvs []*v1alpha1.ClusterServiceVersion) bool
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Checks whether the ClusterServiceVersion (CSV) identified by `csvName` targets every namespace in the cluster. 
It returns `true` if no specific target namespaces are annotated, indicating a cluster‑wide operator. | +| **Parameters** | *`csvName`* `string` – Name of the CSV to inspect.
*`allCsvs`* `[]*v1alpha1.ClusterServiceVersion` – Slice containing all known CSV objects. | +| **Return value** | `bool` – `true` if the CSV is cluster‑wide; `false` otherwise. | +| **Key dependencies** | *Iteration over `allCsvs`.
* Access to each CSV’s `Annotations["olm.targetNamespaces"]`. | +| **Side effects** | None. The function only reads data and returns a value. | +| **How it fits the package** | Used by higher‑level validation logic (e.g., `checkValidOperatorInstallation`) to differentiate between cluster‑wide and namespace‑scoped operators when evaluating operator installation correctness. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"For each CSV in allCsvs"} + B -->|"csv.Name == csvName"| C["Check annotations olm.targetNamespaces"] + C -->|"exists && non‑empty"| D["Set isClusterWide = false & break"] + C -->|"otherwise"| E["Continue loop"] + B --> F("End") + E --> F +``` + +#### Function dependencies + +None – this function does not call any other functions in the package. + +#### Functions calling `isCsvInNamespaceClusterWide` + +```mermaid +graph TD + func_checkValidOperatorInstallation --> func_isCsvInNamespaceClusterWide +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isCsvInNamespaceClusterWide +package main + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator" + v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + // Assume we have a slice of CSV objects loaded elsewhere. + var allCsvs []*v1alpha1.ClusterServiceVersion + + csvName := "example-operator.v0.1.0" + + if operator.IsCsvInNamespaceClusterWide(csvName, allCsvs) { + fmt.Printf("CSV %s is cluster‑wide.\n", csvName) + } else { + fmt.Printf("CSV %s targets specific namespaces.\n", csvName) + } +} +``` + +*Note:* The function itself is unexported (`isCsvInNamespaceClusterWide`). In the example, it is accessed via a hypothetical exported wrapper `operator.IsCsvInNamespaceClusterWide` for illustration purposes. 
+ +--- + +### isMultiNamespacedOperator + +**isMultiNamespacedOperator** - Checks whether a given operator runs across more than one namespace while *excluding* its own namespace from the list of targets. + +#### 1) Signature (Go) + +```go +func isMultiNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether a given operator runs across more than one namespace while *excluding* its own namespace from the list of targets. | +| **Parameters** | `operatorNamespace string` – the namespace where the operator itself is installed.
`targetNamespaces []string` – namespaces declared in the CSV’s `olm.targetNamespaces` annotation. | +| **Return value** | `bool` – `true` if `len(targetNamespaces) > 1` and the operator’s own namespace is not among them; otherwise `false`. | +| **Key dependencies** | • `len` (builtin)
• `github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper.StringInSlice` | +| **Side effects** | None. Pure function – no state mutation or I/O. | +| **How it fits the package** | Used by operator validation logic to classify operators as *single‑or‑multi‑namespace* installations. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"len(targetNamespaces) > 1"} + B -- No --> C["Return false"] + B -- Yes --> D{"StringInSlice(targetNamespaces, operatorNamespace, false)"} + D -- Yes --> C + D -- No --> E["Return true"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_isMultiNamespacedOperator --> builtin_len + func_isMultiNamespacedOperator --> func_StringInSlice +``` + +#### 5) Functions calling `isMultiNamespacedOperator` (Mermaid) + +```mermaid +graph TD + func_checkValidOperatorInstallation --> func_isMultiNamespacedOperator +``` + +#### 6) Usage example (Go) + +```go +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator" +) + +func main() { + operatorNS := "operators" + targets := []string{"dev", "prod", "operators"} + + if operator.isMultiNamespacedOperator(operatorNS, targets) { + fmt.Println("This operator spans multiple namespaces.") + } else { + fmt.Println("Not a multi‑namespaced operator.") + } +} +``` + +--- + +### isSingleNamespacedOperator + +**isSingleNamespacedOperator** - Determines if the operator’s *olm.targetNamespaces* annotation specifies exactly one namespace that is different from the operator’s own namespace. + +Checks whether an operator is intended for a single namespace that differs from its installation namespace. 
+ +#### Signature (Go) + +```go +func isSingleNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the operator’s *olm.targetNamespaces* annotation specifies exactly one namespace that is different from the operator’s own namespace. | +| **Parameters** | `operatorNamespace` (string) – Namespace where the CSV is installed.
`targetNamespaces` ([]string) – List of namespaces parsed from the CSV’s *olm.targetNamespaces* annotation. | +| **Return value** | `bool` – `true` if exactly one target namespace exists and it is not equal to the installation namespace; otherwise `false`. | +| **Key dependencies** | • `len` (built‑in) to count elements in `targetNamespaces`. | +| **Side effects** | None. Pure function. | +| **How it fits the package** | Used by `checkValidOperatorInstallation` to classify operators as single‑namespace or multi‑namespace installations during validation of operator deployments. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Evaluate len(targetNamespaces)"] + B --> C{"len == 1"} + C -- "yes" --> D["Compare operatorNamespace vs targetNamespaces\\[0\\]"] + D --> E{"operatorNamespace != targetNamespaces\\[0\\]"} + E -- "yes" --> F["Return true"] + E -- "no" --> G["Return false"] + C -- "no" --> H["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isSingleNamespacedOperator --> func_len +``` + +#### Functions calling `isSingleNamespacedOperator` (Mermaid) + +```mermaid +graph TD + func_checkValidOperatorInstallation --> func_isSingleNamespacedOperator +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isSingleNamespacedOperator +namespace := "operators" +targets := []string{"app-1"} +isSingle := isSingleNamespacedOperator(namespace, targets) +// isSingle == true because the operator runs in a different namespace than its target +``` + +--- + +### testMultipleSameOperators + +**testMultipleSameOperators** - Validates that each operator’s CSV name appears only once across the cluster; reports compliance or non‑compliance. 
+ +#### 1) Signature (Go) + +```go +func testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that each operator’s CSV name appears only once across the cluster; reports compliance or non‑compliance. | +| **Parameters** | `check` – test framework check object to log and set results.
`env` – environment containing all discovered operators (`AllOperators`). | +| **Return value** | None (the result is stored in the `check` object). | +| **Key dependencies** | *`OperatorInstalledMoreThanOnce(op, op2)` – comparison helper.
* `testhelper.NewOperatorReportObject()` – creates a report entry.
*Logging helpers: `LogInfo`, `LogDebug`.
* `check.SetResult()` – finalises the test outcome. | +| **Side effects** | Logs diagnostic messages; mutates the internal state of `check` by setting result objects. No external I/O. | +| **How it fits the package** | Part of the *operator* test suite; ensures that OLM does not install duplicate CSVs, which would violate best practices. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + start["Start"] --> logInfo["Log: Checking if operator is installed more than once"] + logInfo --> forAllOps["For each operator op in env.AllOperators"] + forAllOps --> logDebugOp["Log: Checking operator %q, op.Name"] + forAllOps --> forAllOps2["For each operator op2 in env.AllOperators"] + forAllOps2 --> compare["If OperatorInstalledMoreThanOnce(op, op2)"] + compare -- Yes --> addNonCompliant["Append non‑compliant report object"] + addNonCompliant --> breakLoop["Break inner loop"] + compare -- No --> nextOp2["Continue inner loop"] + forAllOps2 --> nextOp2 + forAllOps --> addCompliant["Append compliant report object"] + addCompliant --> endLoop["End outer loop"] + endLoop --> setResult["check.SetResult(compliantObjects, nonCompliantObjects)"] + setResult --> finish["Finish"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testMultipleSameOperators --> func_OperatorInstalledMoreThanOnce + func_testMultipleSameOperators --> func_NewOperatorReportObject + func_testMultipleSameOperators --> func_SetResult + func_testMultipleSameOperators --> func_LogInfo + func_testMultipleSameOperators --> func_LogDebug +``` + +#### 5) Functions calling `testMultipleSameOperators` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testMultipleSameOperators +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testMultipleSameOperators +check := checksdb.NewCheck("TestID") +env := &provider.TestEnvironment{ + AllOperators: []*provider.Operator{ /* operators to evaluate */ }, +} +testMultipleSameOperators(check, env) +``` + 
+--- + +### testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces + +**testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces** - Validates that a namespace dedicated to operator installation contains only single or multi‑namespaced operators and no other operator artifacts (e.g., cluster‑wide CSVs, pods outside the operator’s scope). + +#### Signature (Go) + +```go +func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that a namespace dedicated to operator installation contains only single or multi‑namespaced operators and no other operator artifacts (e.g., cluster‑wide CSVs, pods outside the operator’s scope). | +| **Parameters** | `check *checksdb.Check` – test context used for logging and result reporting.
`env *provider.TestEnvironment` – execution environment holding namespaces, operators, and CSV data. | +| **Return value** | None (results are stored in the `check` via `SetResult`). | +| **Key dependencies** | • `LogInfo`, `LogError`, `SetResult` from `checksdb.Check`
• `fmt.Sprintf`, `strings.Join`
• `make`, `append` built‑ins
• `testhelper.NewNamespacedReportObject`
• `checkValidOperatorInstallation` (internal helper) | +| **Side effects** | Writes log entries, constructs report objects, and sets the test result on `check`. No external I/O beyond logging. | +| **How it fits the package** | Part of the Operator test suite; invoked by `LoadChecks` to enforce namespace‑specific operator installation rules in tenant environments. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Gather operator namespaces"] --> B["Iterate each dedicated namespace"] + B --> C{"Validate via checkValidOperatorInstallation"} + C -->|"error"| D["Log error & create non‑compliant report"] + C -->|"valid"| E{"Is dedicated?"} + E -->|"yes"| F["Create compliant report"] + E -->|"no"| G["Build detailed non‑compliant message"] + G --> H["Add to non‑compliant list"] + D & F & H --> I["SetResult(compliant, non‑compliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces --> func_checkValidOperatorInstallation + func_testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces --> func_LogInfo + func_testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces --> func_LogError + func_testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces --> func_SetResult + func_testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces --> func_NewNamespacedReportObject +``` + +#### Functions calling `testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces +func ExampleTest() { + env := provider.NewTestEnvironment() + check := checksdb.NewCheck("operator-namespace-validation") + testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check, &env) + // 
Result is now stored in check.Result +} +``` + +--- + +### testOperatorCatalogSourceBundleCount + +**testOperatorCatalogSourceBundleCount** - Validates that every catalog source used by an operator has a bundle count below 1,000. It logs results and records compliant/non‑compliant objects for reporting. + +#### Signature (Go) + +```go +func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that every catalog source used by an operator has a bundle count below 1,000. It logs results and records compliant/non‑compliant objects for reporting. | +| **Parameters** | `check *checksdb.Check` – test context; `env *provider.TestEnvironment` – environment snapshot containing operators, catalog sources, package manifests, and OpenShift version. | +| **Return value** | none (void) | +| **Key dependencies** | • `log.Info`, `log.Debug`, `log.Error` – structured logging
• `semver.NewVersion` – parse OCP version
• `strconv.Itoa` – convert int to string
• `stringhelper.StringInSlice` – check slice membership
• `provider.GetCatalogSourceBundleCount` – obtain bundle count for a catalog source
• `testhelper.NewCatalogSourceReportObject` – create report objects | +| **Side effects** | Emits log messages, populates `compliantObjects`/`nonCompliantObjects`, and calls `check.SetResult`. No external I/O beyond logging. | +| **How it fits the package** | Part of the operator test suite; executed during the *CatalogSourceBundleCount* check to enforce a best‑practice limit on catalog source bundle references. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Is OCP <= 4.12?"} + B -- Yes --> C["Set ocp412Skip = true"] + B -- No --> D["ocp412Skip remains false"] + C & D --> E["Iterate over env.Operators"] + E --> F["For each operator, iterate over env.AllPackageManifests"] + F --> G{"Should skip PM?"} + G -- Yes --> H["Continue to next PM"] + G -- No --> I["Find matching catalog source in env.AllCatalogSources"] + I --> J{"Already reported?"} + J -- Yes --> K["Skip"] + J -- No --> L["Get bundle count via provider.GetCatalogSourceBundleCount"] + L --> M{"bundleCount == -1?"} + M -- Yes --> N["Record non‑compliant error"] + M -- No --> O{"bundleCount > limit?"} + O -- Yes --> P["Record non‑compliant"] + O -- No --> Q["Record compliant"] + K & P & Q --> R["Mark catalog source as reported"] + R --> S["Break inner loops if checked"] + E --> T["End of operators loop"] + T --> U["check.SetResult(compliantObjects, nonCompliantObjects)"] + U --> V["Finish"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOperatorCatalogSourceBundleCount --> log.Info + func_testOperatorCatalogSourceBundleCount --> semver.NewVersion + func_testOperatorCatalogSourceBundleCount --> strconv.Itoa + func_testOperatorCatalogSourceBundleCount --> stringhelper.StringInSlice + func_testOperatorCatalogSourceBundleCount --> provider.GetCatalogSourceBundleCount + func_testOperatorCatalogSourceBundleCount --> testhelper.NewCatalogSourceReportObject +``` + +#### Functions calling `testOperatorCatalogSourceBundleCount` 
(Mermaid) + +```mermaid +graph TD + LoadChecks --> testOperatorCatalogSourceBundleCount +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorCatalogSourceBundleCount +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +// Assume `check` and `env` are already initialized. +func runExample() { + check := checksdb.NewCheck(nil) // placeholder; normally created by the test framework + env := provider.TestEnvironment{} // populated with operators, catalog sources, etc. + testOperatorCatalogSourceBundleCount(check, &env) +} +``` + +--- + +--- + +### testOperatorCrdOpenAPISpec + +**testOperatorCrdOpenAPISpec** - Validates that each Custom Resource Definition (CRD) managed by an operator declares an OpenAPI v3 schema in its spec. + +#### Signature (Go) + +```go +func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that each Custom Resource Definition (CRD) managed by an operator declares an OpenAPI v3 schema in its spec. | +| **Parameters** | `check *checksdb.Check` – the test context; `env *provider.TestEnvironment` – environment containing the CRDs to evaluate | +| **Return value** | None (the function records results via `check.SetResult`) | +| **Key dependencies** | • `openapi.IsCRDDefinedWithOpenAPI3Schema(crd)`
• `testhelper.NewOperatorReportObject(...)`
• `check.LogInfo`, `check.SetResult` | +| **Side effects** | Appends report objects to the check result; logs diagnostic information | +| **How it fits the package** | One of several operator‑specific checks in the `operator` test suite, ensuring CRD schema compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Crds"} + B -->|"CRD has OpenAPI v3"| C["Log success"] + C --> D["Create compliant report object"] + B -->|"No OpenAPI v3"| E["Log failure"] + E --> F["Create non‑compliant report object"] + D & F --> G["Append to respective list"] + G --> H["SetResult(compliant, nonCompliant)"] + H --> I["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOperatorCrdOpenAPISpec --> func_LogInfo + func_testOperatorCrdOpenAPISpec --> openapi_IsCRDDefinedWithOpenAPI3Schema + func_testOperatorCrdOpenAPISpec --> testhelper_NewOperatorReportObject + func_testOperatorCrdOpenAPISpec --> func_SetResult +``` + +#### Functions calling `testOperatorCrdOpenAPISpec` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testOperatorCrdOpenAPISpec +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorCrdOpenAPISpec +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/checksdb" +) + +func main() { + // Assume check and env are already created by the test harness + var check *checksdb.Check + var env *provider.TestEnvironment + + operator.testOperatorCrdOpenAPISpec(check, env) +} +``` + +--- + +### testOperatorCrdVersioning + +**testOperatorCrdVersioning** - Ensures every Custom Resource Definition (CRD) provided by an Operator follows Kubernetes‑style version naming (`v[alpha/beta]…`). 
+ +#### Signature (Go) + +```go +func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures every Custom Resource Definition (CRD) provided by an Operator follows Kubernetes‑style version naming (`v[alpha/beta]…`). | +| **Parameters** | `check *checksdb.Check` – test context for logging and result aggregation.
`env *provider.TestEnvironment` – environment containing the list of CRDs to examine. | +| **Return value** | None (results are recorded via `check.SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogDebug`, `check.LogError` – logging.
• `github.com/redhat-best-practices-for-k8s/certsuite/pkg/versions.IsValidK8sVersion` – version validation regex.
• `testhelper.NewOperatorReportObject` – creates per‑CRD compliance reports.
• `check.SetResult` – stores compliant/non‑compliant lists. | +| **Side effects** | Logs diagnostic information; mutates the internal state of `check` by setting its result objects. No external I/O or concurrency. | +| **How it fits the package** | Part of the Operator test suite, specifically registered in `LoadChecks`. It verifies that Operators expose CRDs with proper semantic versioning, a requirement for Kubernetes compatibility. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Crds"} + B --> C["Set doesUseK8sVersioning=true"] + B --> D["nonCompliantVersion="] + C --> E{"For each crd.Spec.Versions"} + E --> F["Get versionName"] + E --> G{"IsValidK8sVersion(versionName)"} + G -- true --> H["Continue loop"] + G -- false --> I["doesUseK8sVersioning=false, nonCompliantVersion=versionName, break"] + H --> J["End inner loop"] + J --> K{"doesUseK8sVersioning"} + K -- true --> L["LogInfo, append compliantObjects"] + K -- false --> M["LogError, append nonCompliantObjects"] + L --> N["Next crd"] + M --> N + N --> O["End outer loop"] + O --> P["check.SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOperatorCrdVersioning --> func_LogInfo + func_testOperatorCrdVersioning --> func_LogDebug + func_testOperatorCrdVersioning --> func_IsValidK8sVersion + func_testOperatorCrdVersioning --> func_NewOperatorReportObject + func_testOperatorCrdVersioning --> func_LogError + func_testOperatorCrdVersioning --> func_SetResult +``` + +#### Functions calling `testOperatorCrdVersioning` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testOperatorCrdVersioning +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorCrdVersioning +check := checksdb.NewCheck("example-id") +env := &provider.TestEnvironment{ + Crds: []*crdv1.CustomResourceDefinition{ /* populated CRDs */ }, +} +testOperatorCrdVersioning(check, env) +// 
After execution, check.Result holds compliant and non‑compliant objects. +``` + +--- + +### testOperatorInstallationAccessToSCC + +**testOperatorInstallationAccessToSCC** - Verifies that none of an operator’s cluster permissions grant access to Security Context Constraints (SCCs). Operators with such rules are flagged as non‑compliant. + +#### Signature (Go) + +```go +func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that none of an operator’s cluster permissions grant access to Security Context Constraints (SCCs). Operators with such rules are flagged as non‑compliant. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result handling.
`env *provider.TestEnvironment` – environment containing the list of operators under test. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `LogDebug`, `LogInfo` (logging)
• `len`, `append` (built‑in utilities)
• `testhelper.NewOperatorReportObject` (creates compliance reports)
• `access.PermissionsHaveBadRule` (logic that detects SCC rules)
• `check.SetResult` (stores the final outcome) | +| **Side effects** | Mutates internal result slices, logs diagnostic information, but does not alter the input environment. | +| **How it fits the package** | Part of the operator test suite; invoked by `LoadChecks` to enforce best‑practice security checks on installed operators. | + +#### Internal workflow + +```mermaid +flowchart TD + start([Start]) --> iterate["Iterate over env.Operators"] + iterate --> checkPerms{"clusterPermissions empty?"} + checkPerms -- Yes --> noPerms["No clusterPermissions"] + noPerms --> createCompliant["Create compliant report (no rules)"] + createCompliant --> continueLoop["Continue loop"] + checkPerms -- No --> badRuleCheck["access.PermissionsHaveBadRule"] + badRuleCheck -- True --> nonCompliant["Create non‑compliant report"] + badRuleCheck -- False --> compliant["Create compliant report"] + nonCompliant --> continueLoop + compliant --> continueLoop + continueLoop --> endLoop([End loop]) + endLoop --> setResult["check.SetResult"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testOperatorInstallationAccessToSCC --> LogDebug + func_testOperatorInstallationAccessToSCC --> len + func_testOperatorInstallationAccessToSCC --> LogInfo + func_testOperatorInstallationAccessToSCC --> append + func_testOperatorInstallationAccessToSCC --> testhelper.NewOperatorReportObject + func_testOperatorInstallationAccessToSCC --> access.PermissionsHaveBadRule + func_testOperatorInstallationAccessToSCC --> check.SetResult +``` + +#### Functions calling `testOperatorInstallationAccessToSCC` + +```mermaid +graph TD + LoadChecks --> testOperatorInstallationAccessToSCC +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorInstallationAccessToSCC +func runExample() { + // Assume check and env are already initialized. + var check *checksdb.Check + var env *provider.TestEnvironment + + // Invoke the function to perform SCC access checks. 
+ testOperatorInstallationAccessToSCC(check, env) + + // Results can be retrieved from `check`. +} +``` + +--- + +### testOperatorInstallationPhaseSucceeded + +**testOperatorInstallationPhaseSucceeded** - Verifies every operator in the test environment has reached the *Succeeded* status and records compliance results. + +#### Signature (Go) + +```go +func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies every operator in the test environment has reached the *Succeeded* status and records compliance results. | +| **Parameters** | `check *checksdb.Check` – test check object used for logging and result aggregation.
`env *provider.TestEnvironment` – environment containing a list of operators to evaluate. | +| **Return value** | None (void). Result is stored via `check.SetResult`. | +| **Key dependencies** | • `phasecheck.WaitOperatorReady(csv)` – polls operator status.
• `testhelper.NewOperatorReportObject(...)` – creates report entries.
• Logging functions (`LogInfo`, `LogError`). | +| **Side effects** | *Logs* progress and errors.
*Updates* the check’s result with compliant/non‑compliant operator reports. | +| **How it fits the package** | Implements a core test for operator installation status within the `operator` test suite; called by `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Operators"} + B -->|"For each op"| C["Log “Testing Operator …”"] + C --> D{"WaitOperatorReady(op.Csv)"} + D -- true --> E["Log “Operator … is in Succeeded phase”"] + E --> F["Create compliant ReportObject"] + D -- false --> G["Log “Operator … not in Succeeded phase”"] + G --> H["Create non‑compliant ReportObject"] + F & H --> I["Append to respective list"] + I --> B + B --> J{"All operators processed?"} + J --> K["check.SetResult(compliant, nonCompliant)"] + K --> L["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOperatorInstallationPhaseSucceeded --> phasecheck.WaitOperatorReady + func_testOperatorInstallationPhaseSucceeded --> testhelper.NewOperatorReportObject + func_testOperatorInstallationPhaseSucceeded --> check.LogInfo + func_testOperatorInstallationPhaseSucceeded --> check.LogError +``` + +#### Functions calling `testOperatorInstallationPhaseSucceeded` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testOperatorInstallationPhaseSucceeded +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorInstallationPhaseSucceeded +func ExampleTestOperatorInstallation() { + // Assume we have a populated TestEnvironment and a Check instance. + env := &provider.TestEnvironment{ + Operators: []operatorInfo{ /* operators to test */ }, + } + check := checksdb.NewCheck("operator-installation-status") + testOperatorInstallationPhaseSucceeded(check, env) + + // Results can be inspected via check.GetResult() or similar API. 
+} +``` + +--- + +### testOperatorOlmSkipRange + +**testOperatorOlmSkipRange** - Confirms every operator in the test environment includes an `olm.skipRange` annotation on its ClusterServiceVersion (CSV). + +#### Signature (Go) + +```go +func testOperatorOlmSkipRange(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Confirms every operator in the test environment includes an `olm.skipRange` annotation on its ClusterServiceVersion (CSV). | +| **Parameters** | `check *checksdb.Check` – current check context; `env *provider.TestEnvironment` – snapshot of operators under test. | +| **Return value** | None (side‑effect only) | +| **Key dependencies** | • `LogInfo`, `LogError` on the check
• `testhelper.NewOperatorReportObject` for report objects
• `SetResult` to record compliant/non‑compliant results | +| **Side effects** | Logs informational or error messages; populates result slices via `SetResult`; no external I/O. | +| **How it fits the package** | Part of the Operator test suite, executed when the `TestOperatorOlmSkipRange` check is triggered. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Operators"} + B --> C["Log operator name"] + C --> D{"Check olm.skipRange present?"} + D -- Yes --> E["Create compliant report object
Add field olm.SkipRange"] + D -- No --> F["Create non‑compliant report object"] + E --> G + F --> G + G --> H["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testOperatorOlmSkipRange --> LogInfo + testOperatorOlmSkipRange --> LogError + testOperatorOlmSkipRange --> NewOperatorReportObject + testOperatorOlmSkipRange --> SetResult +``` + +#### Functions calling `testOperatorOlmSkipRange` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testOperatorOlmSkipRange +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorOlmSkipRange +func example() { + // Assume check and env are prepared elsewhere + var check *checksdb.Check + var env *provider.TestEnvironment + + testOperatorOlmSkipRange(check, env) +} +``` + +--- + +### testOperatorOlmSubscription + +**testOperatorOlmSubscription** - Checks whether every operator in the environment has an OLM subscription; records compliant and non‑compliant results. + +#### Signature (Go) + +```go +func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnvironment) {} +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether every operator in the environment has an OLM subscription; records compliant and non‑compliant results. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result setting.
`env *provider.TestEnvironment` – runtime data including the list of operators to inspect. | +| **Return value** | None (side‑effects only). | +| **Key dependencies** | • `LogInfo`, `LogError`
• `append` (slice operation)
• `AddField` on report objects
• `testhelper.NewOperatorReportObject`
• `check.SetResult` | +| **Side effects** | • Writes log messages.
• Mutates two slices (`compliantObjects`, `nonCompliantObjects`).
• Sets the check result via `SetResult`. | +| **How it fits the package** | Part of the Operator test suite; invoked by `LoadChecks` to verify OLM‑based installation compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"For each operator in env.Operators"} + B -->|"SubscriptionName empty?"| C["Log error & add non‑compliant object"] + B -->|"Has subscription"| D["Log info & add compliant object"] + C --> E["Add SubscriptionName field"] + D --> F["Add SubscriptionName field"] + E --> G{"Next operator?"} + F --> G + G -->|"Yes"| B + G -->|"No"| H["SetResult(compliant, noncompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOperatorOlmSubscription --> LogInfo + func_testOperatorOlmSubscription --> LogError + func_testOperatorOlmSubscription --> append + func_testOperatorOlmSubscription --> AddField + func_testOperatorOlmSubscription --> testhelper.NewOperatorReportObject + func_testOperatorOlmSubscription --> SetResult +``` + +#### Functions calling `testOperatorOlmSubscription` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testOperatorOlmSubscription +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorOlmSubscription +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/provider" +) + +func main() { + check := checksdb.NewCheck("example-id") + env := &provider.TestEnvironment{ + Operators: []provider.Operator{ + {Namespace:"ns1", Name:"op1", SubscriptionName:"sub-1"}, + {Namespace:"ns2", Name:"op2", SubscriptionName:""}, + }, + } + testOperatorOlmSubscription(check, env) +} +``` + +--- + +### testOperatorPodsNoHugepages + +**testOperatorPodsNoHugepages** - Verifies that none of the operator‑managed pods request huge page memory. If a pod requests huge pages it is marked non‑compliant; otherwise it is compliant. 
+ +#### Signature (Go) + +```go +func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies that none of the operator‑managed pods request huge page memory. If a pod requests huge pages it is marked non‑compliant; otherwise it is compliant. | +| **Parameters** | `check *checksdb.Check` – test check context for logging and result reporting.
`env *provider.TestEnvironment` – environment containing CSV‑to‑pod mappings (`CSVToPodListMap`). | +| **Return value** | None (the function records results via `check.SetResult`). | +| **Key dependencies** | • `SplitCsv(csv string) CsvResult` – parses a CSV key into namespace and name.
• `logInfo`, `logError` – logging helpers from the check context.
• `HasHugepages() bool` – pod method that reports huge page usage.
• `testhelper.NewPodReportObject(...)` – constructs a report object for a pod.
• `check.SetResult(compliant, nonCompliant)` – finalises the test outcome. | +| **Side effects** | • Emits log messages for each CSV and pod examined.
• Creates and stores compliance/non‑compliance report objects in the check’s result set.
No external I/O or concurrency is performed. | +| **How it fits the package** | Part of the Operator test suite; called by `LoadChecks` to register the “Pods no hugepages” check for all operator installations. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over CSV→PodList"} + B -->|"for each csv, pods"| C["Parse CSV"] + C --> D{"For each pod"} + D -->|"HasHugepages()"| E["Log error & add non‑compliant"] + D -->|"!HasHugepages()"| F["Log info & add compliant"] + E --> G + F --> G + G --> H["SetResult(compliant, nonCompliant)"] + H --> I["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOperatorPodsNoHugepages --> SplitCsv + func_testOperatorPodsNoHugepages --> LogInfo + func_testOperatorPodsNoHugepages --> HasHugepages + func_testOperatorPodsNoHugepages --> LogError + func_testOperatorPodsNoHugepages --> testhelper.NewPodReportObject + func_testOperatorPodsNoHugepages --> SetResult +``` + +#### Functions calling `testOperatorPodsNoHugepages` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testOperatorPodsNoHugepages +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorPodsNoHugepages +check := checksdb.NewCheck("operator-pods-no-hugepages") +env := provider.NewTestEnvironment() // populated with CSVToPodListMap + +testOperatorPodsNoHugepages(check, env) + +// After execution, check.Result contains compliant and non‑compliant pod reports. +``` + +--- + +### testOperatorSemanticVersioning + +**testOperatorSemanticVersioning** - Validates that every operator in the test environment has a version string that conforms to [Semantic Versioning](https://semver.org/). 
+ +#### Signature (Go) + +```go +func testOperatorSemanticVersioning(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that every operator in the test environment has a version string that conforms to [Semantic Versioning](https://semver.org/). | +| **Parameters** | `check *checksdb.Check` – test context for logging and result reporting.<br/>
`env *provider.TestEnvironment` – execution environment containing the list of operators. | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError` – logging.
• `versions.IsValidSemanticVersion` – semantic‑version validation.
• `testhelper.NewOperatorReportObject` – report object construction.
• `check.SetResult` – final result aggregation. | +| **Side effects** | • Logs informational and error messages.
• Builds slices of compliant/non‑compliant `*ReportObject`s.
• Stores results via `SetResult`. No external I/O or concurrency. | +| **How it fits the package** | Part of the operator test suite; invoked by `LoadChecks` to provide a check named *TestOperatorHasSemanticVersioning*. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> LogStart["LogInfo: Starting testOperatorSemanticVersioning"] + Start --> Iterate["For each operator in env.Operators"] + Iterate --> Validate["IsValidSemanticVersion(operator.Version)?"] + Validate -- Yes --> Compliant["Add to compliantObjects"] + Validate -- No --> NonCompliant["Add to nonCompliantObjects"] + Compliant --> LogComp["LogInfo: Operator has a valid semantic version"] + NonCompliant --> LogErr["LogError: Operator has an invalid semantic version"] + End --> SetResult["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testOperatorSemanticVersioning --> LogInfo + testOperatorSemanticVersioning --> IsValidSemanticVersion + testOperatorSemanticVersioning --> NewOperatorReportObject + testOperatorSemanticVersioning --> SetResult +``` + +#### Functions calling `testOperatorSemanticVersioning` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testOperatorSemanticVersioning +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorSemanticVersioning +check := checksdb.NewCheck("example") +env := &provider.TestEnvironment{Operators: []operator.Operator{ + {Namespace: "default", Name: "foo-operator", Version: "1.2.3"}, +}} +testOperatorSemanticVersioning(check, env) +``` + +--- + +### testOperatorSingleCrdOwner + +**testOperatorSingleCrdOwner** - Ensures every CRD declared in the environment’s operators is owned by a single operator. If multiple operators own the same CRD name, the check flags it as non‑compliant. 
+ +#### Signature (Go) + +```go +func testOperatorSingleCrdOwner(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures every CRD declared in the environment’s operators is owned by a single operator. If multiple operators own the same CRD name, the check flags it as non‑compliant. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result storage; `env *provider.TestEnvironment` – collection of installed operators and their CSV specifications. | +| **Return value** | None (side effects via `check.SetResult`). | +| **Key dependencies** | • `log` methods (`LogInfo`, `LogError`, `LogDebug`)<br/>
• `strings.Join`
• `testhelper.NewCrdReportObject` and its `AddField` method
• `check.SetResult` | +| **Side effects** | Generates report objects, logs diagnostic messages, and sets the check result. | +| **How it fits the package** | Part of the operator test suite; invoked by `LoadChecks` to validate CRD ownership rules in a Kubernetes cluster. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over env.Operators"] --> B["Collect unique owned CRDs per operator"] + B --> C["Add operator name to crdOwners map"] + C --> D["Iterate over crdOwners"] + D -->|"len > 1"| E["Log error, create non‑compliant report object"] + D -->|"len == 1"| F["Log debug, create compliant report object"] + E & F --> G["Append to respective slice"] + G --> H["SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOperatorSingleCrdOwner --> testhelper.NewCrdReportObject + func_testOperatorSingleCrdOwner --> strings.Join + func_testOperatorSingleCrdOwner --> check.SetResult + func_testOperatorSingleCrdOwner --> log.LogInfo + func_testOperatorSingleCrdOwner --> log.LogError + func_testOperatorSingleCrdOwner --> log.LogDebug +``` + +#### Functions calling `testOperatorSingleCrdOwner` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testOperatorSingleCrdOwner +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testOperatorSingleCrdOwner +func ExampleTestOperatorSingleCrdOwner() { + env := &provider.TestEnvironment{ /* populate Operators slice */ } + check := checksdb.NewCheck("operator-crd-ownership") + testOperatorSingleCrdOwner(check, env) + // After execution, inspect check.Result for compliance status. 
+} +``` + +--- + +--- diff --git a/docs/tests/operator/phasecheck/phasecheck.md b/docs/tests/operator/phasecheck/phasecheck.md new file mode 100644 index 000000000..46c8d74b2 --- /dev/null +++ b/docs/tests/operator/phasecheck/phasecheck.md @@ -0,0 +1,296 @@ +# Package phasecheck + +**Path**: `tests/operator/phasecheck` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [WaitOperatorReady](#waitoperatorready) +- [Local Functions](#local-functions) + - [isOperatorPhaseFailedOrUnknown](#isoperatorphasefailedorunknown) + - [isOperatorPhaseSucceeded](#isoperatorphasesucceeded) + +## Overview + +The phasecheck package provides utilities to monitor the lifecycle of an Operator’s ClusterServiceVersion (CSV). It repeatedly polls the CSV status until it reaches a terminal state—either Succeeded or a failure/unknown condition—and reports success or terminates with an error. + +### Key Features + +- WaitOperatorReady – blocks until the CSV is in Succeeded, failed, or unknown, respecting a timeout and retry interval. +- isOperatorPhaseSucceeded – helper that checks if the CSV’s phase equals CSVPhaseSucceeded. +- isOperatorPhaseFailedOrUnknown – helper that detects terminal failure or uncertainty states. + +### Design Notes + +- A constant timeout (hidden) governs how long polling will continue before giving up. +- Polling uses context cancellation and sleeps between attempts to avoid busy‑waiting. +- The package logs each check, aiding debugging when the operator stalls or fails. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool](#waitoperatorready) | Polls the CSV until it is in `Succeeded` or terminates on failure/timeout. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func isOperatorPhaseFailedOrUnknown(csv *v1alpha1.ClusterServiceVersion) bool](#isoperatorphasefailedorunknown) | Checks whether the given `ClusterServiceVersion` (CSV) has entered a terminal state indicating failure or uncertainty. It returns `true` if the CSV’s phase is either *Failed* or *Unknown*. | +| [func isOperatorPhaseSucceeded(csv *v1alpha1.ClusterServiceVersion) bool](#isoperatorphasesucceeded) | Determines if the CSV status phase equals `v1alpha1.CSVPhaseSucceeded`. | + +## Exported Functions + +### WaitOperatorReady + +**WaitOperatorReady** - Polls the CSV until it is in `Succeeded` or terminates on failure/timeout. + +Checks whether an Operator’s ClusterServiceVersion (CSV) reaches the **Succeeded** phase within a configured timeout. + +--- + +#### Signature (Go) + +```go +func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Polls the CSV until it is in `Succeeded` or terminates on failure/timeout. | +| **Parameters** | `csv *v1alpha1.ClusterServiceVersion` – the CSV to monitor. | +| **Return value** | `bool`: `true` if the CSV reached `Succeeded`; `false` otherwise (failed phase, error fetching CSV, or timeout). | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`<br/>
• `time.Now()`, `time.Since()`, `time.Sleep()`
• `isOperatorPhaseSucceeded(csv)`
• `isOperatorPhaseFailedOrUnknown(csv)`
• `provider.CsvToString(csv)`
• `log.Debug(...)`, `log.Error(...)` | +| **Side effects** | • Logs debug and error messages.
• Periodically refreshes the CSV via the OLM client to capture updated status. | +| **How it fits the package** | Used by tests in the *phasecheck* package to assert that an Operator installation has completed successfully before proceeding with further checks. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Get OLM client"] + B --> C{"Time elapsed < timeout"} + C -- yes --> D{"CSV Succeeded?"} + D -- true --> E["Log ready & return true"] + D -- false --> F{"CSV Failed/Unknown?"} + F -- true --> G["Log failure & return false"] + F -- false --> H["Log waiting, Sleep(1s)"] + H --> I["Refresh CSV from API"] + I --> J["Update local CSV"] + J --> C + C -- no --> K["Log timeout"] + K --> L["Return false"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_WaitOperatorReady --> func_GetClientsHolder + func_WaitOperatorReady --> func_IsOperatorPhaseSucceeded + func_WaitOperatorReady --> func_IsOperatorPhaseFailedOrUnknown + func_WaitOperatorReady --> func_CsvToString + func_WaitOperatorReady --> func_Debug + func_WaitOperatorReady --> func_Error +``` + +--- + +#### Functions calling `WaitOperatorReady` (Mermaid) + +```mermaid +graph TD + testOperatorInstallationPhaseSucceeded --> func_WaitOperatorReady +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking WaitOperatorReady +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/operator/phasecheck" + olmapi "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + // Assume csv is obtained from somewhere (e.g., test environment) + var csv *olmapi.ClusterServiceVersion + ready := phasecheck.WaitOperatorReady(csv) + if ready { + fmt.Println("Operator is ready") + } else { + fmt.Println("Operator failed to become ready") + } +} +``` + +--- + +--- + +## Local Functions + +### isOperatorPhaseFailedOrUnknown + +**isOperatorPhaseFailedOrUnknown** - Checks whether the given 
`ClusterServiceVersion` (CSV) has entered a terminal state indicating failure or uncertainty. It returns `true` if the CSV’s phase is either *Failed* or *Unknown*. + +#### Signature (Go) + +```go +func isOperatorPhaseFailedOrUnknown(csv *v1alpha1.ClusterServiceVersion) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether the given `ClusterServiceVersion` (CSV) has entered a terminal state indicating failure or uncertainty. It returns `true` if the CSV’s phase is either *Failed* or *Unknown*. | +| **Parameters** | `csv *v1alpha1.ClusterServiceVersion` – pointer to the CSV whose status is inspected. | +| **Return value** | `bool` – `true` when `csv.Status.Phase` equals `v1alpha1.CSVPhaseFailed` or `v1alpha1.CSVPhaseUnknown`; otherwise `false`. | +| **Key dependencies** | • Calls `log.Debug()` from the internal logging package to emit diagnostic information.
• Relies on constants `CSVPhaseFailed` and `CSVPhaseUnknown` defined in `github.com/redhat-best-practices-for-k8s/certsuite/internal/v1alpha1`. | +| **Side effects** | None beyond logging; does not modify the CSV or any external state. | +| **How it fits the package** | Part of the operator phase‑checking utilities (`phasecheck` package). It is used by `WaitOperatorReady` to decide whether to abort waiting when a CSV cannot progress to *Succeeded*. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive csv"] --> B["Log debug message"] + B --> C{"Is phase Failed?"} + C -- Yes --> D["Return true"] + C -- No --> E{"Is phase Unknown?"} + E -- Yes --> D + E -- No --> F["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isOperatorPhaseFailedOrUnknown --> func_LogDebug +``` + +#### Functions calling `isOperatorPhaseFailedOrUnknown` (Mermaid) + +```mermaid +graph TD + func_WaitOperatorReady --> func_isOperatorPhaseFailedOrUnknown +``` + +#### Usage example (Go) + +```go +// Minimal example invoking isOperatorPhaseFailedOrUnknown +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/v1alpha1" +) + +func main() { + csv := &v1alpha1.ClusterServiceVersion{ + Status: v1alpha1.CSVStatus{Phase: v1alpha1.CSVPhaseFailed}, + } + if isOperatorPhaseFailedOrUnknown(csv) { + fmt.Println("CSV is in a failed or unknown state.") + } else { + fmt.Println("CSV is still progressing.") + } +} +``` + +--- + +### isOperatorPhaseSucceeded + +**isOperatorPhaseSucceeded** - Determines if the CSV status phase equals `v1alpha1.CSVPhaseSucceeded`. + +Checks whether a given ClusterServiceVersion (CSV) has reached the **Succeeded** phase. + +--- + +#### Signature (Go) + +```go +func isOperatorPhaseSucceeded(csv *v1alpha1.ClusterServiceVersion) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines if the CSV status phase equals `v1alpha1.CSVPhaseSucceeded`. 
| +| **Parameters** | `csv *v1alpha1.ClusterServiceVersion` – pointer to a CSV object. | +| **Return value** | `bool` – `true` when `csv.Status.Phase == v1alpha1.CSVPhaseSucceeded`, otherwise `false`. | +| **Key dependencies** | • `log.Debug` from `github.com/redhat-best-practices-for-k8s/certsuite/internal/log`
• Constant `v1alpha1.CSVPhaseSucceeded` | +| **Side effects** | Logs a debug message; no mutation of the CSV or external state. | +| **How it fits the package** | Used by `WaitOperatorReady` to poll the readiness of an operator’s CSV during tests. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log debug: CSV name, namespace, phase"] + B --> C["Compare csv.Status.Phase with v1alpha1.CSVPhaseSucceeded"] + C -->|"true"| D["Return true"] + C -->|"false"| E["Return false"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_isOperatorPhaseSucceeded --> func_log.Debug +``` + +--- + +#### Functions calling `isOperatorPhaseSucceeded` (Mermaid) + +```mermaid +graph TD + func_WaitOperatorReady --> func_isOperatorPhaseSucceeded +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking isOperatorPhaseSucceeded +package main + +import ( + "fmt" + + v1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func main() { + csv := &v1alpha1.ClusterServiceVersion{ + Status: v1alpha1.ClusterServiceVersionStatus{Phase: v1alpha1.CSVPhaseSucceeded}, + } + if isOperatorPhaseSucceeded(csv) { + fmt.Println("CSV has succeeded") + } else { + fmt.Println("CSV not yet succeeded") + } +} +``` + +--- diff --git a/docs/tests/performance/performance.md b/docs/tests/performance/performance.md new file mode 100644 index 000000000..e40250b47 --- /dev/null +++ b/docs/tests/performance/performance.md @@ -0,0 +1,595 @@ +# Package performance + +**Path**: `tests/performance` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local Functions](#local-functions) + - [filterProbeProcesses](#filterprobeprocesses) + - [getExecProbesCmds](#getexecprobescmds) + - [testExclusiveCPUPool](#testexclusivecpupool) + - [testLimitedUseOfExecProbes](#testlimiteduseofexecprobes) + - [testRtAppsNoExecProbes](#testrtappsnoexecprobes) + 
- [testSchedulingPolicyInCPUPool](#testschedulingpolicyincpupool) + +## Overview + +The `performance` package registers a set of runtime checks that validate CNF workloads against performance‑related requirements such as CPU scheduling policies, exclusive CPU pool usage and limits on exec probes. It is intended to be used by the CertSuite test runner when evaluating CNFs for compliance with performance guidelines. + +### Key Features + +- Registers multiple checks via `LoadChecks`, automatically attaching setup and skip logic based on pod characteristics +- Validates that containers without host PID isolation do not run exec probes while any process uses a real‑time policy +- Ensures pods containing both exclusive‑CPU and shared‑CPU assignments are flagged as non‑compliant + +### Design Notes + +- Check registration is performed once; subsequent test runs reuse the shared checks database +- Skip functions are generated dynamically to avoid running irrelevant checks for certain pod types +- The package relies heavily on helper utilities (e.g., `testhelper` and `scheduling`) which abstract complex Kubernetes interactions + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Registers a set of performance‑related tests in the shared checks DB, attaching setup logic and skip conditions for each check. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Container) (notExecProbeProcesses []*crclient.Process, compliantObjects []*testhelper.ReportObject)](#filterprobeprocesses) | Removes processes that are part of a container’s exec probes and returns the remaining processes along with report objects for the excluded ones. 
| +| [func getExecProbesCmds(c *provider.Container) map[string]bool](#getexecprobescmds) | Builds a lookup table of the exact command lines that are executed by any `exec` probe (`liveness`, `readiness`, or `startup`) configured in a container. The keys are normalized command strings, and the values are always `true`. | +| [func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment)](#testexclusivecpupool) | Detect pods containing containers that mix exclusive‑CPU and shared‑CPU assignments; flag such pods as non‑compliant. | +| [func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment)](#testlimiteduseofexecprobes) | Ensures that a CNF does not exceed the allowed number of exec probes and that each probe’s `PeriodSeconds` is ≥ 10 seconds. | +| [func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment)](#testrtappsnoexecprobes) | Ensures that containers lacking host PID isolation do not execute exec probes while any process is scheduled with a real‑time policy. It reports compliance or non‑compliance for each container and its processes. | +| [func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment, podContainers []*provider.Container, schedulingType string)](#testschedulingpolicyincpupool) | Ensures that every process inside each container’s PID namespace satisfies the CPU scheduling policy specified by `schedulingType`. It records compliant and non‑compliant processes in the test result. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Registers a set of performance‑related tests in the shared checks DB, attaching setup logic and skip conditions for each check. + +#### Signature (Go) + +```go +func LoadChecks() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Registers a set of performance‑related tests in the shared checks DB, attaching setup logic and skip conditions for each check. 
| +| **Parameters** | none | +| **Return value** | none | +| **Key dependencies** | • `log.Debug` – logs loading activity.
• `checksdb.NewChecksGroup`, `.WithBeforeEachFn`, `.Add`
• `identifiers.GetTestIDAndLabels` – resolves test IDs and tags.
• `testhelper.GetNoPodsUnderTestSkipFn` – provides skip function when no pods are present.
• Individual test functions (`testExclusiveCPUPool`, `testRtAppsNoExecProbes`, etc.)
• Skip helpers (`skipIfNoGuaranteedPodContainersWithExclusiveCPUs`, …) | +| **Side effects** | • Populates the checks database with a new group keyed by `common.PerformanceTestKey`.
• Configures each check’s skip logic and execution function.
• Emits debug logs. | +| **How it fits the package** | Part of the `performance` test suite; called by `pkg/certsuite.LoadInternalChecksDB` to aggregate all internal checks during initialization. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["log.Debug"] + B --> C["Create group: NewChecksGroup(common.PerformanceTestKey)"] + C --> D["Set before‑each function: WithBeforeEachFn(beforeEachFn)"] + D --> E["Add Check 1"] + E --> F["Add Check 2"] + F --> G["Add Check 3"] + G --> H["Add Check 4"] + H --> I["Add Check 5"] + I --> J["Add Check 6"] + J --> K["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> log_Debug + func_LoadChecks --> checksdb_NewChecksGroup + func_LoadChecks --> checksdb_Check_WithBeforeEachFn + func_LoadChecks --> checksdb_Check_Add + func_LoadChecks --> identifiers_GetTestIDAndLabels + func_LoadChecks --> testhelper_GetNoPodsUnderTestSkipFn + func_LoadChecks --> testExclusiveCPUPool + func_LoadChecks --> testRtAppsNoExecProbes + func_LoadChecks --> testSchedulingPolicyInCPUPool + func_LoadChecks --> skipIfNoGuaranteedPodContainersWithExclusiveCPUs + func_LoadChecks --> skipIfNoNonGuaranteedPodContainersWithoutHostPID + func_LoadChecks --> skipIfNoGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID +``` + +#### Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + pkg_certsuite_LoadInternalChecksDB --> func_LoadChecks +``` + +#### Usage example (Go) + +```go +// In the package initialization phase +func init() { + // Load all performance checks into the shared database. + performance.LoadChecks() +} +``` + +--- + +## Local Functions + +### filterProbeProcesses + +**filterProbeProcesses** - Removes processes that are part of a container’s exec probes and returns the remaining processes along with report objects for the excluded ones. 
+ +#### 1) Signature (Go) + +```go +func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Container) (notExecProbeProcesses []*crclient.Process, compliantObjects []*testhelper.ReportObject) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Removes processes that are part of a container’s exec probes and returns the remaining processes along with report objects for the excluded ones. | +| **Parameters** | `allProcesses []*crclient.Process` – list of all running processes in the container.
`cut *provider.Container` – container metadata (namespace, pod name, etc.). | +| **Return value** | `notExecProbeProcesses []*crclient.Process` – processes that are not part of exec probes.
`compliantObjects []*testhelper.ReportObject` – report objects describing each excluded process. | +| **Key dependencies** | - `getExecProbesCmds(cut)`
- `strings.Join`, `strings.Fields`
- `strconv.Itoa`
- `slices.Contains`
- `testhelper.NewContainerReportObject`
- `AddField` (on report objects) | +| **Side effects** | No mutation of input slices; creates new report objects. No I/O or concurrency. | +| **How it fits the package** | Used by performance tests to isolate processes that should not be evaluated for real‑time scheduling when exec probes are defined. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Build probe command set"} + B --> C["getExecProbesCmds"] + C --> D["Iterate allProcesses"] + D --> E{"Is process a probe?"} + E -- Yes --> F["Add report object for probe"] + E -- Yes --> G["Mark PID as probe"] + E -- No --> H["Skip"] + G --> I["Continue loop"] + H --> J["End of loop"] + J --> K{"Remove probe processes"} + K --> L["Iterate allProcesses again"] + L --> M{"Is process part of probe?"} + M -- Yes --> N["Skip"] + M -- No --> O["Add to notExecProbeProcesses"] + O --> P["End loop"] + P --> Q["Return results"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_filterProbeProcesses --> func_getExecProbesCmds + func_filterProbeProcesses --> strings.Join + func_filterProbeProcesses --> strings.Fields + func_filterProbeProcesses --> strconv.Itoa + func_filterProbeProcesses --> slices.Contains + func_filterProbeProcesses --> testhelper.NewContainerReportObject +``` + +#### 5) Functions calling `filterProbeProcesses` (Mermaid) + +```mermaid +graph TD + func_testRtAppsNoExecProbes --> func_filterProbeProcesses +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking filterProbeProcesses +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/crclient" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/testhelper" +) + +func example() { + // Assume processes and container are obtained elsewhere + var allProcesses []*crclient.Process + var cut *provider.Container + + notExec, reports := filterProbeProcesses(allProcesses, cut) + + 
fmt.Println("Processes excluded by exec probes:", len(reports)) + fmt.Println("Remaining processes to evaluate:", len(notExec)) +} +``` + +--- + +### getExecProbesCmds + +**getExecProbesCmds** - Builds a lookup table of the exact command lines that are executed by any `exec` probe (`liveness`, `readiness`, or `startup`) configured in a container. The keys are normalized command strings, and the values are always `true`. + +#### Signature (Go) + +```go +func getExecProbesCmds(c *provider.Container) map[string]bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a lookup table of the exact command lines that are executed by any `exec` probe (`liveness`, `readiness`, or `startup`) configured in a container. The keys are normalized command strings, and the values are always `true`. | +| **Parameters** | `c *provider.Container –` the container whose probes are inspected. | +| **Return value** | `map[string]bool –` a map where each key is a probe command string and the corresponding value is `true`. | +| **Key dependencies** | • `strings.Join` (to concatenate slice elements)
• `strings.Fields` (to split and rejoin for normalization) | +| **Side effects** | None. The function performs pure computation without mutating input or external state. | +| **How it fits the package** | Used by higher‑level filtering logic (`filterProbeProcesses`) to identify processes that belong to probe execution, allowing them to be excluded from security checks. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["getExecProbesCmds"] --> B{"Check liveness probe"} + B -->|"exists"| C["Extract command"] + C --> D["Normalize with Join & Fields"] + D --> E["Add to map"] + A --> F{"Check readiness probe"} + F -->|"exists"| G["Same as above"] + A --> H{"Check startup probe"} + H -->|"exists"| I["Same as above"] + E --> J["Return map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getExecProbesCmds --> strings.Join + func_getExecProbesCmds --> strings.Fields +``` + +#### Functions calling `getExecProbesCmds` (Mermaid) + +```mermaid +graph TD + filterProbeProcesses --> func_getExecProbesCmds +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getExecProbesCmds +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance/provider" +) + +func main() { + // Assume we have a container with probes set up. + var cont *provider.Container + // ... populate cont ... + + probeCmds := getExecProbesCmds(cont) + fmt.Printf("Probe commands: %v\n", probeCmds) +} +``` + +--- + +### testExclusiveCPUPool + +**testExclusiveCPUPool** - Detect pods containing containers that mix exclusive‑CPU and shared‑CPU assignments; flag such pods as non‑compliant. + +The function verifies that all containers within a pod belong to the same CPU pool (exclusive or shared). It logs errors for mixed pools and records compliance results. 
+ +```go +func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Detect pods containing containers that mix exclusive‑CPU and shared‑CPU assignments; flag such pods as non‑compliant. | +| **Parameters** | `check` – the current check context; `env` – test environment holding pod data. | +| **Return value** | None (results are stored via `SetResult`). | +| **Key dependencies** | • `resources.HasExclusiveCPUsAssigned(cut, logger)`
• `check.GetLogger()`
• `strconv.Itoa`
• `check.LogError`, `check.LogInfo`
• `testhelper.NewPodReportObject`
• `check.SetResult` | +| **Side effects** | Writes log entries; appends report objects to internal slices; updates check result state. | +| **How it fits the package** | Implements a single performance test that is registered in `LoadChecks`. It ensures pods do not mix CPU pool assignments, which could affect scheduling guarantees. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Pods"} + B --> C{"For each pod"} + C --> D{"Count exclusive & shared containers"} + D --> E{"If both counts > 0?"} + E -- Yes --> F["Log error, create non‑compliant report"] + E -- No --> G["Log info, create compliant report"] + F --> H["Append to nonCompliantObjects"] + G --> I["Append to compliantObjects"] + H & I --> J{"Next pod?"} + J -- Yes --> C + J -- No --> K["SetResult(compliant, noncompliant)"] + K --> L["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testExclusiveCPUPool --> func_HasExclusiveCPUsAssigned + func_testExclusiveCPUPool --> func_GetLogger + func_testExclusiveCPUPool --> func_Itoa + func_testExclusiveCPUPool --> func_LogError + func_testExclusiveCPUPool --> func_NewPodReportObject + func_testExclusiveCPUPool --> func_LogInfo + func_testExclusiveCPUPool --> func_SetResult +``` + +#### Functions calling `testExclusiveCPUPool` + +```mermaid +graph TD + func_LoadChecks --> func_testExclusiveCPUPool +``` + +#### Usage example + +```go +// Minimal example invoking testExclusiveCPUPool +check := checksdb.NewCheck(...) +env := &provider.TestEnvironment{Pods: /* ... */} + +testExclusiveCPUPool(check, env) + +// After execution, check.Result holds compliance information. +``` + +--- + +### testLimitedUseOfExecProbes + +**testLimitedUseOfExecProbes** - Ensures that a CNF does not exceed the allowed number of exec probes and that each probe’s `PeriodSeconds` is ≥ 10 seconds. 
+ +#### 1) Signature (Go) + +```go +func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that a CNF does not exceed the allowed number of exec probes and that each probe’s `PeriodSeconds` is ≥ 10 seconds. | +| **Parameters** | `check *checksdb.Check` – test harness for logging and result setting.
`env *provider.TestEnvironment` – contains all pods/containers under evaluation. | +| **Return value** | None; results are communicated via `check.SetResult`. | +| **Key dependencies** | - `check.LogInfo`, `check.LogError`
- `testhelper.NewContainerReportObject`, `testhelper.NewReportObject`
- `fmt.Sprintf`
- `check.SetResult` | +| **Side effects** | Emits logs, constructs report objects, and records pass/fail status on the provided check. No external state mutation. | +| **How it fits the package** | Implements the *TestLimitedUseOfExecProbes* test in the performance suite, ensuring CNF compliance with probe‑usage policies. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over Pods"] --> B{"For each Container"} + B --> C["Check LivenessProbe.Exec"] + B --> D["Check StartupProbe.Exec"] + B --> E["Check ReadinessProbe.Exec"] + C --> F{"PeriodSeconds >= 10?"} + D --> F + E --> F + F -- Yes --> G["LogInfo & append compliant object"] + F -- No --> H["LogError & append non‑compliant object"] + G & H --> I["Increment counter if probe exists"] + I --> J{"counter >= maxNumberOfExecProbes?"} + J -- Yes --> K["LogError, add CNF report object (non‑compliant)"] + J -- No --> L["LogInfo, add CNF report object (compliant)"] + K & L --> M["check.SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testLimitedUseOfExecProbes --> func_LogInfo + func_testLimitedUseOfExecProbes --> func_Append + func_testLimitedUseOfExecProbes --> func_NewContainerReportObject + func_testLimitedUseOfExecProbes --> func_Sprintf + func_testLimitedUseOfExecProbes --> func_LogError + func_testLimitedUseOfExecProbes --> func_SetResult +``` + +#### 5) Functions calling `testLimitedUseOfExecProbes` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testLimitedUseOfExecProbes +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testLimitedUseOfExecProbes +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/performance" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + env := provider.NewTestEnvironment(/* populate with pods/containers 
*/) + check := checksdb.NewCheck("test-limited-use-of-exec-probes") + performance.testLimitedUseOfExecProbes(check, env) +} +``` + +--- + +--- + +### testRtAppsNoExecProbes + +**testRtAppsNoExecProbes** - Ensures that containers lacking host PID isolation do not execute exec probes while any process is scheduled with a real‑time policy. It reports compliance or non‑compliance for each container and its processes. + +#### Signature (Go) + +```go +func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that containers lacking host PID isolation do not execute exec probes while any process is scheduled with a real‑time policy. It reports compliance or non‑compliance for each container and its processes. | +| **Parameters** | `check *checksdb.Check` – test context used for logging and result storage.
`env *provider.TestEnvironment` – environment providing access to containers under test. | +| **Return value** | None (side‑effect: sets check result). | +| **Key dependencies** | • `env.GetNonGuaranteedPodContainersWithoutHostPID()`
• `check.LogInfo`, `LogError`, `LogWarn`
• `crclient.GetContainerProcesses`
• `filterProbeProcesses`
• `scheduling.GetProcessCPUScheduling`
• `scheduling.PolicyIsRT`
• `testhelper.NewContainerReportObject` | +| **Side effects** | Logs information, errors and warnings; builds lists of compliant/non‑compliant report objects; calls `check.SetResult` to store the outcome. | +| **How it fits the package** | Part of the *performance* test suite; invoked by `LoadChecks` as the implementation for the “TestRtAppNoExecProbes” check, ensuring that real‑time scheduling is not used with exec probes in non‑guaranteed containers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Retrieve containers"] --> B{"Iterate over each container"} + B --> C["Check HasExecProbes"] + C -- No --> D["Mark compliant, continue"] + C -- Yes --> E["Get container processes"] + E -- Error --> F["Log error, mark non‑compliant"] + E -- OK --> G["Filter exec probe processes"] + G --> H["Iterate over remaining processes"] + H --> I{"Scheduling policy"} + I -- RT --> J["Mark non‑compliant"] + I -- Non‑RT --> K["Continue"] + J --> L["Set allProcessesCompliant=false"] + K --> M["Check allProcessesCompliant"] + M -- True --> N["Mark compliant"] + M -- False --> O["No further action"] + D & J & O --> P["End loop"] + P --> Q["Set check result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testRtAppsNoExecProbes --> env.GetNonGuaranteedPodContainersWithoutHostPID + func_testRtAppsNoExecProbes --> check.LogInfo + func_testRtAppsNoExecProbes --> check.LogError + func_testRtAppsNoExecProbes --> crclient.GetContainerProcesses + func_testRtAppsNoExecProbes --> filterProbeProcesses + func_testRtAppsNoExecProbes --> scheduling.GetProcessCPUScheduling + func_testRtAppsNoExecProbes --> scheduling.PolicyIsRT + func_testRtAppsNoExecProbes --> testhelper.NewContainerReportObject +``` + +#### Functions calling `testRtAppsNoExecProbes` (Mermaid) + +```mermaid +graph TD + LoadChecks --> func_testRtAppsNoExecProbes +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testRtAppsNoExecProbes +func runExample() { + // Assume 
env and check are already initialized + var env *provider.TestEnvironment // set up test environment + var check *checksdb.Check // create a new Check instance + + testRtAppsNoExecProbes(check, env) + + // After the call, results can be inspected via check.GetResult() +} +``` + +--- + +--- + +### testSchedulingPolicyInCPUPool + +**testSchedulingPolicyInCPUPool** - Ensures that every process inside each container’s PID namespace satisfies the CPU scheduling policy specified by `schedulingType`. It records compliant and non‑compliant processes in the test result. + +#### Signature (Go) + +```go +func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment, + podContainers []*provider.Container, schedulingType string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that every process inside each container’s PID namespace satisfies the CPU scheduling policy specified by `schedulingType`. It records compliant and non‑compliant processes in the test result. | +| **Parameters** | *`check`* – Test check instance for logging and result reporting.
*`env`* – Environment providing node/pod context.
*`podContainers`* – List of containers to evaluate.
*`schedulingType`* – Desired scheduling type (`SharedCPUScheduling`, `ExclusiveCPUScheduling`, or `IsolatedCPUScheduling`). | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | • `crclient.GetContainerPidNamespace` – obtains a container’s PID namespace.
• `crclient.GetPidsFromPidNamespace` – lists PIDs in that namespace.
• `scheduling.ProcessPidsCPUScheduling` – checks each process against the desired scheduling policy.
• `testhelper.NewContainerReportObject` – creates report objects. | +| **Side effects** | Logs informational, debug and error messages; appends report objects to internal slices; finally sets test results on `check`. No external state is modified. | +| **How it fits the package** | Part of the performance suite’s CPU‑pool checks; called by higher‑level functions that load and run checks for different scheduling scenarios. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over podContainers"} + B -->|"for each container cut"| C["LogInfo"] + C --> D["GetContainerPidNamespace(cut, env)"] + D -->|"error?"| E["LogError & add non‑compliant report; continue loop"] + D -->|"ok"| F["GetPidsFromPidNamespace(pidNamespace, cut)"] + F -->|"error?"| G["LogError & add non‑compliant report"] + F -->|"ok"| H["ProcessPidsCPUScheduling(processes, cut, schedulingType, logger)"] + H --> I["Append compliantContainersPids and nonCompliantContainersPids"] + B --> J{"loop end"} + J --> K["SetResult(compliantContainersPids, nonCompliantContainersPids)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testSchedulingPolicyInCPUPool --> func_GetContainerPidNamespace + func_testSchedulingPolicyInCPUPool --> func_GetPidsFromPidNamespace + func_testSchedulingPolicyInCPUPool --> func_ProcessPidsCPUScheduling + func_testSchedulingPolicyInCPUPool --> func_NewContainerReportObject +``` + +#### Functions calling `testSchedulingPolicyInCPUPool` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testSchedulingPolicyInCPUPool +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testSchedulingPolicyInCPUPool +func runExample() { + // Assume env and containers are already prepared. 
+ var check *checksdb.Check = checksdb.NewCheck("example") + var env *provider.TestEnvironment = provider.GetTestEnvironment() + var containers []*provider.Container = env.GetNonGuaranteedPodContainersWithoutHostPID() + + testSchedulingPolicyInCPUPool(check, env, containers, scheduling.SharedCPUScheduling) +} +``` + +--- diff --git a/docs/tests/platform/bootparams/bootparams.md b/docs/tests/platform/bootparams/bootparams.md new file mode 100644 index 000000000..224e25cac --- /dev/null +++ b/docs/tests/platform/bootparams/bootparams.md @@ -0,0 +1,343 @@ +# Package bootparams + +**Path**: `tests/platform/bootparams` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [GetMcKernelArguments](#getmckernelarguments) + - [TestBootParamsHelper](#testbootparamshelper) +- [Local Functions](#local-functions) + - [getCurrentKernelCmdlineArgs](#getcurrentkernelcmdlineargs) + - [getGrubKernelArgs](#getgrubkernelargs) + +## Overview + +The bootparams package provides utilities for verifying that the kernel arguments specified in a node’s MachineConfig match those actually present on the running system, including both the container command line and GRUB configuration. It is intended for use within CertSuite test environments to detect misconfigurations. + +### Key Features + +- GetMcKernelArguments parses a MachineConfig string of kernel arguments into a key‑value map for easy lookup. +- TestBootParamsHelper compares the expected kernel args against the current container and GRUB values, emitting warnings or debug logs when mismatches occur. +- Internal helpers getCurrentKernelCmdlineArgs and getGrubKernelArgs execute commands inside probe pods to capture live kernel argument states. + +### Design Notes + +- Assumes that executing grub commands in a probe pod reflects the host’s boot configuration. +- Functions return detailed errors on command execution failures, which may surface as test failures. 
+- Best practice: run TestBootParamsHelper early in a test suite after MachineConfig changes to catch drift before deployment. + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetMcKernelArguments(env *provider.TestEnvironment, nodeName string) (aMap map[string]string)](#getmckernelarguments) | Converts the list of kernel arguments (`[]string`) from a node’s MachineConfig into a key‑value map for easy lookup. | +| [func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error](#testbootparamshelper) | Compares expected kernel arguments from MachineConfig (`GetMcKernelArguments`) against the current command‑line arguments in the container and GRUB configuration, emitting warnings or debug logs for mismatches. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (map[string]string, error)](#getcurrentkernelcmdlineargs) | Executes the `grubKernelArgsCommand` inside a probe pod to capture the current kernel command‑line arguments and returns them as a map of key/value pairs. | +| [func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error)](#getgrubkernelargs) | Executes `grub2-editenv list` inside a probe pod to obtain the current GRUB kernel command line arguments and returns them as a key‑value map. | + +## Exported Functions + +### GetMcKernelArguments + +**GetMcKernelArguments** - Converts the list of kernel arguments (`[]string`) from a node’s MachineConfig into a key‑value map for easy lookup. + +Retrieve the kernel argument map defined in a node’s MachineConfig. 
+ +--- + +#### Signature (Go) + +```go +func GetMcKernelArguments(env *provider.TestEnvironment, nodeName string) (aMap map[string]string) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Converts the list of kernel arguments (`[]string`) from a node’s MachineConfig into a key‑value map for easy lookup. | +| **Parameters** | `env *provider.TestEnvironment` – test environment containing node data.
`nodeName string` – name of the target node. | +| **Return value** | `map[string]string` – mapping of kernel argument names to their values (empty string if no value). | +| **Key dependencies** | Calls `arrayhelper.ArgListToMap` from `github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper`. | +| **Side effects** | None. Pure function; only reads data from the supplied environment. | +| **How it fits the package** | Provides a helper for tests that need to compare MachineConfig kernel arguments against other sources (e.g., runtime cmdline, GRUB). | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + GetMcKernelArguments --> ArgListToMap +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetMcKernelArguments --> func_ArgListToMap +``` + +--- + +#### Functions calling `GetMcKernelArguments` (Mermaid) + +```mermaid +graph TD + func_testSysctlConfigs --> func_GetMcKernelArguments + func_TestBootParamsHelper --> func_GetMcKernelArguments +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetMcKernelArguments +env := &provider.TestEnvironment{ /* … populate as needed … */ } +nodeName := "worker-0" + +// GetMcKernelArguments returns only a map; it does not return an error. +kernelArgs := bootparams.GetMcKernelArguments(env, nodeName) +for key, val := range kernelArgs { + fmt.Printf("Kernel arg %q = %q\n", key, val) +} +``` + +--- + +### TestBootParamsHelper + +**TestBootParamsHelper** - Compares expected kernel arguments from MachineConfig (`GetMcKernelArguments`) against the current command‑line arguments in the container and GRUB configuration, emitting warnings or debug logs for mismatches. + +Validates that the kernel command line arguments specified in a node’s MachineConfig match those actually present in the running container and in GRUB, logging any discrepancies. 
+ +```go +func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Compares expected kernel arguments from MachineConfig (`GetMcKernelArguments`) against the current command‑line arguments in the container and GRUB configuration, emitting warnings or debug logs for mismatches. | +| **Parameters** | `env *provider.TestEnvironment` – test environment context.
`cut *provider.Container` – container under test.
`logger *log.Logger` – logger used for reporting. | +| **Return value** | `error` – non‑nil if the probe pod is missing or any helper call fails; otherwise nil. | +| **Key dependencies** | • `GetMcKernelArguments(env, nodeName)`
• `getCurrentKernelCmdlineArgs(env, nodeName)`
• `getGrubKernelArgs(env, nodeName)`
• `fmt.Errorf` for error construction | +| **Side effects** | Writes log entries via the supplied logger; no state mutation. | +| **How it fits the package** | Serves as the core check used by higher‑level tests (e.g., `testUnalteredBootParams`) to ensure that boot parameters are not unintentionally altered on a node. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Probe pod exists?"} + B -- No --> C["Return error"] + B -- Yes --> D["Retrieve MachineConfig args"] + D --> E["Get current container kernel args"] + E --> F{"Error?"} + F -- Yes --> G["Return error"] + F -- No --> H["Get GRUB kernel args"] + H --> I{"Error?"} + I -- Yes --> J["Return error"] + I -- No --> K["Iterate over MachineConfig keys"] + K --> L{"Key present in current args?"} + L -- Yes --> M{"Values match?"} + M -- No --> N["Warn mismatch (current)"] + M -- Yes --> O["Debug match (current)"] + L -- No --> P["Skip"] + K --> Q{"Key present in GRUB args?"} + Q -- Yes --> R{"Values match?"} + R -- No --> S["Warn mismatch (GRUB)"] + R -- Yes --> T["Debug match (GRUB)"] + Q -- No --> U["Skip"] + O & N & T & S --> V["Finish loop"] + V --> W["Return nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_TestBootParamsHelper --> func_GetMcKernelArguments + func_TestBootParamsHelper --> func_getCurrentKernelCmdlineArgs + func_TestBootParamsHelper --> func_getGrubKernelArgs + func_TestBootParamsHelper --> fmt_Errorf +``` + +#### Functions calling `TestBootParamsHelper` + +```mermaid +graph TD + func_testUnalteredBootParams --> func_TestBootParamsHelper +``` + +#### Usage example + +```go +// Minimal example invoking TestBootParamsHelper +env := provider.NewTestEnvironment(...) 
+cut := &provider.Container{NodeName: "node-1", ...} +logger := log.New(os.Stdout, "", log.LstdFlags) + +if err := bootparams.TestBootParamsHelper(env, cut, logger); err != nil { + fmt.Printf("Boot params check failed: %v\n", err) +} else { + fmt.Println("Boot parameters are consistent") +} +``` + +--- + +## Local Functions + +### getCurrentKernelCmdlineArgs + +**getCurrentKernelCmdlineArgs** - Executes the `grubKernelArgsCommand` inside a probe pod to capture the current kernel command‑line arguments and returns them as a map of key/value pairs. + +#### Signature (Go) + +```go +func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (map[string]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes the `grubKernelArgsCommand` inside a probe pod to capture the current kernel command‑line arguments and returns them as a map of key/value pairs. | +| **Parameters** | `env *provider.TestEnvironment` – test environment containing probe pods;
`nodeName string` – name of the node whose probe pod is queried. | +| **Return value** | `map[string]string` – parsed kernel arguments; `error` if execution fails or output cannot be parsed. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `clientsholder.NewContext(...)`
• `o.ExecCommandContainer(ctx, kernelArgscommand)`
• `strings.Split`, `strings.TrimSuffix`
• `arrayhelper.ArgListToMap` | +| **Side effects** | No state mutation; performs I/O by executing a command inside a container. | +| **How it fits the package** | Provides low‑level data needed for boot parameter validation in the *bootparams* test suite. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetClientsHolder"] --> B["NewContext"] + B --> C["ExecCommandContainer"] + C --> D["TrimSuffix & Split"] + D --> E["ArgListToMap"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getCurrentKernelCmdlineArgs --> func_GetClientsHolder + func_getCurrentKernelCmdlineArgs --> func_NewContext + func_getCurrentKernelCmdlineArgs --> func_ExecCommandContainer + func_getCurrentKernelCmdlineArgs --> func_Split + func_getCurrentKernelCmdlineArgs --> func_TrimSuffix + func_getCurrentKernelCmdlineArgs --> func_ArgListToMap +``` + +#### Functions calling `getCurrentKernelCmdlineArgs` (Mermaid) + +```mermaid +graph TD + func_TestBootParamsHelper --> func_getCurrentKernelCmdlineArgs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getCurrentKernelCmdlineArgs +env := &provider.TestEnvironment{ /* populated elsewhere */ } +nodeName := "worker-node-01" + +args, err := getCurrentKernelCmdlineArgs(env, nodeName) +if err != nil { + log.Fatalf("failed to retrieve kernel args: %v", err) +} +fmt.Printf("Kernel arguments for %s: %+v\n", nodeName, args) +``` + +--- + +### getGrubKernelArgs + +**getGrubKernelArgs** - Executes `grub2-editenv list` inside a probe pod to obtain the current GRUB kernel command line arguments and returns them as a key‑value map. + +#### Signature (Go) + +```go +func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes `grub2-editenv list` inside a probe pod to obtain the current GRUB kernel command line arguments and returns them as a key‑value map. 
| +| **Parameters** | `env *provider.TestEnvironment` – test environment holding probe pods.
`nodeName string` – name of the node whose probe pod will be queried. | +| **Return value** | `aMap map[string]string` – mapping of GRUB kernel argument names to values (empty string if no value).
`err error` – any execution or parsing error. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains Kubernetes client holder.
• `clientsholder.NewContext(...)` – builds context for pod, namespace and container.
• `ExecCommandContainer(ctx, grubKernelArgsCommand)` – runs command inside the pod.
• `strings.Split`, `strings.HasPrefix` – parse output.
• `arrayhelper.FilterArray`, `arrayhelper.ArgListToMap` – filter & convert list to map. | +| **Side effects** | No state mutation; performs I/O by executing a container command and parsing its stdout. | +| **How it fits the package** | Provides GRUB‑level kernel parameters used in boot‑parameter validation tests within `bootparams`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Get Kubernetes client holder"] + B --> C["Build pod context with node’s probe pod"] + C --> D["Execute grub command inside container"] + D --> E{"Success?"} + E -- No --> F["Return error"] + E -- Yes --> G["Split output by newline"] + G --> H["Filter lines starting with options"] + H --> I{"Exactly one line?"} + I -- No --> J["Return error"] + I -- Yes --> K["Split options line into args"] + K --> L["Discard first empty element"] + L --> M["Convert arg list to map"] + M --> N["Return map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getGrubKernelArgs --> clientsholder.GetClientsHolder + func_getGrubKernelArgs --> clientsholder.NewContext + func_getGrubKernelArgs --> ExecCommandContainer + func_getGrubKernelArgs --> arrayhelper.FilterArray + func_getGrubKernelArgs --> arrayhelper.ArgListToMap +``` + +#### Functions calling `getGrubKernelArgs` (Mermaid) + +```mermaid +graph TD + TestBootParamsHelper --> getGrubKernelArgs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getGrubKernelArgs +env := &provider.TestEnvironment{ /* initialized elsewhere */ } +nodeName := "worker-0" + +grubArgs, err := getGrubKernelArgs(env, nodeName) +if err != nil { + log.Fatalf("failed to get GRUB args: %v", err) +} +fmt.Printf("GRUB kernel arguments for %s: %+v\n", nodeName, grubArgs) +``` + +--- diff --git a/docs/tests/platform/clusteroperator/clusteroperator.md b/docs/tests/platform/clusteroperator/clusteroperator.md new file mode 100644 index 000000000..ea571036d --- /dev/null +++ 
b/docs/tests/platform/clusteroperator/clusteroperator.md @@ -0,0 +1,111 @@ +# Package clusteroperator + +**Path**: `tests/platform/clusteroperator` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [IsClusterOperatorAvailable](#isclusteroperatoravailable) + +## Overview + +The clusteroperator package provides utilities for verifying the operational status of OpenShift Cluster Operators during tests. + +### Key Features + +- Checks an operator’s status conditions to confirm availability +- Integrates with the internal logging system for test output +- Exposes a single exported function for reuse in multiple test cases + +### Design Notes + +- Relies on the openshift/config API to inspect OperatorStatusConditions +- Assumes the presence of an "Available" condition indicates readiness; other conditions are ignored +- Best practice: call after operator deployment and before running dependent tests + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func IsClusterOperatorAvailable(co *configv1.ClusterOperator) bool](#isclusteroperatoravailable) | Checks the operator’s status conditions and returns true when the “Available” condition is present, indicating the operator is functioning correctly. | + +## Exported Functions + +### IsClusterOperatorAvailable + +**IsClusterOperatorAvailable** - Checks the operator’s status conditions and returns true when the “Available” condition is present, indicating the operator is functioning correctly. + +```go +func IsClusterOperatorAvailable(co *configv1.ClusterOperator) bool +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks the operator’s status conditions and returns true when the “Available” condition is present, indicating the operator is functioning correctly. | +| **Parameters** | `co *configv1.ClusterOperator` – reference to the operator object whose state is examined. 
| +| **Return value** | `bool` – `true` if an `Available` condition exists; otherwise `false`. | +| **Key dependencies** | • Calls `log.Info` from the internal logging package to record status outcomes.
• Relies on the `configv1.OperatorAvailable` constant and the operator’s `Status.Conditions` slice. | +| **Side effects** | No state mutations; only logs informational messages. | +| **How it fits the package** | Serves as a helper for health‑check tests, enabling them to quickly assess operator readiness without duplicating condition‑parsing logic. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over co.Status.Conditions"} + B -- Condition.Type == OperatorAvailable --> C["Log “Available”"] + C --> D["Return true"] + B -- Else --> E["Continue loop"] + E --> B + B -- End of list --> F["Log “Not Available”"] + F --> G["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_IsClusterOperatorAvailable --> func_log.Info +``` + +#### Functions calling `IsClusterOperatorAvailable` + +```mermaid +graph TD + test_clusteroperator_health --> func_IsClusterOperatorAvailable +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsClusterOperatorAvailable +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/clusteroperator" + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func main() { + // Example operator with an Available condition + op := &configv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{Name: "example-operator"}, + Status: configv1.ClusterOperatorStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{{ + Type: configv1.OperatorAvailable, + Status: configv1.ConditionTrue, + }}, + }, + } + + available := clusteroperator.IsClusterOperatorAvailable(op) + if available { + println("Operator is available") + } else { + println("Operator is not available") + } +} +``` + +--- diff --git a/docs/tests/platform/cnffsdiff/cnffsdiff.md b/docs/tests/platform/cnffsdiff/cnffsdiff.md new file mode 100644 index 000000000..fa43bebb6 --- /dev/null +++ b/docs/tests/platform/cnffsdiff/cnffsdiff.md @@ -0,0 +1,951 @@ +# Package cnffsdiff + +**Path**: `tests/platform/cnffsdiff` + +## Table of Contents + +- 
[Overview](#overview) +- [Structs](#structs) + - [FsDiff](#fsdiff) +- [Interfaces](#interfaces) + - [FsDiffFuncs](#fsdifffuncs) +- [Exported Functions](#exported-functions) + - [FsDiff.GetResults](#fsdiff.getresults) + - [FsDiff.RunTest](#fsdiff.runtest) + - [NewFsDiffTester](#newfsdifftester) +- [Local Functions](#local-functions) + - [FsDiff.createNodeFolder](#fsdiff.createnodefolder) + - [FsDiff.deleteNodeFolder](#fsdiff.deletenodefolder) + - [FsDiff.execCommandContainer](#fsdiff.execcommandcontainer) + - [FsDiff.installCustomPodman](#fsdiff.installcustompodman) + - [FsDiff.intersectTargetFolders](#fsdiff.intersecttargetfolders) + - [FsDiff.mountProbePodmanFolder](#fsdiff.mountprobepodmanfolder) + - [FsDiff.runPodmanDiff](#fsdiff.runpodmandiff) + - [FsDiff.unmountCustomPodman](#fsdiff.unmountcustompodman) + - [FsDiff.unmountProbePodmanFolder](#fsdiff.unmountprobepodmanfolder) + - [shouldUseCustomPodman](#shouldusecustompodman) + +## Overview + +The cnffsdiff package provides a tester that runs podman diff against a target container on an OpenShift node, parses the JSON output and records which filesystem folders were added, changed or deleted. It can use either the system‑installed Podman or a custom binary depending on the cluster version. + +### Key Features + +- Runs podman diff inside a probe pod and captures JSON results +- Automatically mounts a temporary directory to expose the partner’s Podman binary if needed +- Filters output to only include predefined target folders, logging mismatches + +### Design Notes + +- Decision to use a custom Podman binary is based on OpenShift semantic version comparison; older versions lack required features. +- The diff operation retries when it encounters error code 125 (podman exit status) by sleeping and retrying once. +- Users should instantiate FsDiff via NewFsDiffTester, then call RunTest before accessing ChangedFolders/DeletedFolders or GetResults. 
+ +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**FsDiff**](#fsdiff) | Struct definition | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func (f *FsDiff) GetResults() int](#fsdiff.getresults) | Returns the integer status code stored in the `FsDiff` instance, representing the outcome of a filesystem diff operation. | +| [func (f *FsDiff) RunTest(containerUID string)](#fsdiff.runtest) | Runs `podman diff` against the specified container, handles custom podman installation, retries on known error codes, parses JSON output, and records changed or deleted folders. | +| [func NewFsDiffTester( check *checksdb.Check, client clientsholder.Command, ctxt clientsholder.Context, ocpVersion string, ) *FsDiff](#newfsdifftester) | Instantiates an `FsDiff` tester configured for the specified OpenShift version and client context. Determines whether to use a custom Podman binary based on the cluster’s version. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (f *FsDiff) createNodeFolder() error](#fsdiff.createnodefolder) | Creates a temporary directory (`nodeTmpMountFolder`) inside the probe pod’s filesystem by executing `mkdir` on the target node. | +| [func (f *FsDiff) deleteNodeFolder() error](#fsdiff.deletenodefolder) | Removes the temporary directory used for mounting podman on the target node. It issues a `rmdir` command inside the container and reports any unexpected output or errors. | +| [func (f *FsDiff) execCommandContainer(cmd, errorStr string) error](#fsdiff.execcommandcontainer) | Runs `cmd` inside the container that hosts the probe pod. Any non‑empty stdout or stderr, or a failure of the underlying execution, causes an error containing the supplied `errorStr` and diagnostic information. 
| +| [func (f *FsDiff) installCustomPodman() error](#fsdiff.installcustompodman) | Creates a temporary directory on the node, mounts the partner’s Podman binary into it, and prepares the environment for subsequent `podman diff` invocations. | +| [func (f *FsDiff) intersectTargetFolders(src []string) []string](#fsdiff.intersecttargetfolders) | Returns only those folder paths from `src` that match any of the pre‑defined `targetFolders`. Logs a warning for each matched path. | +| [func (f *FsDiff) mountProbePodmanFolder() error](#fsdiff.mountprobepodmanfolder) | Binds the `partnerPodmanFolder` inside the probe container to a local temporary folder (`nodeTmpMountFolder`) so that files can be inspected or manipulated from the host. | +| [func (f *FsDiff) runPodmanDiff(containerUID string) (string, error)](#fsdiff.runpodmandiff) | Runs `podman diff --format json` for the specified container and returns its JSON output. Handles both system‑wide and custom Podman binaries. | +| [func (f *FsDiff) unmountCustomPodman()](#fsdiff.unmountcustompodman) | Detaches the host mount point that was created for the custom Podman instance and removes its temporary directory. It is invoked automatically when a test finishes using a custom Podman binary. | +| [func (f *FsDiff) unmountProbePodmanFolder() error](#fsdiff.unmountprobepodmanfolder) | Executes an `umount` command inside the target container to detach the temporary probe directory (`nodeTmpMountFolder`). | +| [func shouldUseCustomPodman(check *checksdb.Check, ocpVersion string) bool](#shouldusecustompodman) | Decides whether the probe should run with a custom‑compiled podman binary or rely on the preinstalled podman that ships with OpenShift nodes. The decision is based on the supplied OpenShift (OCP) version. 
| 
+
+## Structs
+
+### FsDiff
+
+
+**Purpose**: Holds the state and results of a `podman diff` run against a target container, tracking which of the watched target folders were changed or deleted.
+
+**Fields**: 
+
+| Field | Type | Description |
+|-------|------|--------------|
+| `ChangedFolders` | `[]string` | List of target folders that were modified (added or changed). |
+| `Error` | `error` | Last error encountered during test execution, if any. |
+| `check` | `*checksdb.Check` | Logger used to record progress and warnings. |
+| `result` | `int` | Test outcome (`testhelper.SUCCESS`, `FAILURE`, or `ERROR`). |
+| `clientHolder` | `clientsholder.Command` | Interface for executing commands inside the container. |
+| `ctxt` | `clientsholder.Context` | Execution context passed to command invocations. |
+| `useCustomPodman` | `bool` | Flag indicating whether a custom podman binary should be mounted from the probe pod. |
+| `DeletedFolders` | `[]string` | List of target folders that were removed in the container’s filesystem. |
+
+---
+
+## Interfaces
+
+### FsDiffFuncs
+
+
+**Purpose**: Abstracts the filesystem diff tester, exposing `RunTest` to execute the diff and `GetResults` to retrieve its outcome code.
+
+**Methods**: 
+
+| Method | Description |
+|--------|--------------|
+| `RunTest` | Method documentation |
+| `GetResults` | Method documentation |
+
+---
+
+## Exported Functions
+
+### FsDiff.GetResults
+
+**GetResults** - Returns the integer status code stored in the `FsDiff` instance, representing the outcome of a filesystem diff operation.
+
+#### Signature (Go)
+
+```go
+func (f *FsDiff) GetResults() int
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Returns the integer status code stored in the `FsDiff` instance, representing the outcome of a filesystem diff operation. |
+| **Parameters** | None |
+| **Return value** | `int` – the result code held by the receiver (`f.result`). |
+| **Key dependencies** | • reads the unexported field `result` on the `FsDiff` struct. |
+| **Side effects** | No state mutation or I/O; purely a getter. |
+| **How it fits the package** | Provides external callers with access to the diff result after an `FsDiff` object has performed its analysis. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + f --> result["Retrieve f.result"] +``` + +#### Function dependencies (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `FsDiff.GetResults` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking FsDiff.GetResults +diff := &cnffsdiff.FsDiff{result: 0} // normally populated by diff logic +status := diff.GetResults() +fmt.Println("Diff status:", status) +``` + +--- + +### FsDiff.RunTest + +**RunTest** - Runs `podman diff` against the specified container, handles custom podman installation, retries on known error codes, parses JSON output, and records changed or deleted folders. + +#### 1) Signature (Go) + +```go +func (f *FsDiff) RunTest(containerUID string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs `podman diff` against the specified container, handles custom podman installation, retries on known error codes, parses JSON output, and records changed or deleted folders. | +| **Parameters** | `containerUID string` – The unique identifier of the target container. | +| **Return value** | None; results are stored in the receiver’s fields (`result`, `DeletedFolders`, `ChangedFolders`, etc.). | +| **Key dependencies** | • `FsDiff.installCustomPodman()`
• `FsDiff.unmountCustomPodman()`
• `FsDiff.runPodmanDiff(containerUID)`
• `strings.Contains`
• `time.Sleep`
• `json.Unmarshal`
• `FsDiff.intersectTargetFolders` | +| **Side effects** | • Logs informational, warning, and debug messages.
• May create/delete temporary directories when using custom podman.
• Sets `f.Error`, `f.result`, `DeletedFolders`, and `ChangedFolders`. | +| **How it fits the package** | Core entry point for performing filesystem integrity checks on a container; orchestrates setup, execution, parsing, and result classification. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Check custom podman"] -->|"yes"| B["installCustomPodman"] + B --> C["defer unmountCustomPodman"] + A -->|"no"| D["continue"] + D --> E["Log “Running podman diff”"] + E --> F{"Retry loop (5×)"} + F --> G["runPodmanDiff"] + G --> H{"Success?"} + H -->|"yes"| I["Parse JSON output"] + H -->|"no"| J{"Error contains code 125?"} + J -->|"yes"| K["Log retry, sleep"] + J -->|"no"| L["Break loop"] + I --> M["intersectTargetFolders for Deleted"] + I --> N["intersectTargetFolders for Changed"] + M & N --> O{"Any changes?"} + O -->|"yes"| P["Set result to FAILURE"] + O -->|"no"| Q["Set result to SUCCESS"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + FsDiff.RunTest --> FsDiff.installCustomPodman + FsDiff.RunTest --> FsDiff.unmountCustomPodman + FsDiff.RunTest --> FsDiff.runPodmanDiff + FsDiff.RunTest --> FsDiff.intersectTargetFolders +``` + +#### 5) Functions calling `FsDiff.RunTest` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking FsDiff.RunTest +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff" +) + +// Assume fsDiff is an initialized *FsDiff instance. +var fsDiff *cnffsdiff.FsDiff + +// Run the diff test for a container with UID "abc123". 
+
+fsDiff.RunTest("abc123")
+
+// Inspect results via the exported accessor; result codes come from pkg/testhelper
+if fsDiff.GetResults() == testhelper.SUCCESS {
+    fmt.Println("No filesystem changes detected.")
+} else if fsDiff.GetResults() == testhelper.FAILURE {
+    fmt.Printf("Changed folders: %v\nDeleted folders: %v\n", fsDiff.ChangedFolders, fsDiff.DeletedFolders)
+} else {
+    fmt.Printf("Error running diff: %v\n", fsDiff.Error)
+}
+```
+
+---
+
+---
+
+### NewFsDiffTester
+
+**NewFsDiffTester** - Instantiates an `FsDiff` tester configured for the specified OpenShift version and client context. Determines whether to use a custom Podman binary based on the cluster’s version.
+
+#### 1) Signature (Go)
+
+```go
+func NewFsDiffTester(
+    check *checksdb.Check,
+    client clientsholder.Command,
+    ctxt clientsholder.Context,
+    ocpVersion string,
+) *FsDiff
+```
+
+#### 2) Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Instantiates an `FsDiff` tester configured for the specified OpenShift version and client context. Determines whether to use a custom Podman binary based on the cluster’s version. |
+| **Parameters** | *check* – test check object;
*client* – command holder for executing container commands;
*ctxt* – execution context (namespace, pod, container);
*ocpVersion* – OpenShift version string used to decide Podman strategy. | +| **Return value** | Pointer to an `FsDiff` struct initialized with the provided parameters and a default result of `testhelper.ERROR`. | +| **Key dependencies** | • Calls `shouldUseCustomPodman(check, ocpVersion)`
• Calls `check.LogDebug(...)` | +| **Side effects** | No state mutations beyond constructing the struct; logs debug information. | +| **How it fits the package** | Provides the entry point for creating a diff tester that other test functions (e.g., `testContainersFsDiff`) use to verify container file‑system integrity. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Determine podman usage"} + B -->|"true"| C["Set useCustomPodman = true"] + B -->|"false"| D["Set useCustomPodman = false"] + C & D --> E["Log debug message"] + E --> F["Return FsDiff instance"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_NewFsDiffTester --> func_shouldUseCustomPodman + func_NewFsDiffTester --> func_LogDebug +``` + +#### 5) Functions calling `NewFsDiffTester` (Mermaid) + +```mermaid +graph TD + func_testContainersFsDiff --> func_NewFsDiffTester +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking NewFsDiffTester +check := checksdb.NewCheck("example") +client := clientsholder.GetClientsHolder() +ctx := clientsholder.NewContext("default", "probe-pod", "probe-container") +ocpVer := "4.12" + +fsDiffTester := cnffsdiff.NewFsDiffTester(check, client, ctx, ocpVer) +``` + +--- + +--- + +## Local Functions + +### FsDiff.createNodeFolder + +**createNodeFolder** - Creates a temporary directory (`nodeTmpMountFolder`) inside the probe pod’s filesystem by executing `mkdir` on the target node. + +#### Signature (Go) + +```go +func (f *FsDiff) createNodeFolder() error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a temporary directory (`nodeTmpMountFolder`) inside the probe pod’s filesystem by executing `mkdir` on the target node. | +| **Parameters** | `f *FsDiff` – receiver containing client and context for container operations. | +| **Return value** | `error` – non‑nil if the command fails or returns unexpected output. 
| +| **Key dependencies** | • `FsDiff.execCommandContainer(cmd, errorStr string) error`
• `fmt.Sprintf` (for command & error message construction) | +| **Side effects** | Executes a shell command inside a container; may create a directory on the node’s filesystem and return any execution errors. No other state is modified. | +| **How it fits the package** | Part of the setup phase for `FsDiff`, ensuring a mount point exists before attempting to bind‑mount Podman binaries into it. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Execute `mkdir`"} + B -->|"Success"| C["Return nil"] + B -->|"Failure"| D["Return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FsDiff.createNodeFolder --> func_FsDiff.execCommandContainer +``` + +#### Functions calling `FsDiff.createNodeFolder` (Mermaid) + +```mermaid +graph TD + func_FsDiff.installCustomPodman --> func_FsDiff.createNodeFolder +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FsDiff.createNodeFolder +f := &FsDiff{ /* fields initialized elsewhere */ } +if err := f.createNodeFolder(); err != nil { + fmt.Printf("Failed to create node folder: %v\n", err) +} +``` + +--- + +### FsDiff.deleteNodeFolder + +**deleteNodeFolder** - Removes the temporary directory used for mounting podman on the target node. It issues a `rmdir` command inside the container and reports any unexpected output or errors. + +#### Signature (Go) + +```go +func (f *FsDiff) deleteNodeFolder() error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Removes the temporary directory used for mounting podman on the target node. It issues a `rmdir` command inside the container and reports any unexpected output or errors. | +| **Parameters** | None | +| **Return value** | `error` – non‑nil if the deletion fails or produces output; otherwise `nil`. | +| **Key dependencies** | • `FsDiff.execCommandContainer` – runs a shell command in the node container.
• `fmt.Sprintf` – formats error strings. | +| **Side effects** | Executes an external command inside the test pod; may alter filesystem state on the target node by deleting the directory. | +| **How it fits the package** | Utility function used during setup and teardown of custom Podman mounts in `FsDiff`. It ensures that temporary directories do not persist between tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Format rmdir command"] + B --> C["Call execCommandContainer(cmd, errMsg)"] + C --> D{"Success?"} + D -- Yes --> E["Return nil"] + D -- No --> F["Return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FsDiff.deleteNodeFolder --> func_FsDiff.execCommandContainer + func_FsDiff.deleteNodeFolder --> fmt.Sprintf +``` + +#### Functions calling `FsDiff.deleteNodeFolder` (Mermaid) + +```mermaid +graph TD + func_FsDiff.installCustomPodman --> func_FsDiff.deleteNodeFolder + func_FsDiff.unmountCustomPodman --> func_FsDiff.deleteNodeFolder +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FsDiff.deleteNodeFolder +func example() { + f := &FsDiff{} + if err := f.deleteNodeFolder(); err != nil { + fmt.Printf("Failed to delete node folder: %v\n", err) + } else { + fmt.Println("Node folder deleted successfully.") + } +} +``` + +--- + +### FsDiff.execCommandContainer + +**execCommandContainer** - Runs `cmd` inside the container that hosts the probe pod. Any non‑empty stdout or stderr, or a failure of the underlying execution, causes an error containing the supplied `errorStr` and diagnostic information. + +#### Signature (Go) + +```go +func (f *FsDiff) execCommandContainer(cmd, errorStr string) error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs `cmd` inside the container that hosts the probe pod. 
Any non‑empty stdout or stderr, or a failure of the underlying execution, causes an error containing the supplied `errorStr` and diagnostic information. | +| **Parameters** | `cmd string –` command to run; `errorStr string –` prefix for the returned error message | +| **Return value** | `error –` nil on success, otherwise an error describing unexpected output or execution failure | +| **Key dependencies** | • `f.clientHolder.ExecCommandContainer` (executes command in pod)
• `errors.New` (creates error object)
• `fmt.Sprintf` (formats diagnostic message) | +| **Side effects** | No state changes; only I/O to the container’s stdout/stderr and potential propagation of the execution error. | +| **How it fits the package** | Provides a low‑level helper used by higher‑level operations that manipulate temporary directories or bind mounts on the node through the probe pod. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["execCommandContainer"] --> B{"ExecCommandContainer"} + B --> C["Check err, output, outerr"] + C -- success --> D["Return nil"] + C -- failure --> E["Format errorStr + diagnostics"] + E --> F["errors.New"] + F --> G["Return error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FsDiff.execCommandContainer --> ExecCommandContainer + func_FsDiff.execCommandContainer --> errors_New + func_FsDiff.execCommandContainer --> fmt_Sprintf +``` + +#### Functions calling `FsDiff.execCommandContainer` (Mermaid) + +```mermaid +graph TD + FsDiff.createNodeFolder --> FsDiff.execCommandContainer + FsDiff.deleteNodeFolder --> FsDiff.execCommandContainer + FsDiff.mountProbePodmanFolder --> FsDiff.execCommandContainer + FsDiff.unmountProbePodmanFolder --> FsDiff.execCommandContainer +``` + +#### Usage example (Go) + +```go +// Minimal example invoking execCommandContainer +func (f *FsDiff) example() error { + // Create a temporary directory on the node via the probe pod. + return f.execCommandContainer( + fmt.Sprintf("mkdir %s", "/tmp/example"), + "failed or unexpected output when creating /tmp/example", + ) +} +``` + +--- + +### FsDiff.installCustomPodman + +**installCustomPodman** - Creates a temporary directory on the node, mounts the partner’s Podman binary into it, and prepares the environment for subsequent `podman diff` invocations. 
+ +#### Signature (Go) + +```go +func (f *FsDiff) installCustomPodman() error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a temporary directory on the node, mounts the partner’s Podman binary into it, and prepares the environment for subsequent `podman diff` invocations. | +| **Parameters** | None | +| **Return value** | `error`: non‑nil if folder creation, mounting or cleanup fails; otherwise `nil`. | +| **Key dependencies** | • `FsDiff.createNodeFolder()` – executes `mkdir` inside the container.
• `FsDiff.mountProbePodmanFolder()` – performs a bind mount of the partner Podman folder.
• `FsDiff.deleteNodeFolder()` – removes the temporary directory if mounting fails.
• `f.check.LogInfo()` – logs informational messages.
• `fmt.Errorf` – formats error strings. | +| **Side effects** | • Filesystem changes inside the container (directory creation, bind mount).
• Logging output via the test checker. | +| **How it fits the package** | The function is invoked by `FsDiff.RunTest` when a custom Podman binary must be used to run `podman diff`. It prepares the environment so that subsequent operations can access the correct binary without interfering with the host system. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Log “Creating temp folder”"] + B --> C{"createNodeFolder()"} + C -- success --> D["Log “Mounting podman folder”"] + D --> E{"mountProbePodmanFolder()"} + E -- success --> F["Return nil"] + E -- failure --> G["Call deleteNodeFolder()"] + G -- success --> H["Return mount error"] + G -- failure --> I["Return combined error"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FsDiff.installCustomPodman --> func_FsDiff.createNodeFolder + func_FsDiff.installCustomPodman --> func_FsDiff.mountProbePodmanFolder + func_FsDiff.installCustomPodman --> func_FsDiff.deleteNodeFolder +``` + +#### Functions calling `FsDiff.installCustomPodman` (Mermaid) + +```mermaid +graph TD + func_FsDiff.RunTest --> func_FsDiff.installCustomPodman +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FsDiff.installCustomPodman +f := &cnffsdiff.FsDiff{ /* fields initialized elsewhere */ } +if err := f.installCustomPodman(); err != nil { + fmt.Printf("Failed to set up custom Podman: %v\n", err) +} +``` + +--- + +### FsDiff.intersectTargetFolders + +**intersectTargetFolders** - Returns only those folder paths from `src` that match any of the pre‑defined `targetFolders`. Logs a warning for each matched path. + +#### 1) Signature (Go) + +```go +func (f *FsDiff) intersectTargetFolders(src []string) []string +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns only those folder paths from `src` that match any of the pre‑defined `targetFolders`. Logs a warning for each matched path. 
| +| **Parameters** | `src []string –` list of folder paths returned by a podman diff operation. | +| **Return value** | `[]string –` slice containing only the folders that are present in `targetFolders`. | +| **Key dependencies** | • `github.com/redhat-best-practices-for-k8s/certsuite/pkg/stringhelper.StringInSlice`
• `f.check.LogWarn`
• Built‑in `append` | +| **Side effects** | Logs warnings via the test checker; no state mutation beyond local slice construction. | +| **How it fits the package** | Used by `FsDiff.RunTest` to narrow down diff results to a small, relevant set of directories for verification. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over src"} + B -->|"folder matches targetFolders"| C["Log warning"] + C --> D["Append folder to dst"] + B -->|"no match"| E["Skip"] + D --> F["Continue loop"] + E --> F + F --> G["Return dst"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_FsDiff.intersectTargetFolders --> func_StringInSlice + func_FsDiff.intersectTargetFolders --> func_LogWarn +``` + +#### 5) Functions calling `FsDiff.intersectTargetFolders` (Mermaid) + +```mermaid +graph TD + func_FsDiff.RunTest --> func_FsDiff.intersectTargetFolders +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking FsDiff.intersectTargetFolders + +func main() { + // Assume we have a pre‑configured FsDiff instance `fd`. + diffOutput := []string{"/etc/passwd", "/var/log/app.log", "/home/user/.bashrc"} + relevant := fd.intersectTargetFolders(diffOutput) + fmt.Println("Relevant folders:", relevant) +} +``` + +--- + +### FsDiff.mountProbePodmanFolder + +**mountProbePodmanFolder** - Binds the `partnerPodmanFolder` inside the probe container to a local temporary folder (`nodeTmpMountFolder`) so that files can be inspected or manipulated from the host. + +#### Signature (Go) + +```go +func (f *FsDiff) mountProbePodmanFolder() error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Binds the `partnerPodmanFolder` inside the probe container to a local temporary folder (`nodeTmpMountFolder`) so that files can be inspected or manipulated from the host. 
| +| **Parameters** | None | +| **Return value** | `error` – non‑nil if the bind mount command fails or produces unexpected output. | +| **Key dependencies** | • Calls `FsDiff.execCommandContainer` to run the mount command inside the container.
• Uses `fmt.Sprintf` for error message formatting. | +| **Side effects** | Executes a shell command within the probe pod; if successful, the folder becomes accessible at `nodeTmpMountFolder`. No state mutation on Go objects occurs. | +| **How it fits the package** | Part of the test harness that prepares the environment for filesystem diffing between two container runtimes (podman vs. CRI‑O). It is invoked during installation of a custom podman instance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Run mount command"} + B -->|"Success"| C["Mount completed"] + B -->|"Failure"| D["Return error"] +``` + +#### Function dependencies + +```mermaid +graph TD + FsDiff.mountProbePodmanFolder --> FsDiff.execCommandContainer + FsDiff.mountProbePodmanFolder --> fmt.Sprintf +``` + +#### Functions calling `FsDiff.mountProbePodmanFolder` (Mermaid) + +```mermaid +graph TD + FsDiff.installCustomPodman --> FsDiff.mountProbePodmanFolder +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FsDiff.mountProbePodmanFolder +f := &FsDiff{ /* assume clientHolder and other fields are initialized */ } +if err := f.mountProbePodmanFolder(); err != nil { + log.Fatalf("Failed to mount probe podman folder: %v", err) +} +``` + +--- + +### FsDiff.runPodmanDiff + +**runPodmanDiff** - Runs `podman diff --format json` for the specified container and returns its JSON output. Handles both system‑wide and custom Podman binaries. + +#### Signature (Go) + +```go +func (f *FsDiff) runPodmanDiff(containerUID string) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs `podman diff --format json` for the specified container and returns its JSON output. Handles both system‑wide and custom Podman binaries. | +| **Parameters** | *containerUID* string – identifier of the target container. 
| +| **Return value** | *output* string – JSON diff result; *error* if command execution or output parsing fails. | +| **Key dependencies** | • `fmt.Sprintf` (formatting path)
• `clientHolder.ExecCommandContainer` (remote command execution)
• `fmt.Errorf` (error wrapping) | +| **Side effects** | No state mutation; performs I/O by executing a remote shell command and capturing stdout/stderr. | +| **How it fits the package** | Provides the low‑level diff data that higher‑level methods (`RunTest`) parse to determine file system changes in a container. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Determine podmanPath"] --> B{"Use custom Podman?"} + B -- Yes --> C["Set path: tmpMountDestFolder/podman"] + B -- No --> D["Default path: podman"] + subgraph ExecuteCommand + E["Build command string"] --> F["clientHolder.ExecCommandContainer"] + end + G{"Check err"} -->|"non‑nil"| H["Return wrapped error"] + G --> I{"Check stderr"} + I -->|"non‑empty"| H + I -->|"empty"| J["Return stdout, nil"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FsDiff.runPodmanDiff --> fmt.Sprintf + func_FsDiff.runPodmanDiff --> clientHolder.ExecCommandContainer + func_FsDiff.runPodmanDiff --> fmt.Errorf +``` + +#### Functions calling `FsDiff.runPodmanDiff` (Mermaid) + +```mermaid +graph TD + FsDiff.RunTest --> func_FsDiff.runPodmanDiff +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FsDiff.runPodmanDiff +f := &FsDiff{useCustomPodman: false, clientHolder: someClient} +output, err := f.runPodmanDiff("my-container-id") +if err != nil { + log.Fatalf("podman diff failed: %v", err) +} +fmt.Println("Podman diff output:", output) +``` + +--- + +### FsDiff.unmountCustomPodman + +**unmountCustomPodman** - Detaches the host mount point that was created for the custom Podman instance and removes its temporary directory. It is invoked automatically when a test finishes using a custom Podman binary. + +#### Signature (Go) + +```go +func (f *FsDiff) unmountCustomPodman() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Detaches the host mount point that was created for the custom Podman instance and removes its temporary directory. 
It is invoked automatically when a test finishes using a custom Podman binary. | +| **Parameters** | `f *FsDiff` – receiver holding test state, logger, error accumulator, and configuration flags. | +| **Return value** | None (the function updates the receiver’s fields instead of returning values). | +| **Key dependencies** | • `LogInfo` – logs progress.
• `unmountProbePodmanFolder` – executes `umount` on the mount point.
• `deleteNodeFolder` – removes the temporary directory. | +| **Side effects** | *I/O: runs system commands to unmount and delete a directory.
* State mutation: sets `f.Error` and `f.result` if any step fails.
* Logging output via `f.check`. | +| **How it fits the package** | Part of the cleanup routine for tests that install a custom Podman binary (`FsDiff.installCustomPodman`). It ensures that resources allocated during the test are released, preventing interference with subsequent tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Unmount probe folder"} + B -- success --> C["Delete temporary folder"] + B -- failure --> D["Set error & result to ERROR"] + C -- success --> E["End"] + C -- failure --> F["Set error & result to ERROR"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + FsDiff.unmountCustomPodman --> LogInfo + FsDiff.unmountCustomPodman --> FsDiff.unmountProbePodmanFolder + FsDiff.unmountCustomPodman --> FsDiff.deleteNodeFolder +``` + +#### Functions calling `FsDiff.unmountCustomPodman` (Mermaid) + +```mermaid +graph TD + FsDiff.RunTest --> FsDiff.unmountCustomPodman +``` + +#### Usage example (Go) + +```go +// Minimal example invoking unmountCustomPodman as part of a test run. +func example() { + diff := &FsDiff{useCustomPodman: true} + // ... install custom podman, run tests ... + defer diff.unmountCustomPodman() +} +``` + +--- + +### FsDiff.unmountProbePodmanFolder + +**unmountProbePodmanFolder** - Executes an `umount` command inside the target container to detach the temporary probe directory (`nodeTmpMountFolder`). + +#### Signature (Go) + +```go +func (f *FsDiff) unmountProbePodmanFolder() error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes an `umount` command inside the target container to detach the temporary probe directory (`nodeTmpMountFolder`). | +| **Parameters** | None. The method relies on receiver fields such as `f.clientHolder`, `f.ctxt`, and the global constant `nodeTmpMountFolder`. | +| **Return value** | `error` – wrapped with contextual information if the command fails or produces unexpected output. 
| +| **Key dependencies** | • `FsDiff.execCommandContainer`
• `fmt.Sprintf` (twice) | +| **Side effects** | Performs a container‑injected system call that may modify filesystem state inside the container; returns error on failure. | +| **How it fits the package** | Used during cleanup in `unmountCustomPodman`, ensuring the probe mount does not persist after tests complete. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Prepare umount command"} + B --> C["fmt.Sprintf(umount %s, nodeTmpMountFolder)"] + C --> D["execCommandContainer(command, errorMessage)"] + D --> E["Return result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_FsDiff.unmountProbePodmanFolder --> func_FsDiff.execCommandContainer + func_FsDiff.unmountProbePodmanFolder --> fmt.Sprintf +``` + +#### Functions calling `FsDiff.unmountProbePodmanFolder` (Mermaid) + +```mermaid +graph TD + func_FsDiff.unmountCustomPodman --> func_FsDiff.unmountProbePodmanFolder +``` + +#### Usage example (Go) + +```go +// Minimal example invoking FsDiff.unmountProbePodmanFolder +fd := &FsDiff{ /* initialize fields as required */ } +if err := fd.unmountProbePodmanFolder(); err != nil { + fmt.Printf("Failed to unmount probe folder: %v\n", err) +} +``` + +--- + +--- + +### shouldUseCustomPodman + +**shouldUseCustomPodman** - Decides whether the probe should run with a custom‑compiled podman binary or rely on the preinstalled podman that ships with OpenShift nodes. The decision is based on the supplied OpenShift (OCP) version. + +#### Signature (Go) + +```go +func shouldUseCustomPodman(check *checksdb.Check, ocpVersion string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Decides whether the probe should run with a custom‑compiled podman binary or rely on the preinstalled podman that ships with OpenShift nodes. The decision is based on the supplied OpenShift (OCP) version. | +| **Parameters** | `check *checksdb.Check` – logger and context object for reporting;
`ocpVersion string` – semantic version string of the target OCP cluster. | +| **Return value** | `bool` – `true` if the custom podman should be used, otherwise `false`. | +| **Key dependencies** | • `semver.NewVersion` – parses the version string.
• `check.LogError` – logs parsing failures.
• `version.Major`, `version.Minor` – semantic‑version helpers. | +| **Side effects** | Emits an error log when the OCP version cannot be parsed; otherwise purely computational. No external I/O or state changes. | +| **How it fits the package** | Used by `NewFsDiffTester` to configure a filesystem diff test runner with the appropriate podman binary, ensuring compatibility across different RHEL‑based OpenShift releases. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Parse ocpVersion"} + B -- success --> C{"Major > 4?"} + B -- failure --> D["Log error, use preinstalled podman"] + C -- yes --> D + C -- no --> E{"Major == 4?"} + E -- yes --> F{"Minor < 13?"} + F -- yes --> G["Use custom podman (return true)"] + F -- no --> D + E -- no --> D + D --> H["Return false"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_shouldUseCustomPodman --> func_NewVersion + func_shouldUseCustomPodman --> func_LogError + func_shouldUseCustomPodman --> func_Major + func_shouldUseCustomPodman --> func_Minor +``` + +#### Functions calling `shouldUseCustomPodman` + +```mermaid +graph TD + func_NewFsDiffTester --> func_shouldUseCustomPodman +``` + +#### Usage example (Go) + +```go +// Minimal example invoking shouldUseCustomPodman +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/cnffsdiff" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/checksdb" +) + +// Assume we have a check instance and an OCP version string. 
+var check *checksdb.Check +ocpVersion := "4.12.3" + +useCustom := cnffsdiff.shouldUseCustomPodman(check, ocpVersion) +if useCustom { + fmt.Println("Running with custom podman binary") +} else { + fmt.Println("Using preinstalled podman on the node") +} +``` + +--- + +--- diff --git a/docs/tests/platform/hugepages/hugepages.md b/docs/tests/platform/hugepages/hugepages.md new file mode 100644 index 000000000..c45e640bb --- /dev/null +++ b/docs/tests/platform/hugepages/hugepages.md @@ -0,0 +1,926 @@ +# Package hugepages + +**Path**: `tests/platform/hugepages` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [Tester](#tester) +- [Exported Functions](#exported-functions) + - [NewTester](#newtester) + - [Tester.HasMcSystemdHugepagesUnits](#tester.hasmcsystemdhugepagesunits) + - [Tester.Run](#tester.run) + - [Tester.TestNodeHugepagesWithKernelArgs](#tester.testnodehugepageswithkernelargs) + - [Tester.TestNodeHugepagesWithMcSystemd](#tester.testnodehugepageswithmcsystemd) + - [hugepagesByNuma.String](#hugepagesbynuma.string) +- [Local Functions](#local-functions) + - [Tester.getNodeNumaHugePages](#tester.getnodenumahugepages) + - [getMcHugepagesFromMcKernelArguments](#getmchugepagesfrommckernelarguments) + - [getMcSystemdUnitsHugepagesConfig](#getmcsystemdunitshugepagesconfig) + - [hugepageSizeToInt](#hugepagesizetoint) + - [logMcKernelArgumentsHugepages](#logmckernelargumentshugepages) + +## Overview + +Manages validation of huge‑page configuration on a node against the MachineConfig used in the cluster, supporting both kernel argument and systemd unit mechanisms. 
+ +### Key Features + +- Parses MachineConfig kernel arguments or systemd unit files to extract expected huge‑page sizes, counts and NUMA mapping +- Executes commands inside a probe pod to read actual per‑NUMA huge‑page allocation on the node +- Compares expected versus observed values and reports mismatches via structured logging + +### Design Notes + +- Assumes MachineConfig is present in the pod’s filesystem and that kernel arguments follow a specific pattern; errors are surfaced as fmt.Errorf +- The comparison logic treats missing entries as zero, ensuring strict alignment with defaults +- Best practice: run Tester.Run after node has finished booting so systemd units are active + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**Tester**](#tester) | One‑line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error)](#newtester) | Instantiates a `Tester` that gathers huge‑page information from a node and its MachineConfig. | +| [func (tester *Tester) HasMcSystemdHugepagesUnits() bool](#tester.hasmcsystemdhugepagesunits) | Returns `true` if the tester has at least one Systemd hugepage unit mapped to a NUMA node, otherwise `false`. | +| [func (tester *Tester) Run() error](#tester.run) | Orchestrates comparison of MachineConfig (MC) huge‑page settings with the node’s actual configuration, choosing between Systemd units or kernel arguments based on availability. | +| [func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error)](#tester.testnodehugepageswithkernelargs) | Validates that the hugepage sizes and counts declared in a node’s `MachineConfig` kernel arguments match the actual hugepage allocation observed on the node. For each size present in the kernel arguments, the sum of node‑level allocations must equal the specified count; other sizes should have zero allocation. 
| +| [func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error)](#tester.testnodehugepageswithmcsystemd) | Validates that each node‑specific hugepage size and count matches the MachineConfig systemd units; ensures missing entries are zeroed. | +| [func (numaHps hugepagesByNuma) String() string](#hugepagesbynuma.string) | Formats `hugepagesByNuma` as a string where each NUMA node is listed with its page sizes and counts, sorted by node ID. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error)](#tester.getnodenumahugepages) | Reads the node’s current hugepage allocation per NUMA node by executing a command inside the probe pod and parses its output. | +| [func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int)](#getmchugepagesfrommckernelarguments) | Parses kernel‑argument strings in a `MachineConfig` to build a mapping of hugepage size (in kB) → count, and returns the default hugepage size. | +| [func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepagesByNuma, error)](#getmcsystemdunitshugepagesconfig) | Parses systemd unit files in a MachineConfig to extract huge‑page count, size, and NUMA node information. | +| [func hugepageSizeToInt(s string) int](#hugepagesizetoint) | Parses a string such as `"1M"` or `"2G"` and returns the size in kilobytes. The function supports megabyte (`'M'`) and gigabyte (`'G'`) units, converting them to the appropriate number of kilobytes. | +| [func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz int)](#logmckernelargumentshugepages) | Formats and logs the hugepage size‑to‑count mapping along with the default hugepage size extracted from a MachineConfig’s kernel arguments. 
| + +## Structs + +### Tester + +The `Tester` struct orchestrates validation of a node’s hugepage configuration against the corresponding MachineConfig specifications (either via kernel arguments or systemd unit files). + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `node` | `*provider.Node` | Reference to the target node being tested. | +| `context` | `clientsholder.Context` | Execution context for running commands inside the probe pod on the node. | +| `commander` | `clientsholder.Command` | Interface used to execute shell commands on the node. | +| `nodeHugepagesByNuma` | `hugepagesByNuma` | Map of NUMA index → (hugepage size → count) representing actual node values obtained via `getNodeNumaHugePages`. | +| `mcSystemdHugepagesByNuma` | `hugepagesByNuma` | Map of NUMA index → (hugepage size → count) parsed from the MachineConfig’s systemd unit files. | + +#### Purpose + +`Tester` encapsulates all data and logic required to compare a node’s runtime hugepage allocation against what is declared in its MachineConfig. It supports two comparison modes: + +1. **Kernel arguments** – when the MachineConfig only specifies hugepages via `kernelArguments`. +2. **Systemd units** – when the MachineConfig declares per‑NUMA hugepage settings through systemd unit files. + +The struct provides methods to perform the comparison, report mismatches, and return a success flag or detailed error information. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewTester` | Constructs a `Tester`, populating node data, context, commander, and loading both node‑side and MachineConfig‑side hugepage configurations. | +| `HasMcSystemdHugepagesUnits` | Indicates whether the MachineConfig contains systemd unit entries for hugepages (used to choose comparison strategy). | +| `Run` | Executes the appropriate validation routine (`TestNodeHugepagesWithKernelArgs` or `TestNodeHugepagesWithMcSystemd`) and returns any errors. 
| +| `TestNodeHugepagesWithKernelArgs` | Compares node NUMA‑level hugepage counts against MachineConfig kernel arguments, ensuring sizes match globally. | +| `TestNodeHugepagesWithMcSystemd` | Performs a detailed per‑NUMA and per‑size comparison between node values and MachineConfig systemd unit specifications. | +| `getNodeNumaHugePages` | Helper that runs a command on the node to retrieve actual hugepage counts per NUMA node, populating `nodeHugepagesByNuma`. | + +--- + +--- + +## Exported Functions + +### NewTester + +**NewTester** - Instantiates a `Tester` that gathers huge‑page information from a node and its MachineConfig. + +#### Signature (Go) + +```go +func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `Tester` that gathers huge‑page information from a node and its MachineConfig. | +| **Parameters** | `node *provider.Node` – target node;
`probePod *corev1.Pod` – probe pod running on the node;
`commander clientsholder.Command` – command executor interface. | +| **Return value** | `(*Tester, error)` – a fully initialised tester or an error if data collection fails. | +| **Key dependencies** | • `clientsholder.NewContext` – builds execution context.
• `tester.getNodeNumaHugePages()` – fetches node‑level huge‑page stats.
• `getMcSystemdUnitsHugepagesConfig(&tester.node.Mc)` – parses MachineConfig for huge‑page config. | +| **Side effects** | Logs progress via the package logger; performs remote command execution inside the probe pod. No global state is modified. | +| **How it fits the package** | Serves as a factory function used by test harnesses (e.g., `testHugepages`) to create per‑node testers that validate huge‑page compliance. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Create Tester"} + B --> C["Build context"] + C --> D["Get node huge pages"] + D -->|"error?"| E["Return error"] + D --> F["Parse MachineConfig"] + F -->|"error?"| E + F --> G["Return tester"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NewTester --> func_NewContext + func_NewTester --> func_getNodeNumaHugePages + func_NewTester --> func_getMcSystemdUnitsHugepagesConfig +``` + +#### Functions calling `NewTester` + +```mermaid +graph TD + func_testHugepages --> func_NewTester +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewTester +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" + corev1 "k8s.io/api/core/v1" +) + +func example() error { + // Assume node, pod, and command executor are already available + var node *provider.Node + var probePod *corev1.Pod + commander := clientsholder.GetClientsHolder() + + tester, err := hugepages.NewTester(node, probePod, commander) + if err != nil { + return err + } + // Run the test logic + return tester.Run() +} +``` + +--- + +### Tester.HasMcSystemdHugepagesUnits + +**HasMcSystemdHugepagesUnits** - Returns `true` if the tester has at least one Systemd hugepage unit mapped to a NUMA node, otherwise `false`. + +Checks whether the tester has any MachineConfig Systemd hugepage units collected for NUMA nodes. 
+ +#### Signature (Go) + +```go +func (tester *Tester) HasMcSystemdHugepagesUnits() bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if the tester has at least one Systemd hugepage unit mapped to a NUMA node, otherwise `false`. | +| **Parameters** | None. | +| **Return value** | `bool` – presence of Systemd hugepage units (`>0`). | +| **Key dependencies** | • `len` (built‑in function)
• `tester.mcSystemdHugepagesByNuma` (map field) | +| **Side effects** | None. Purely reads internal state. | +| **How it fits the package** | Determines which path of hugepage comparison logic to execute in `Tester.Run`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"len(tester.mcSystemdHugepagesByNuma) > 0"} + B -- Yes --> C["Return true"] + B -- No --> D["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Tester.HasMcSystemdHugepagesUnits --> len +``` + +#### Functions calling `Tester.HasMcSystemdHugepagesUnits` (Mermaid) + +```mermaid +graph TD + func_Tester.Run --> func_Tester.HasMcSystemdHugepagesUnits +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Tester.HasMcSystemdHugepagesUnits +func main() { + tester := &Tester{} + // Assume mcSystemdHugepagesByNuma is initialized elsewhere + if tester.HasMcSystemdHugepagesUnits() { + fmt.Println("MachineConfig Systemd hugepage units present.") + } else { + fmt.Println("No MachineConfig Systemd hugepage units found.") + } +} +``` + +--- + +### Tester.Run + +**Run** - Orchestrates comparison of MachineConfig (MC) huge‑page settings with the node’s actual configuration, choosing between Systemd units or kernel arguments based on availability. + +#### Signature (Go) + +```go +func (tester *Tester) Run() error +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Orchestrates comparison of MachineConfig (MC) huge‑page settings with the node’s actual configuration, choosing between Systemd units or kernel arguments based on availability. | +| **Parameters** | *none* – operates on the receiver `tester`. | +| **Return value** | `error` – non‑nil if any validation fails; otherwise nil. | +| **Key dependencies** | • `Tester.HasMcSystemdHugepagesUnits()`
• `log.Info` (internal logger)
• `Tester.TestNodeHugepagesWithMcSystemd()`
• `Tester.TestNodeHugepagesWithKernelArgs()`
• `fmt.Errorf` | +| **Side effects** | Emits informational logs; may return an error that includes diagnostic messages. No state mutation occurs beyond logging. | +| **How it fits the package** | Entry point for huge‑page validation tests in the `hugepages` package; decides which comparison routine to invoke based on MC configuration. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Tester.Run --> HasMcSystemdHugepagesUnits + HasMcSystemdHugepagesUnits -- true --> log.Info("Comparing MachineConfig Systemd hugepages info against node values.") + HasMcSystemdHugepagesUnits -- true --> TestNodeHugepagesWithMcSystemd + TestNodeHugepagesWithMcSystemd -- pass=false --> fmt.Errorf + HasMcSystemdHugepagesUnits -- false --> log.Info("Comparing MC KernelArguments hugepages info against node values.") + HasMcSystemdHugepagesUnits -- false --> TestNodeHugepagesWithKernelArgs + TestNodeHugepagesWithKernelArgs -- pass=false --> fmt.Errorf + Tester.Run --> nil +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Tester_Run --> func_Tester_HasMcSystemdHugepagesUnits + func_Tester_Run --> func_log_Info + func_Tester_Run --> func_Tester_TestNodeHugepagesWithMcSystemd + func_Tester_Run --> func_Tester_TestNodeHugepagesWithKernelArgs + func_Tester_Run --> func_fmt_Errorf +``` + +#### Functions calling `Tester.Run` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking Tester.Run +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages" +) + +func main() { + tester := &hugepages.Tester{ + // Populate tester fields as required for the test environment. 
+ } + if err := tester.Run(); err != nil { + fmt.Printf("Huge‑page validation failed: %v\n", err) + } else { + fmt.Println("All huge‑page checks passed.") + } +} +``` + +--- + +### Tester.TestNodeHugepagesWithKernelArgs + +**TestNodeHugepagesWithKernelArgs** - Validates that the hugepage sizes and counts declared in a node’s `MachineConfig` kernel arguments match the actual hugepage allocation observed on the node. For each size present in the kernel arguments, the sum of node‑level allocations must equal the specified count; other sizes should have zero allocation. + +#### 1) Signature (Go) + +```go +func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that the hugepage sizes and counts declared in a node’s `MachineConfig` kernel arguments match the actual hugepage allocation observed on the node. For each size present in the kernel arguments, the sum of node‑level allocations must equal the specified count; other sizes should have zero allocation. | +| **Parameters** | `tester *Tester` – receiver containing node information and precomputed node‑by‑NUMA hugepage data (`nodeHugepagesByNuma`). | +| **Return value** | `bool, error` – `true` if all checks pass; otherwise `false` with an explanatory error. | +| **Key dependencies** | • `getMcHugepagesFromMcKernelArguments(&tester.node.Mc)` – parses kernel arguments into a map of size→count.
• `log.Info` – records successful matches.
• `fmt.Errorf` – constructs detailed failure messages. | +| **Side effects** | Emits informational logs; no state mutation occurs. | +| **How it fits the package** | Part of the *hugepages* test suite, invoked by `Tester.Run()` to verify that a node’s runtime hugepage configuration aligns with its MachineConfig specification. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Parse kernel arguments"] --> B["Validate each node NUMA size exists"] + B --> C{"All sizes valid?"} + C -- Yes --> D["Sum node counts per size"] + D --> E{"Totals match kernel args?"} + E -- Yes --> F["Return success"] + E -- No --> G["Return error"] + C -- No --> G +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_Tester.TestNodeHugepagesWithKernelArgs --> func_getMcHugepagesFromMcKernelArguments + func_Tester.TestNodeHugepagesWithKernelArgs --> func_log.Info + func_Tester.TestNodeHugepagesWithKernelArgs --> func_fmt.Errorf +``` + +#### 5) Functions calling `Tester.TestNodeHugepagesWithKernelArgs` (Mermaid) + +```mermaid +graph TD + func_TestRunner.Run --> func_Tester.TestNodeHugepagesWithKernelArgs +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking Tester.TestNodeHugepagesWithKernelArgs +tester := &Tester{node: nodeInfo, nodeHugepagesByNuma: numaMap} +pass, err := tester.TestNodeHugepagesWithKernelArgs() +if !pass { + log.Fatalf("hugepage test failed: %v", err) +} +fmt.Println("Hugepage configuration matches kernel arguments.") +``` + +--- + +### Tester.TestNodeHugepagesWithMcSystemd + +**TestNodeHugepagesWithMcSystemd** - Validates that each node‑specific hugepage size and count matches the MachineConfig systemd units; ensures missing entries are zeroed. + +Compare the hugepage configuration reported by a node against the values defined in its MachineConfig systemd units. 
+ +--- + +#### Signature (Go) + +```go +func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates that each node‑specific hugepage size and count matches the MachineConfig systemd units; ensures missing entries are zeroed. | +| **Parameters** | `tester *Tester` – receiver containing cached node and MC hugepage maps (`nodeHugepagesByNuma`, `mcSystemdHugepagesByNuma`). | +| **Return value** | `bool, error` – `true` if all comparisons succeed; otherwise `false` with a descriptive error. | +| **Key dependencies** | • `log.Warn(msg string, args ...any)` (internal logger)
• `fmt.Errorf(format string, args ...interface{})` (error formatting) | +| **Side effects** | Emits warning logs when a NUMA node exists in the node but not in MC; otherwise only returns errors. No state mutation occurs. | +| **How it fits the package** | Part of the hugepages test suite that checks consistency between node runtime and MachineConfig specifications before reporting success to higher‑level orchestrators. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over nodeHugepagesByNuma"] --> B{"Check NUMA exists in MC"} + B -- No --> C["Warn: missing NUMA"] + C --> D["Verify all node counts are zero"] + B -- Yes --> E["For each size in node"] + E --> F{"Size exists in MC & non‑zero count?"} + F -- True --> G["Return error"] + F -- False --> H["Continue"] + H --> I["Iterate over mcSystemdHugepagesByNuma"] + I --> J{"NUMA exists in node?"} + J -- No --> K["Return error"] + J -- Yes --> L["For each size in MC"] + L --> M{"Size exists in node?"} + M -- No --> N["Return error"] + M -- Yes --> O{"Counts match?"} + O -- No --> P["Return error"] + O -- Yes --> Q["Continue"] + Q --> R["All checks passed"] + R --> S["Return true, nil"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_Tester.TestNodeHugepagesWithMcSystemd --> func_log.Warn + func_Tester.TestNodeHugepagesWithMcSystemd --> func_fmt.Errorf +``` + +--- + +#### Functions calling `Tester.TestNodeHugepagesWithMcSystemd` (Mermaid) + +```mermaid +graph TD + func_Tester.Run --> func_Tester.TestNodeHugepagesWithMcSystemd +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking Tester.TestNodeHugepagesWithMcSystemd +tester := &Tester{ + nodeHugepagesByNuma: map[int]map[uint64]int{0: {2048: 4}}, + mcSystemdHugepagesByNuma: map[int]map[uint64]int{0: {2048: 4}}, +} +pass, err := tester.TestNodeHugepagesWithMcSystemd() +if !pass { + fmt.Printf("Hugepage mismatch: %v\n", err) +} else { + fmt.Println("All hugepage settings match.") 
+} +``` + +--- + +### hugepagesByNuma.String + +**String** - Formats `hugepagesByNuma` as a string where each NUMA node is listed with its page sizes and counts, sorted by node ID. + +Provides a human‑readable string representation of the mapping from NUMA node IDs to page size counts, used for debugging and logging. + +```go +func (numaHps hugepagesByNuma) String() string +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats `hugepagesByNuma` as a string where each NUMA node is listed with its page sizes and counts, sorted by node ID. | +| **Parameters** | None (receiver: `numaHps hugepagesByNuma`) | +| **Return value** | `string` – concatenated representation of all nodes and their size/count pairs. | +| **Key dependencies** | *`sort.Ints`* – sorts NUMA indexes.
*`strings.Builder`* – efficient string construction.
*`fmt.Sprintf`* – formatting each component. | +| **Side effects** | None; pure function that only reads the receiver. | +| **How it fits the package** | Implements `Stringer` for `hugepagesByNuma`, enabling concise logging of hugepage distributions across NUMA nodes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Collect numaIndexes"] + B --> C["Sort indexes"] + C --> D["Iterate over sorted indexes"] + D --> E["Append node header"] + E --> F["Iterate sizeCounts"] + F --> G["Append size/count pair"] + G --> H["Continue loop"] + H --> I["Return string builder contents"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_hugepagesByNuma.String --> sort.Ints + func_hugepagesByNuma.String --> strings.Builder + func_hugepagesByNuma.String --> fmt.Sprintf +``` + +#### Functions calling `hugepagesByNuma.String` (Mermaid) + +```mermaid +graph TD + func_logMcKernelArgumentsHugepages --> func_hugepagesByNuma.String +``` + +#### Usage example (Go) + +```go +// Minimal example invoking hugepagesByNuma.String +var numaPages = hugepagesByNuma{ + 0: {2048: 10, 1048576: 2}, + 1: {2048: 5}, +} +fmt.Println(numaPages.String()) +// Output: Numa=0 [Size=2048kB Count=10] [Size=1048576kB Count=2] Numa=1 [Size=2048kB Count=5] +``` + +--- + +## Local Functions + +### Tester.getNodeNumaHugePages + +**getNodeNumaHugePages** - Reads the node’s current hugepage allocation per NUMA node by executing a command inside the probe pod and parses its output. + +#### Signature (Go) + +```go +func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads the node’s current hugepage allocation per NUMA node by executing a command inside the probe pod and parses its output. | +| **Parameters** | `tester *Tester` – receiver holding node, commander, and context information. 
| +| **Return value** | `hugepagesByNuma` – map of NUMA node to hugepage size/count pairs; `error` on failure to execute command or parse output. | +| **Key dependencies** | • `ExecCommandContainer` (runs shell command inside pod)
• `log.Debug`, `log.Info` (logging)
• `regexp.MustCompile`, `strings.Split`, `strconv.Atoi` (parsing)
• `errors.New`, `fmt.Errorf` (error handling) | +| **Side effects** | No state mutation; only I/O via command execution and logging. | +| **How it fits the package** | Provides the baseline hugepage configuration used by tests that compare desired vs actual settings on a Kubernetes node. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Execute command inside probe pod"] --> B{"Command succeeded?"} + B -- Yes --> C["Split stdout into lines"] + B -- No --> D["Return error"] + C --> E{"Non‑empty line?"} + E -- Yes --> F["Regex match"] + F --> G{"Match found?"} + G -- Yes --> H["Parse NUMA, size, count"] + H --> I["Update map"] + G -- No --> J["Error: parse failure"] + I --> K["Log result"] + K --> L["Return map"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_Tester.getNodeNumaHugePages --> func_ExecCommandContainer + func_Tester.getNodeNumaHugePages --> func_log.Debug + func_Tester.getNodeNumaHugePages --> func_regexp.MustCompile + func_Tester.getNodeNumaHugePages --> func_strings.Split + func_Tester.getNodeNumaHugePages --> func_strconv.Atoi + func_Tester.getNodeNumaHugePages --> func_errors.New + func_Tester.getNodeNumaHugePages --> func_fmt.Errorf + func_Tester.getNodeNumaHugePages --> func_log.Info +``` + +#### Functions calling `Tester.getNodeNumaHugePages` + +```mermaid +graph TD + func_NewTester --> func_Tester.getNodeNumaHugePages +``` + +#### Usage example (Go) + +```go +// Minimal example invoking Tester.getNodeNumaHugePages +tester, err := NewTester(node, probePod, commander) +if err != nil { + log.Fatalf("cannot create tester: %v", err) +} +hugepages, err := tester.getNodeNumaHugePages() +if err != nil { + log.Fatalf("failed to get hugepages: %v", err) +} +fmt.Printf("Hugepage config: %+v\n", hugepages) +``` + +--- + +### getMcHugepagesFromMcKernelArguments + +**getMcHugepagesFromMcKernelArguments** - Parses kernel‑argument strings in a `MachineConfig` to build a mapping of hugepage size (in kB) → count, and returns the 
default hugepage size. + +#### Signature (Go) + +```go +func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses kernel‑argument strings in a `MachineConfig` to build a mapping of hugepage size (in kB) → count, and returns the default hugepage size. | +| **Parameters** | `mc *provider.MachineConfig` – machine configuration containing kernel arguments. | +| **Return value** | `hugepagesPerSize map[int]int` – map from page size to count; `defhugepagesz int` – default hugepage size in kB. | +| **Key dependencies** | • `strings.Split`, `len`
• `strconv.Atoi`
• helper `hugepageSizeToInt` (parses `"1G"` → 1048576)
• logging via `log.Warn`, `logMcKernelArgumentsHugepages` | +| **Side effects** | Emits warning logs if no hugepage info is found; logs parsed configuration. No state mutation outside its local variables. | +| **How it fits the package** | Provides a central routine for tests that need to validate kernel‑argument based hugepage settings against node reports. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over mc.Spec.KernelArguments"} + B -->|"Parse key=value"| C["Split argument"] + C --> D{"Key is hugepages && value not empty"} + D -->|"Add count to map"| E["Update hugepagesPerSize for current size"] + D -->|"Else use default size"| F["Use RhelDefaultHugepagesz"] + B --> G{"Key is hugepagesz"} + G -->|"Set current size"| H["Convert value → int via hugepageSizeToInt"] + H --> I["Create map entry with count = 0"] + B --> J{"Key is default_hugepagesz"} + J -->|"Set default size"| K["Convert value → int"] + K --> L["Overwrite map entry for default size"] + B --> M["Continue loop"] + M --> B + B --> N{"Map empty?"} + N -->|"Yes"| O["Insert RhelDefaultHugepagesz→RhelDefaultHugepages"] + N -->|"No"| Q["Proceed normally"] + O --> P["Warn log"] + Q --> S["Log parsed config via logMcKernelArgumentsHugepages"] + S --> T["Return map, default size"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getMcHugepagesFromMcKernelArguments --> strings_Split + func_getMcHugepagesFromMcKernelArguments --> len + func_getMcHugepagesFromMcKernelArguments --> strconv_Atoi + func_getMcHugepagesFromMcKernelArguments --> hugepageSizeToInt + func_getMcHugepagesFromMcKernelArguments --> log_Warn + func_getMcHugepagesFromMcKernelArguments --> logMcKernelArgumentsHugepages +``` + +#### Functions calling `getMcHugepagesFromMcKernelArguments` (Mermaid) + +```mermaid +graph TD + func_TestNodeHugepagesWithKernelArgs --> func_getMcHugepagesFromMcKernelArguments +``` + +#### Usage example (Go) + +```go +// Minimal example 
invoking getMcHugepagesFromMcKernelArguments +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + // Assume cfg is a populated provider.MachineConfig + var cfg *provider.MachineConfig + + hugepages, defaultSize := getMcHugepagesFromMcKernelArguments(cfg) + fmt.Printf("Default size: %d kB\n", defaultSize) + for sz, cnt := range hugepages { + fmt.Printf("Size %d kB → count %d\n", sz, cnt) + } +} +``` + +--- + +### getMcSystemdUnitsHugepagesConfig + +**getMcSystemdUnitsHugepagesConfig** - Parses systemd unit files in a MachineConfig to extract huge‑page count, size, and NUMA node information. + +#### 1) Signature (Go) + +```go +func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepagesByNuma, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses systemd unit files in a MachineConfig to extract huge‑page count, size, and NUMA node information. | +| **Parameters** | `mc *provider.MachineConfig` – the machine configuration containing systemd units. | +| **Return value** | `hugepagesByNuma` – a map keyed by NUMA node with values mapping page sizes to counts; `error` if parsing fails. | +| **Key dependencies** | • `regexp.MustCompile`, `strings.Trim`, `strings.Contains`, `log.Logger.Info`,
• `strconv.Atoi`,
• `fmt.Errorf` | +| **Side effects** | Logs progress and errors via the internal logger; no mutation of the input MachineConfig. | +| **How it fits the package** | Provides a helper for the huge‑pages tester to compare node‑level values with those declared in MachineConfig. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CompileRegex["Compile regex"] + CompileRegex --> LoopUnits{"Iterate over mc.Config.Systemd.Units"} + LoopUnits -->|"unit.Name contains hugepages-allocation"| ProcessUnit + LoopUnits -->|"otherwise"| SkipUnit + ProcessUnit --> TrimName["Trim unit name"] + ProcessUnit --> ContainsCheck["Check for hugepages string"] + ContainsCheck --> LogInfo["Log unit details"] + LogInfo --> TrimContent["Trim unit contents"] + TrimContent --> ExtractValues["Find regex submatches"] + ExtractValues -->|"invalid"| ReturnError["Return fmt.Errorf"] + ExtractValues --> ParseInts["Parse NUMA, size, count"] + ParseInts --> UpdateMap{"Update hugepages map"} + UpdateMap --> LoopUnits + LoopUnits --> EndLoop{"All units processed?"} + EndLoop -->|"yes"| LogSummary["Log final map or missing info"] + LogSummary --> ReturnResult["Return hugepages, nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + getMcSystemdUnitsHugepagesConfig --> regexp.MustCompile + getMcSystemdUnitsHugepagesConfig --> strings.Trim + getMcSystemdUnitsHugepagesConfig --> strings.Contains + getMcSystemdUnitsHugepagesConfig --> log.Logger.Info + getMcSystemdUnitsHugepagesConfig --> strconv.Atoi + getMcSystemdUnitsHugepagesConfig --> fmt.Errorf +``` + +#### 5) Functions calling `getMcSystemdUnitsHugepagesConfig` (Mermaid) + +```mermaid +graph TD + NewTester --> getMcSystemdUnitsHugepagesConfig +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking getMcSystemdUnitsHugepagesConfig +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages" + 
"github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + // Assume mc is obtained elsewhere (e.g., from a MachineConfig CR) + var mc *provider.MachineConfig + hugepages, err := hugepages.getMcSystemdUnitsHugepagesConfig(mc) + if err != nil { + panic(err) + } + fmt.Printf("Parsed hugepages: %+v\n", hugepages) +} +``` + +--- + +### hugepageSizeToInt + +**hugepageSizeToInt** - Parses a string such as `"1M"` or `"2G"` and returns the size in kilobytes. The function supports megabyte (`'M'`) and gigabyte (`'G'`) units, converting them to the appropriate number of kilobytes. + +```go +func hugepageSizeToInt(s string) int +``` + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a string such as `"1M"` or `"2G"` and returns the size in kilobytes. The function supports megabyte (`'M'`) and gigabyte (`'G'`) units, converting them to the appropriate number of kilobytes. | +| **Parameters** | `s string –` a numeric value followed by a single unit character (`M` or `G`). | +| **Return value** | `int –` size in kilobytes; returns 0 if parsing fails or an unsupported unit is supplied. | +| **Key dependencies** | • `strconv.Atoi` – converts the numeric part of the string to an integer.
• `len` – used twice: once to slice off the trailing unit character, and again to index that last character. | +| **Side effects** | None; pure function with no external state changes or I/O. | +| **How it fits the package** | This helper is used by other functions in the *hugepages* test suite (e.g., `getMcHugepagesFromMcKernelArguments`) to interpret kernel argument values that specify huge‑page sizes. It centralizes the unit conversion logic for consistency across tests. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Trim last char"} + B --> C["Convert numeric part"] + C --> D{"Unit = M or G"} + D -->|"M"| E["Multiply by 1024"] + D -->|"G"| F["Multiply by 1,048,576"] + D -->|"Other"| G["Return num (unchanged)"] + E & F & G --> H["Return result"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_hugepageSizeToInt --> func_Atoi + func_hugepageSizeToInt --> len + func_hugepageSizeToInt --> len +``` + +#### Functions calling `hugepageSizeToInt` + +```mermaid +graph TD + func_getMcHugepagesFromMcKernelArguments --> func_hugepageSizeToInt +``` + +#### Usage example (Go) + +```go +// Minimal example invoking hugepageSizeToInt +package main + +import ( + "fmt" +) + +func main() { + fmt.Println(hugepageSizeToInt("1M")) // 1024 + fmt.Println(hugepageSizeToInt("2G")) // 2097152 +} +``` + +--- + +### logMcKernelArgumentsHugepages + +**logMcKernelArgumentsHugepages** - Formats and logs the hugepage size‑to‑count mapping along with the default hugepage size extracted from a MachineConfig’s kernel arguments. + +#### Signature (Go) + +```go +func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz int) {} +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Formats and logs the hugepage size‑to‑count mapping along with the default hugepage size extracted from a MachineConfig’s kernel arguments. 
| +| **Parameters** | `hugepagesPerSize map[int]int` – mapping of hugepage size (kB) to its count.
`defhugepagesz int` – default hugepage size in kB. | +| **Return value** | None (side‑effect only). | +| **Key dependencies** | • `strings.Builder`
• `fmt.Sprintf`
• `log.Info` from the internal logging package | +| **Side effects** | Writes a formatted string to the global logger; no state mutation outside the log. | +| **How it fits the package** | Used by the hugepages test suite to record parsed kernel argument values for debugging and audit purposes. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Create strings.Builder"] + B --> C["Write header with default size"] + C --> D{"Iterate over hugepagesPerSize"} + D -->|"for each size,count"| E["Append , size=%dkB - count=%d"] + D --> F["End loop"] + F --> G["Log the built string via log.Info"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_logMcKernelArgumentsHugepages --> fmt.Sprintf + func_logMcKernelArgumentsHugepages --> strings.Builder + func_logMcKernelArgumentsHugepages --> internal/log.Logger.Info +``` + +#### Functions calling `logMcKernelArgumentsHugepages` (Mermaid) + +```mermaid +graph TD + func_getMcHugepagesFromMcKernelArguments --> func_logMcKernelArgumentsHugepages +``` + +#### Usage example (Go) + +```go +// Minimal example invoking logMcKernelArgumentsHugepages +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/hugepages" +) + +func main() { + // Example hugepage configuration: 2 sizes with counts + config := map[int]int{ + 2048: 4, // 2 MB pages, count 4 + 1048576: 1, // 1 GB page, count 1 + } + defaultSize := 2048 + + hugepages.logMcKernelArgumentsHugepages(config, defaultSize) +} +``` + +--- diff --git a/docs/tests/platform/isredhat/isredhat.md b/docs/tests/platform/isredhat/isredhat.md new file mode 100644 index 000000000..accea946e --- /dev/null +++ b/docs/tests/platform/isredhat/isredhat.md @@ -0,0 +1,366 @@ +# Package isredhat + +**Path**: `tests/platform/isredhat` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [BaseImageInfo](#baseimageinfo) +- [Exported Functions](#exported-functions) + - 
[BaseImageInfo.TestContainerIsRedHatRelease](#baseimageinfo.testcontainerisredhatrelease)
+  - [IsRHEL](#isrhel)
+  - [NewBaseImageTester](#newbaseimagetester)
+- [Local Functions](#local-functions)
+  - [BaseImageInfo.runCommand](#baseimageinfo.runcommand)
+
+## Overview
+
+Provides utilities for determining whether a container image is an official Red Hat distribution by executing commands inside the image and parsing its release information.
+
+### Key Features
+
+- Executes shell snippets in containers to read `/etc/redhat-release` via a command executor
+- Parses release text using precompiled regular expressions to detect official Red Hat releases
+- Exposes a constructor that bundles client holder and context for testing images
+
+### Design Notes
+
+- Runs commands through an external client holder, assuming container access and `exec` capability
+- Only checks `/etc/redhat-release`; non‑Red Hat images lacking this file will return false
+- Typical usage: create a BaseImageInfo via NewBaseImageTester then call TestContainerIsRedHatRelease to assert image type
+
+### Structs Summary
+
+| Name | Purpose |
+|------|----------|
+| [**BaseImageInfo**](#baseimageinfo) | Executes commands inside an OpenShift container to determine whether its base image is an official Red Hat release |
+
+### Exported Functions Summary
+
+| Name | Purpose |
+|------|----------|
+| [func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error)](#baseimageinfo.testcontainerisredhatrelease) | Executes a shell snippet inside the container to read `/etc/redhat-release` and determines if the image is an official Red Hat distribution. |
+| [func IsRHEL(output string) bool](#isrhel) | Returns `true` if the supplied text matches patterns that identify an official Red Hat release; otherwise returns `false`. |
+| [func NewBaseImageTester(client clientsholder.Command, ctx clientsholder.Context) *BaseImageInfo](#newbaseimagetester) | Instantiates a `BaseImageInfo` object that holds the necessary context and command executor for testing container base images. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (b *BaseImageInfo) runCommand(cmd string) (string, error)](#baseimageinfo.runcommand) | Runs an arbitrary shell command in the target container via `ExecCommandContainer`, returning stdout or reporting errors. | + +## Structs + +### BaseImageInfo + +Represents a helper that executes commands inside an OpenShift container to discover the base image of a pod. + +#### Fields + +| Field | Type | Description | +|-------------|----------------------|-------------| +| ClientHolder | `clientsholder.Command` | Holds the client interface used to run shell commands in a container. | +| OCPContext | `clientsholder.Context` | Provides the execution context (e.g., namespace, pod name) for command runs. | + +#### Purpose + +The struct encapsulates the dependencies required to interrogate a running container. It is primarily used by test functions that need to determine whether the image inside a pod is based on Red Hat Enterprise Linux. By delegating command execution through `ClientHolder`, it abstracts away the underlying client implementation and keeps tests focused on logic rather than connection details. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewBaseImageTester` | Constructs a new `BaseImageInfo` with the provided command executor and context. | +| `runCommand` | Executes an arbitrary shell command inside the target container, returning its output or error. | +| `TestContainerIsRedHatRelease` | Uses `runCommand` to read `/etc/redhat-release`, logs the result, and returns whether the image is RHEL‑based. | + +--- + +--- + +## Exported Functions + +### BaseImageInfo.TestContainerIsRedHatRelease + +**TestContainerIsRedHatRelease** - Executes a shell snippet inside the container to read `/etc/redhat-release` and determines if the image is an official Red Hat distribution. + +Checks whether a container’s base image is an official Red Hat release. 
+ +--- + +#### Signature (Go) + +```go +func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes a shell snippet inside the container to read `/etc/redhat-release` and determines if the image is an official Red Hat distribution. | +| **Parameters** | `b *BaseImageInfo` – receiver containing client and context information. | +| **Return value** | `bool` – true when the base image matches a known Red Hat release; `error` – any execution or parsing error. | +| **Key dependencies** | • `BaseImageInfo.runCommand` – runs the shell command inside the container.
• `log.Info` – logs command output.
• `IsRHEL` – evaluates the command result. | +| **Side effects** | No state mutation; only performs I/O by executing a command in the target container and logging the output. | +| **How it fits the package** | Part of the `isredhat` test suite, providing a reusable check for Red Hat base images used across other tests. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Run /etc/redhat-release command"} + B --> C["Capture output"] + C --> D{"Check error"} + D -- No --> E["Call IsRHEL(output)"] + D -- Yes --> F["Return false, err"] + E --> G["Return true/false, nil"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_BaseImageInfo.TestContainerIsRedHatRelease --> func_BaseImageInfo.runCommand + func_BaseImageInfo.TestContainerIsRedHatRelease --> func_log.Info + func_BaseImageInfo.TestContainerIsRedHatRelease --> func_IsRHEL +``` + +--- + +#### Functions calling `BaseImageInfo.TestContainerIsRedHatRelease` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking BaseImageInfo.TestContainerIsRedHatRelease +b := &isredhat.BaseImageInfo{ + ClientHolder: /* client implementation */, + OCPContext: /* context string */, +} +isRHEL, err := b.TestContainerIsRedHatRelease() +if err != nil { + log.Fatalf("error checking base image: %v", err) +} +fmt.Printf("Container is Red Hat release: %t\n", isRHEL) +``` + +--- + +### IsRHEL + +**IsRHEL** - Returns `true` if the supplied text matches patterns that identify an official Red Hat release; otherwise returns `false`. + +#### Signature (Go) + +```go +func IsRHEL(output string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if the supplied text matches patterns that identify an official Red Hat release; otherwise returns `false`. 
| 
+| **Parameters** | `output string` – textual content of `/etc/redhat-release` or a similar source. |
+| **Return value** | `bool` – `true` if output indicates a Red Hat image, `false` otherwise. |
+| **Key dependencies** | • `regexp.MustCompile` (to compile regexes)
• `regexp.FindAllString` (to search matches)
• `len` (to count matches)
• `log.Logger.Info` (for debug logging) | +| **Side effects** | Emits informational logs via the package logger; no state mutation or I/O beyond logging. | +| **How it fits the package** | Part of the `isredhat` test helper package, used to validate container base images in the Certsuite platform tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Receive output string"] --> B{"Does output contain “Unknown Base Image”?"} + B -- Yes --> C["Return false"] + B -- No --> D["Compile red‑hat version regex"] + D --> E["Search for matches in output"] + E --> F{"Any matches found?"} + F -- Yes --> G["Return true"] + F -- No --> H["Return false"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_IsRHEL --> func_MustCompile1:::pkg_regexp + func_IsRHEL --> func_FindAllString1:::pkg_regexp + func_IsRHEL --> func_len1 + func_IsRHEL --> func_Info:::pkg_log + func_IsRHEL --> func_MustCompile2:::pkg_regexp + func_IsRHEL --> func_FindAllString2:::pkg_regexp + func_IsRHEL --> func_len2 + + classDef pkg_regexp fill:#f9f,stroke:#333; + classDef pkg_log fill:#cff,stroke:#333; +``` + +#### Functions calling `IsRHEL` (Mermaid) + +```mermaid +graph TD + func_TestContainerIsRedHatRelease --> func_IsRHEL +``` + +#### Usage example (Go) + +```go +// Minimal example invoking IsRHEL +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat" +) + +func main() { + output := "Red Hat Enterprise Linux Server release 7.9 (Maipo)" + isRHEL := isredhat.IsRHEL(output) + fmt.Printf("Is Red Hat? %t\n", isRHEL) +} +``` + +--- + +### NewBaseImageTester + +**NewBaseImageTester** - Instantiates a `BaseImageInfo` object that holds the necessary context and command executor for testing container base images. 
+ +#### Signature (Go) + +```go +func NewBaseImageTester(client clientsholder.Command, ctx clientsholder.Context) *BaseImageInfo +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `BaseImageInfo` object that holds the necessary context and command executor for testing container base images. | +| **Parameters** | `client` – A `clientsholder.Command` used to run Kubernetes client commands.
`ctx` – A `clientsholder.Context` providing namespace, pod name, and container name information. | +| **Return value** | Pointer to a newly created `BaseImageInfo`. | +| **Key dependencies** | *`clientsholder.Command` – command execution interface.
* `clientsholder.Context` – contextual data holder. | +| **Side effects** | No external state changes; only constructs an in‑memory struct. | +| **How it fits the package** | Serves as a constructor for the test harness that validates Red‑Hat base images. It is invoked by higher‑level tests (e.g., `testIsRedHatRelease`) to prepare the tester with appropriate context and command capabilities. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive client & ctx"] --> B{"Create BaseImageInfo"} + B --> C["Return pointer"] +``` + +#### Function dependencies + +None – this function is currently not referenced elsewhere in the package. + +#### Functions calling `NewBaseImageTester` (Mermaid) + +```mermaid +graph TD + func_testIsRedHatRelease --> func_NewBaseImageTester +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewBaseImageTester +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/clientsholder" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/isredhat" +) + +func main() { + // Assume we have concrete implementations of Command and Context + client := clientsholder.GetClientsHolder() + ctx := clientsholder.NewContext("default", "my-pod", "my-container") + + tester := isredhat.NewBaseImageTester(client, ctx) + _ = tester // use the tester for further checks +} +``` + +--- + +## Local Functions + +### BaseImageInfo.runCommand + +**runCommand** - Runs an arbitrary shell command in the target container via `ExecCommandContainer`, returning stdout or reporting errors. + +#### Signature (Go) + +```go +func (b *BaseImageInfo) runCommand(cmd string) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Runs an arbitrary shell command in the target container via `ExecCommandContainer`, returning stdout or reporting errors. | +| **Parameters** | `cmd` *string* – The shell command to execute inside the container. 
| +| **Return value** | `output` *string* – Captured standard output.
`error` *error* – Non‑nil if execution fails or stderr contains data. | +| **Key dependencies** | • `b.ClientHolder.ExecCommandContainer(b.OCPContext, cmd)`
• `log.Error` (from internal/log)
• `errors.New` | +| **Side effects** | Logs error messages via the global logger; no state mutation on `BaseImageInfo`. | +| **How it fits the package** | Provides a low‑level helper for other test methods to query container properties (e.g., checking `/etc/redhat-release`). | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start: runCommand(cmd)"] --> B["Call ExecCommandContainer"] + B --> C{"Error from Exec?"} + C -- Yes --> D["Log error via log.Error"] + D --> E["Return empty string, err"] + C -- No --> F{"stderr non‑empty?"} + F -- Yes --> G["Log stderr via log.Error"] + G --> H["Return empty string, errors.New(outerr)"] + F -- No --> I["Return stdout, nil"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_BaseImageInfo.runCommand --> ExecCommandContainer + func_BaseImageInfo.runCommand --> Logger.Error + func_BaseImageInfo.runCommand --> errors.New +``` + +#### Functions calling `BaseImageInfo.runCommand` + +```mermaid +graph TD + BaseImageInfo.TestContainerIsRedHatRelease --> func_BaseImageInfo.runCommand +``` + +#### Usage example (Go) + +```go +// Minimal example invoking BaseImageInfo.runCommand +b := &BaseImageInfo{ /* fields initialized elsewhere */ } +output, err := b.runCommand("echo hello") +if err != nil { + log.Error("command failed: %v", err) +} +fmt.Println("Command output:", output) +``` + +--- diff --git a/docs/tests/platform/nodetainted/nodetainted.md b/docs/tests/platform/nodetainted/nodetainted.md new file mode 100644 index 000000000..8c2fe8523 --- /dev/null +++ b/docs/tests/platform/nodetainted/nodetainted.md @@ -0,0 +1,905 @@ +# Package nodetainted + +**Path**: `tests/platform/nodetainted` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [KernelTaint](#kerneltaint) + - [NodeTainted](#nodetainted) +- [Exported Functions](#exported-functions) + - [DecodeKernelTaintsFromBitMask](#decodekerneltaintsfrombitmask) + - [DecodeKernelTaintsFromLetters](#decodekerneltaintsfromletters) + - 
[GetOtherTaintedBits](#getothertaintedbits)
+  - [GetTaintMsg](#gettaintmsg)
+  - [GetTaintedBitsByModules](#gettaintedbitsbymodules)
+  - [NewNodeTaintedTester](#newnodetaintedtester)
+  - [NodeTainted.GetKernelTaintsMask](#nodetainted.getkerneltaintsmask)
+  - [NodeTainted.GetTainterModules](#nodetainted.gettaintermodules)
+  - [RemoveAllExceptNumbers](#removeallexceptnumbers)
+- [Local Functions](#local-functions)
+  - [NodeTainted.getAllTainterModules](#nodetainted.getalltaintermodules)
+  - [getBitPosFromLetter](#getbitposfromletter)
+
+## Overview
+
+The nodetainted package supplies utilities for inspecting and interpreting kernel taint information on a specific Kubernetes node, enabling tests to verify that no unwanted taints are present.
+
+### Key Features
+
+- Decodes a 64‑bit taint bitmask into human‑readable messages via GetTaintMsg and DecodeKernelTaintsFromBitMask.
+
+### Design Notes
+
+- Relies on reading /proc/sys/kernel/tainted and module taint files, assuming a Linux environment.
+
+### Structs Summary
+
+| Name | Purpose |
+|------|----------|
+| [**KernelTaint**](#kerneltaint) | Represents a Linux kernel taint entry, pairing a human‑readable description with its identifying taint letters |
+| [**NodeTainted**](#nodetainted) | Holds information about tainted nodes |
+
+### Exported Functions Summary
+
+| Name | Purpose |
+|------|----------|
+| [func DecodeKernelTaintsFromBitMask(bitmask uint64) []string](#decodekerneltaintsfrombitmask) | Translates each set bit in a 64‑bit kernel taint mask into its descriptive message using `GetTaintMsg`. |
+| [func DecodeKernelTaintsFromLetters(letters string) []string](#decodekerneltaintsfromletters) | Translates each character in `letters` into a descriptive taint string, including the original letter and its bit index. Unknown letters are reported explicitly. 
| +| [func GetOtherTaintedBits(taintsMask uint64, taintedBitsByModules map[int]bool) []int](#getothertaintedbits) | Returns a slice of bit positions that are set in `taintsMask` but are not recorded as being set by any module (i.e., keys absent or false in `taintedBitsByModules`). | +| [func GetTaintMsg(bit int) string](#gettaintmsg) | Returns a descriptive string for a kernel taint bit. If the bit is defined in the `kernelTaints` map, the message includes its description; otherwise it indicates that the bit is reserved. | +| [func GetTaintedBitsByModules(tainters map[string]string) (map[int]bool, error)](#gettaintedbitsbymodules) | For every module name and its associated taint letters, determine the corresponding kernel‑taint bit positions and aggregate them into a single set. | +| [func NewNodeTaintedTester(context *clientsholder.Context, node string) *NodeTainted](#newnodetaintedtester) | Instantiates a `NodeTainted` helper that provides methods for querying kernel taint information on a specific Kubernetes node. | +| [func (nt *NodeTainted) GetKernelTaintsMask() (uint64, error)](#nodetainted.getkerneltaintsmask) | Reads `/proc/sys/kernel/tainted` to obtain the current kernel taints bitmask and returns it as a `uint64`. | +| [func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error)](#nodetainted.gettaintermodules) | Gathers all kernel modules that set taint bits on a node, filters out allow‑listed modules, and returns the remaining module names with their taint letters plus a map of all taint bit positions used. | +| [func RemoveAllExceptNumbers(incomingStr string) string](#removeallexceptnumbers) | Returns a new string containing only the numeric characters of `incomingStr`. Useful for normalizing taint identifiers that may include letters or punctuation. 
| + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (nt *NodeTainted) getAllTainterModules() (map[string]string, error)](#nodetainted.getalltaintermodules) | Executes a shell script to list every kernel module present in `/sys/module`, reads each module’s taint file, and returns a map of module names to their associated taint letters. | +| [func getBitPosFromLetter(letter string) (int, error)](#getbitposfromletter) | Maps a single‑character taint letter to its corresponding kernel taint bit position (zero‑based). Validates input length and existence of the letter in known taints. | + +## Structs + +### KernelTaint + +A lightweight representation of a Linux kernel taint entry used in node‑tainted tests. + +#### Fields + +| Field | Type | Description | +|-------------|--------|-------------| +| `Description` | `string` | Human‑readable explanation of why the node is considered tainted. | +| `Letters` | `string` | One or more kernel taint letters that identify the specific taint condition (e.g., `"S"` for bad system call). | + +#### Purpose + +The `KernelTaint` struct encapsulates information about a kernel taint applied to a node during testing. The `Description` field provides context for test logs, while the `Letters` field holds the canonical taint identifier used by Kubernetes to mark the node’s state. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| *none* | No functions directly interact with this struct in the current codebase. | + +--- + +--- + +### NodeTainted + +#### Fields + +| Field | Type | Description | +|-------|------|-------------| +| `ctx` | `*clientsholder.Context` | Context used to execute commands on the target node; contains client configuration and authentication details. | +| `node` | `string` | Name or identifier of the Kubernetes node whose taint status is being inspected. 
| + +#### Purpose + +The `NodeTainted` struct represents a specific node in a cluster that may be marked as tainted by the Linux kernel. It encapsulates both the node identity and the context required to query the node’s state (e.g., reading `/proc/sys/kernel/tainted` or module taint files). Methods on this type retrieve kernel‑level taint masks, list modules responsible for taints, and filter those modules against an allow‑list. + +#### Related functions + +| Function | Purpose | +|----------|---------| +| `NewNodeTaintedTester(context *clientsholder.Context, node string)` | Creates a new `NodeTainted` instance initialized with the given context and node name. | +| `GetKernelTaintsMask() (uint64, error)` | Reads `/proc/sys/kernel/tainted`, parses the decimal value into a 64‑bit mask representing active kernel taints on the node. | +| `GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error)` | Enumerates all kernel modules that have set taint bits on the node, applies an allow‑list filter, and returns both module names with their taints and a bitset of tainted bits. | +| `getAllTainterModules() (map[string]string, error)` | Internal helper that lists each loaded module in `/sys/module`, reads its `taint` file if present, and builds a map from module name to the string of taint letters it has set. | + +--- + +--- + +## Exported Functions + +### DecodeKernelTaintsFromBitMask + +**DecodeKernelTaintsFromBitMask** - Translates each set bit in a 64‑bit kernel taint mask into its descriptive message using `GetTaintMsg`. + +#### Signature (Go) + +```go +func DecodeKernelTaintsFromBitMask(bitmask uint64) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Translates each set bit in a 64‑bit kernel taint mask into its descriptive message using `GetTaintMsg`. | +| **Parameters** | `bitmask uint64` – raw kernel taint bitmask. 
| +| **Return value** | `[]string` – slice of messages corresponding to all bits that are set. | +| **Key dependencies** | • `append` (slice manipulation)
• `GetTaintMsg(int)` (message lookup) | +| **Side effects** | None; pure function that only reads input and returns a new slice. | +| **How it fits the package** | Utility in `nodetainted` for decoding kernel taint information obtained from nodes, used by higher‑level tests to report compliance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> Init["taints = []string{}"] + Init --> Loop["for i := 0; i < 64; i++"] + Loop --> Shift["bit = (bitmask >> i) & 1"] + Shift --> Check["if bit == 1"] + Check -- Yes --> Msg["msg = GetTaintMsg(i)"] + Msg --> Append["taints = append(taints, msg)"] + Append --> Loop + Check -- No --> Loop + Loop -- End --> Return["return taints"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_DecodeKernelTaintsFromBitMask --> append + func_DecodeKernelTaintsFromBitMask --> GetTaintMsg +``` + +#### Functions calling `DecodeKernelTaintsFromBitMask` (Mermaid) + +```mermaid +graph TD + testTainted --> DecodeKernelTaintsFromBitMask +``` + +#### Usage example (Go) + +```go +// Minimal example invoking DecodeKernelTaintsFromBitMask +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + mask := uint64(0x5) // bits 0 and 2 set + messages := nodetainted.DecodeKernelTaintsFromBitMask(mask) + fmt.Println(messages) // e.g., ["taint0 (bit 0)", "taint2 (bit 2)"] +} +``` + +--- + +### DecodeKernelTaintsFromLetters + +**DecodeKernelTaintsFromLetters** - Translates each character in `letters` into a descriptive taint string, including the original letter and its bit index. Unknown letters are reported explicitly. + +#### Signature (Go) + +```go +func DecodeKernelTaintsFromLetters(letters string) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Translates each character in `letters` into a descriptive taint string, including the original letter and its bit index. 
Unknown letters are reported explicitly. | +| **Parameters** | `letters string –` sequence of single‑character taint identifiers to decode. | +| **Return value** | `[]string` – slice containing a formatted description for each input character. Each entry follows: ` (taint letter:, bit:)` or `unknown taint (letter )`. | +| **Key dependencies** | • `strings.Contains` from the standard library
• `fmt.Sprintf` for formatting
• Reference to the package‑wide `kernelTaints` slice, which maps indices to known taints | +| **Side effects** | None. Pure function; only reads global data and returns a new slice. | +| **How it fits the package** | Used by `NodeTainted.GetTainterModules` and test logic to interpret kernel taint letters returned from nodes into user‑friendly messages for reporting and logging. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over each letter"} + B --> C["Convert rune to string"] + C --> D["Search kernelTaints list"] + D -->|"Found"| E["Append formatted description"] + D -->|"Not Found"| F["Append unknown taint message"] + E & F --> G["Continue loop"] + G --> H{"Loop finished?"} + H -->|"Yes"| I["Return slice of descriptions"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_DecodeKernelTaintsFromLetters --> strings.Contains + func_DecodeKernelTaintsFromLetters --> fmt.Sprintf +``` + +#### Functions calling `DecodeKernelTaintsFromLetters` (Mermaid) + +```mermaid +graph TD + nodetainted.GetTainterModules --> func_DecodeKernelTaintsFromLetters + testTainted --> func_DecodeKernelTaintsFromLetters +``` + +#### Usage example (Go) + +```go +// Minimal example invoking DecodeKernelTaintsFromLetters +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + // Suppose a node reports the taint letters "abc" + letters := "abc" + + decoded := nodetainted.DecodeKernelTaintsFromLetters(letters) + fmt.Println("Decoded taints:") + for _, t := range decoded { + fmt.Println(t) + } +} +``` + +--- + +### GetOtherTaintedBits + +**GetOtherTaintedBits** - Returns a slice of bit positions that are set in `taintsMask` but are not recorded as being set by any module (i.e., keys absent or false in `taintedBitsByModules`). 
+ +#### Signature (Go) + +```go +func GetOtherTaintedBits(taintsMask uint64, taintedBitsByModules map[int]bool) []int +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a slice of bit positions that are set in `taintsMask` but are not recorded as being set by any module (i.e., keys absent or false in `taintedBitsByModules`). | +| **Parameters** | `taintsMask uint64` – 64‑bit mask representing kernel taint bits.
`taintedBitsByModules map[int]bool` – mapping of bit index to whether that bit was set by a module. | +| **Return value** | `[]int` – indices (0‑63) of bits that are set but unaccounted for by modules. | +| **Key dependencies** | • Standard library: basic arithmetic, slice append.
• No external packages or side effects. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used in node‑taint verification to flag unexpected taints that cannot be traced back to a known kernel module. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate i=0..63"} + B --> C["Compute bitIsSet"] + C --> D{"bitIsSet AND NOT taintedBitsByModules[i]"} + D -->|"Yes"| E["Append i to otherTaintedBits"] + D -->|"No"| F["Skip"] + B --> G["End loop"] + G --> H["Return otherTaintedBits"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetOtherTaintedBits --> append +``` + +#### Functions calling `GetOtherTaintedBits` (Mermaid) + +```mermaid +graph TD + testTainted --> GetOtherTaintedBits +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetOtherTaintedBits +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + // Example taintsMask with bits 1, 3, and 5 set (binary: 0b00101010) + taintsMask := uint64(42) // 0x2A + + // Suppose modules only accounted for bit 1 + taintedBitsByModules := map[int]bool{ + 1: true, + 3: false, + 5: false, + } + + otherBits := nodetainted.GetOtherTaintedBits(taintsMask, taintedBitsByModules) + fmt.Printf("Unaccounted taint bits: %v\n", otherBits) // Output: [3 5] +} +``` + +--- + +### GetTaintMsg + +**GetTaintMsg** - Returns a descriptive string for a kernel taint bit. If the bit is defined in the `kernelTaints` map, the message includes its description; otherwise it indicates that the bit is reserved. + +#### Signature (Go) + +```go +func GetTaintMsg(bit int) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a descriptive string for a kernel taint bit. If the bit is defined in the `kernelTaints` map, the message includes its description; otherwise it indicates that the bit is reserved. 
| +| **Parameters** | `bit int` – index of the taint bit (0‑63) | +| **Return value** | `string` – human‑readable taint description or “reserved” message | +| **Key dependencies** | - `kernelTaints` map (package‑level)
- `fmt.Sprintf` from the standard library | +| **Side effects** | None. Pure function; no state mutation, I/O, or concurrency. | +| **How it fits the package** | Utility helper used by other functions (e.g., `DecodeKernelTaintsFromBitMask`) to translate raw bit positions into meaningful labels for reporting and logging. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"bit exists in kernelTaints?"} + B -- Yes --> C["Return formatted description"] + B -- No --> D["Return reserved message"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetTaintMsg --> fmt.Sprintf +``` + +#### Functions calling `GetTaintMsg` (in the same package) + +```mermaid +graph TD + func_DecodeKernelTaintsFromBitMask --> func_GetTaintMsg +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetTaintMsg +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + // Example: get description for bit 3 + msg := nodetainted.GetTaintMsg(3) + fmt.Println(msg) // Output depends on kernelTaints map contents +} +``` + +--- + +--- + +### GetTaintedBitsByModules + +**GetTaintedBitsByModules** - For every module name and its associated taint letters, determine the corresponding kernel‑taint bit positions and aggregate them into a single set. + +Retrieves the set of kernel taint bits that are active for each module in a given map. + +--- + +#### Signature (Go) + +```go +func GetTaintedBitsByModules(tainters map[string]string) (map[int]bool, error) +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For every module name and its associated taint letters, determine the corresponding kernel‑taint bit positions and aggregate them into a single set. | +| **Parameters** | `tainters map[string]string` – keys are module names; values are strings of one‑letter taints (e.g., `"M"`, `"S"`). 
| +| **Return value** | `(map[int]bool, error)` – a mapping where the key is a bit position and the value indicates that the bit is set, plus an error that is non‑nil if any letter cannot be resolved to a known taint. | +| **Key dependencies** | • `string` conversion of byte to string.
• `getBitPosFromLetter(letter string) (int, error)` – translates a single‑letter taint into its bit index.
• `fmt.Errorf` for error wrapping. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by `NodeTainted.GetTainterModules` to produce the overall set of active taint bits from individual modules’ letters. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A(tainter) --> B(letters) + B --> C["Loop each letter"] + C --> D{"letter=string(letters[i])"} + D --> E{"bit,err=getBitPosFromLetter(letter)"} + E --|"err"| F(Return error) + E --> G(taintedBits["bit"]=true) + G --> H(End of loops) + H --> I(Return taintedBits,nil) +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetTaintedBitsByModules --> func_getBitPosFromLetter + func_GetTaintedBitsByModules --> func_Errorf +``` + +--- + +#### Functions calling `GetTaintedBitsByModules` (Mermaid) + +```mermaid +graph TD + func_NodeTainted.GetTainterModules --> func_GetTaintedBitsByModules +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking GetTaintedBitsByModules +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + modules := map[string]string{ + "moduleA": "M", + "moduleB": "S", + } + bits, err := nodetainted.GetTaintedBitsByModules(modules) + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Println("Active taint bits:", bits) +} +``` + +--- + +### NewNodeTaintedTester + +**NewNodeTaintedTester** - Instantiates a `NodeTainted` helper that provides methods for querying kernel taint information on a specific Kubernetes node. + +#### Signature (Go) + +```go +func NewNodeTaintedTester(context *clientsholder.Context, node string) *NodeTainted +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Instantiates a `NodeTainted` helper that provides methods for querying kernel taint information on a specific Kubernetes node. 
| +| **Parameters** | `context *clientsholder.Context` – the client context used to communicate with the node;
`node string` – the name of the target node. | +| **Return value** | `*NodeTainted` – a pointer to a new `NodeTainted` struct initialized with the supplied context and node name. | +| **Key dependencies** | • Uses the `clientsholder.Context` type from the test framework.
• Stores values in the `NodeTainted` struct (no external calls at construction). | +| **Side effects** | None – only creates an object; does not perform I/O or modify global state. | +| **How it fits the package** | Provides the entry point for all taint‑related checks performed by the *nodetainted* test suite, allowing other functions to retrieve kernel taints and module information via the returned `NodeTainted` instance. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Create NodeTainted"} + B --> C["Return &NodeTainted"] +``` + +#### Function dependencies (Mermaid) + +None – this constructor calls no other functions; it only initializes and returns the struct. + +#### Functions calling `NewNodeTaintedTester` (Mermaid) + +```mermaid +graph TD + func_testTainted --> nodetainted.NewNodeTaintedTester +``` + +#### Usage example (Go) + +```go +// Minimal example invoking NewNodeTaintedTester +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/clientsholder" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + ctx := clientsholder.NewContext("namespace", "podName", "containerName") + nodeTester := nodetainted.NewNodeTaintedTester(ctx, "worker-node-1") + + // Use nodeTester to query kernel taints, e.g.: + taintsMask, err := nodeTester.GetKernelTaintsMask() + if err != nil { + panic(err) + } + println("Kernel taint mask:", taintsMask) +} +``` + +--- + +### NodeTainted.GetKernelTaintsMask + +**GetKernelTaintsMask** - Reads `/proc/sys/kernel/tainted` to obtain the current kernel taints bitmask and returns it as a `uint64`. + +#### Signature (Go) + +```go +func (nt *NodeTainted) GetKernelTaintsMask() (uint64, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads `/proc/sys/kernel/tainted` to obtain the current kernel taints bitmask and returns it as a `uint64`. 
| +| **Parameters** | *None* – uses the receiver’s context (`nt.ctx`). | +| **Return value** | `uint64` – the parsed taint mask; `error` if command execution or parsing fails. | +| **Key dependencies** | • `runCommand(nt.ctx, ...)`
• `strings.ReplaceAll` (×3)
• `strconv.ParseUint`
• `fmt.Errorf` | +| **Side effects** | None; the function performs read‑only operations and returns values. | +| **How it fits the package** | Provides a helper for tests to determine whether a node is tainted, enabling conditional logic based on kernel state. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Run `cat /proc/sys/kernel/tainted`"] --> B{"Command success?"} + B -- Yes --> C["Clean output (remove \n,\r,\t)"] + B -- No --> D["Return error"] + C --> E["ParseUint(output,10,64)"] + E -- Success --> F["Return mask, nil"] + E -- Failure --> G["Wrap and return parsing error"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_NodeTainted.GetKernelTaintsMask --> func_runCommand + func_NodeTainted.GetKernelTaintsMask --> strings.ReplaceAll + func_NodeTainted.GetKernelTaintsMask --> strconv.ParseUint + func_NodeTainted.GetKernelTaintsMask --> fmt.Errorf +``` + +#### Functions calling `NodeTainted.GetKernelTaintsMask` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking NodeTainted.GetKernelTaintsMask +ctx := context.Background() +nt := &nodetainted.NodeTainted{Ctx: ctx} +mask, err := nt.GetKernelTaintsMask() +if err != nil { + log.Fatalf("cannot read kernel taints mask: %v", err) +} +fmt.Printf("Kernel taints mask: 0x%X\n", mask) +``` + +--- + +### NodeTainted.GetTainterModules + +**GetTainterModules** - Gathers all kernel modules that set taint bits on a node, filters out allow‑listed modules, and returns the remaining module names with their taint letters plus a map of all taint bit positions used. 
+ +#### 1) Signature (Go) + +```go +func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Gathers all kernel modules that set taint bits on a node, filters out allow‑listed modules, and returns the remaining module names with their taint letters plus a map of all taint bit positions used. | +| **Parameters** | `allowList map[string]bool` – module names to ignore (typically known safe modules). | +| **Return value** | `tainters map[string]string` – module → string of taint‑letter codes for non‑allow‑listed modules.
`taintBits map[int]bool` – set of bit positions corresponding to all detected taints, including allow‑listed ones.
`err error` – failure reason if any step fails. | +| **Key dependencies** | • `NodeTainted.getAllTainterModules()`
• `DecodeKernelTaintsFromLetters(string)`
• `GetTaintedBitsByModules(map[string]string)`
• `log.Debug` for logging | +| **Side effects** | None. The function only reads state from the node and logs debug messages. | +| **How it fits the package** | Provides high‑level, user‑friendly data about kernel taints, used by tests to assert that no unexpected modules are tainting the system. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Call getAllTainterModules"} + B --> C["allTainters map"] + C --> D{"Iterate over allTainters"} + D --> E["Decode taint letters → slice of taints"] + E --> F["Log module taints"] + F --> G{"Is module allow‑listed?"} + G -- Yes --> H["Skip adding to filteredTainters"] + G -- No --> I["Add to filteredTainters"] + D --> J["End loop"] + J --> K{"Call GetTaintedBitsByModules(allTainters)"} + K --> L["taintBits map"] + L --> M["Return filteredTainters, taintBits, nil"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_NodeTainted.GetTainterModules --> func_NodeTainted.getAllTainterModules + func_NodeTainted.GetTainterModules --> func_DecodeKernelTaintsFromLetters + func_NodeTainted.GetTainterModules --> func_GetTaintedBitsByModules +``` + +#### 5) Functions calling `NodeTainted.GetTainterModules` (Mermaid) + +None – this function is currently not referenced elsewhere in the package. + +#### 6) Usage example (Go) + +```go +// Minimal example invoking NodeTainted.GetTainterModules + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + // Assume `nodeTainted` is an initialized *nodetainted.NodeTainted instance. 
+ allowList := map[string]bool{ + "module_safe": true, + } + + tainters, taintBits, err := nodeTainted.GetTainterModules(allowList) + if err != nil { + // handle error + } + + fmt.Printf("Filtered tainters: %+v\n", tainters) + fmt.Printf("All taint bits: %+v\n", taintBits) +} +``` + +--- + +### RemoveAllExceptNumbers + +**RemoveAllExceptNumbers** - Returns a new string containing only the numeric characters of `incomingStr`. Useful for normalizing taint identifiers that may include letters or punctuation. + +#### Signature (Go) + +```go +func RemoveAllExceptNumbers(incomingStr string) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns a new string containing only the numeric characters of `incomingStr`. Useful for normalizing taint identifiers that may include letters or punctuation. | +| **Parameters** | `incomingStr` string – the raw input from which non‑numeric characters will be removed. | +| **Return value** | A string composed solely of digits extracted from `incomingStr`. | +| **Key dependencies** | • `regexp.MustCompile`\n• `regexp.ReplaceAllString` | +| **Side effects** | None – purely functional; no state mutation or I/O. | +| **How it fits the package** | Used by the taint‑analysis logic to convert taint identifiers like `"bit:10)"` into a clean numeric representation (`"10"`), facilitating further processing and reporting. 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Compile regex `\D+`"} + B --> C["Replace all non‑digit sequences with an empty string"] + C --> D["Return cleaned string"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_RemoveAllExceptNumbers --> func_MustCompile + func_RemoveAllExceptNumbers --> func_ReplaceAllString +``` + +#### Functions calling `RemoveAllExceptNumbers` (Mermaid) + +```mermaid +graph TD + func_testTainted --> func_RemoveAllExceptNumbers +``` + +#### Usage example (Go) + +```go +// Minimal example invoking RemoveAllExceptNumbers +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/nodetainted" +) + +func main() { + raw := "bit:10)" + cleaned := nodetainted.RemoveAllExceptNumbers(raw) + fmt.Println(cleaned) // Output: 10 +} +``` + +--- + +## Local Functions + +### NodeTainted.getAllTainterModules + +**getAllTainterModules** - Executes a shell script to list every kernel module present in `/sys/module`, reads each module’s taint file, and returns a map of module names to their associated taint letters. + +#### 1) Signature (Go) + +```go +func (nt *NodeTainted) getAllTainterModules() (map[string]string, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes a shell script to list every kernel module present in `/sys/module`, reads each module’s taint file, and returns a map of module names to their associated taint letters. | +| **Parameters** | `nt *NodeTainted` – the receiver holding context for command execution (`ctx`). | +| **Return value** | `map[string]string` mapping module name → taint string; `error` if command execution or parsing fails. | +| **Key dependencies** | • `runCommand(ctx, cmd)` to execute shell code
• `fmt.Errorf` for error formatting
• `strings.Split` to split command output and lines | +| **Side effects** | No state mutation on the receiver; only I/O via external command execution. | +| **How it fits the package** | Provides low‑level data used by higher‑level APIs (`GetTainterModules`) to filter allowlisted modules and compute taint bits for the node. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Build shell command"] --> B["runCommand(ctx, cmd)"] + B --> C{"Success?"} + C -- Yes --> D["Split output into lines"] + D --> E["Parse each line: module taints"] + E --> F["Populate map"] + F --> G["Return map"] + C -- No --> H["Return error via fmt.Errorf"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_NodeTainted.getAllTainterModules --> func_runCommand + func_NodeTainted.getAllTainterModules --> fmt_Errorf + func_NodeTainted.getAllTainterModules --> strings_Split +``` + +#### 5) Functions calling `NodeTainted.getAllTainterModules` (Mermaid) + +```mermaid +graph TD + func_NodeTainted.GetTainterModules --> func_NodeTainted.getAllTainterModules +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking NodeTainted.getAllTainterModules +nt := &nodetainted.NodeTainted{Ctx: context.Background()} +modules, err := nt.getAllTainterModules() +if err != nil { + log.Fatalf("cannot list tainting modules: %v", err) +} +for mod, taints := range modules { + fmt.Printf("%s taints: %s\n", mod, taints) +} +``` + +--- + +--- + +### getBitPosFromLetter + +**getBitPosFromLetter** - Maps a single‑character taint letter to its corresponding kernel taint bit position (zero‑based). Validates input length and existence of the letter in known taints. + +#### Signature (Go) + +```go +func getBitPosFromLetter(letter string) (int, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Maps a single‑character taint letter to its corresponding kernel taint bit position (zero‑based). 
Validates input length and existence of the letter in known taints. | +| **Parameters** | `letter string` – A one‑character string representing a kernel taint letter. | +| **Return value** | `(int, error)` – The zero‑based bit index on success; otherwise an error explaining the failure. | +| **Key dependencies** | • `len` (builtin)
• `fmt.Errorf`
• `strings.Contains` | +| **Side effects** | None. Pure function with no state mutation or I/O. | +| **How it fits the package** | Supports the public helper `GetTaintedBitsByModules`, translating module‑supplied taint letters into bit indices for internal use. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Validate input length"] --> B{"Is single letter?"} + B -- Yes --> C["Iterate over kernelTaints"] + C --> D{"Letter found in taint.Letters?"} + D -- Yes --> E["Return bit index, nil"] + D -- No --> F["Continue loop"] + F --> C + D -- End of list --> G["Return error: unknown letter"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getBitPosFromLetter --> builtin_len + func_getBitPosFromLetter --> fmt_Errorf + func_getBitPosFromLetter --> strings_Contains +``` + +#### Functions calling `getBitPosFromLetter` (Mermaid) + +```mermaid +graph TD + func_GetTaintedBitsByModules --> func_getBitPosFromLetter +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getBitPosFromLetter +bit, err := getBitPosFromLetter("X") +if err != nil { + fmt.Printf("error: %v\n", err) +} else { + fmt.Printf("taint bit position for 'X': %d\n", bit) +} +``` + +--- diff --git a/docs/tests/platform/operatingsystem/operatingsystem.md b/docs/tests/platform/operatingsystem/operatingsystem.md new file mode 100644 index 000000000..08f964f3f --- /dev/null +++ b/docs/tests/platform/operatingsystem/operatingsystem.md @@ -0,0 +1,166 @@ +# Package operatingsystem + +**Path**: `tests/platform/operatingsystem` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [GetRHCOSMappedVersions](#getrhcosmappedversions) + - [GetShortVersionFromLong](#getshortversionfromlong) + +## Overview + +Provides utilities for mapping Red Hat CoreOS (RHCOS) release strings between long and short forms, enabling other packages to retrieve concise version identifiers. 
+ +### Key Features + +- Parses a newline‑separated embedded file containing RHCOS version mappings into a map of short→long + +### Design Notes + +- Mappings are loaded from an embedded file at build time; no runtime I/O occurs +- If a lookup fails the functions return a sentinel string and an error to signal absence +- Users should cache the returned map when multiple lookups are needed for efficiency + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetRHCOSMappedVersions(rhcosVersionMap string) (map[string]string, error)](#getrhcosmappedversions) | Parses a newline‑separated list of RHCOS version mappings (`short / long`) and returns a map where the short version is the key and the long form is the value. | +| [func GetShortVersionFromLong(longVersion string) (string, error)](#getshortversionfromlong) | Looks up the short RHCOS version that corresponds to a supplied long‑form release string. If no match is found it returns a sentinel value. | + +## Exported Functions + +### GetRHCOSMappedVersions + +**GetRHCOSMappedVersions** - Parses a newline‑separated list of RHCOS version mappings (`short / long`) and returns a map where the short version is the key and the long form is the value. + +#### Signature (Go) + +```go +func GetRHCOSMappedVersions(rhcosVersionMap string) (map[string]string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses a newline‑separated list of RHCOS version mappings (`short / long`) and returns a map where the short version is the key and the long form is the value. | +| **Parameters** | `rhcosVersionMap string` – raw text containing lines such as “4.10.14 / 410.84.202205031645‑0”. | +| **Return value** | `map[string]string, error` – a mapping from short to long version; the function currently never returns an error, but the signature allows for future extensions. | +| **Key dependencies** | • `strings.Split`, `strings.TrimSpace` (standard library)
• `make` (built‑in) | +| **Side effects** | None – purely functional: creates and returns a new map without mutating globals or performing I/O. | +| **How it fits the package** | Provides data for other helpers (e.g., `GetShortVersionFromLong`) to resolve between short and long RHCOS identifiers. | + +#### Internal workflow + +```mermaid +flowchart TD + InputString --> SplitLines + SplitLines --> ForEachLine + ForEachLine --> TrimEmpty + TrimEmpty -- not empty --> SplitPair + SplitPair --> StoreInMap + StoreInMap --> End +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetRHCOSMappedVersions --> make + func_GetRHCOSMappedVersions --> strings.Split + func_GetRHCOSMappedVersions --> strings.TrimSpace +``` + +#### Functions calling `GetRHCOSMappedVersions` + +```mermaid +graph TD + func_GetShortVersionFromLong --> func_GetRHCOSMappedVersions +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetRHCOSMappedVersions +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/operatingsystem" +) + +func main() { + data := `4.9.21 / 49.84.202202081504-0 +4.10.14 / 410.84.202205031645-0` + mapping, err := operatingsystem.GetRHCOSMappedVersions(data) + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Printf("%+v\n", mapping) // Output: map[4.9.21:49.84.202202081504-0 4.10.14:410.84.202205031645-0] +} +``` + +--- + +### GetShortVersionFromLong + +**GetShortVersionFromLong** - Looks up the short RHCOS version that corresponds to a supplied long‑form release string. If no match is found it returns a sentinel value. + +#### Signature (Go) + +```go +func GetShortVersionFromLong(longVersion string) (string, error) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Looks up the short RHCOS version that corresponds to a supplied long‑form release string. If no match is found it returns a sentinel value. 
| +| **Parameters** | `longVersion` (string) – the full RHCOS build identifier (e.g., `"410.84.202205031645-0"`). | +| **Return value** | `(string, error)` – the matching short version string or `"version-not-found"` with a nil error if no match exists; an error is returned only when mapping data cannot be retrieved. | +| **Key dependencies** | • `GetRHCOSMappedVersions` – retrieves the map of short → long versions.
• `rhcosVersionMap` (global string constant) – source text for mapping. | +| **Side effects** | None; purely functional lookup. | +| **How it fits the package** | Provides a helper used by node‑level logic to translate OS image identifiers into concise version tags required elsewhere in the suite. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Retrieve mapping"} + B --> C["GetRHCOSMappedVersions(rhcosVersionMap)"] + C --> D{"Iterate over map"} + D -->|"match found"| E["Return short version"] + D -->|"no match"| F["Return NotFoundStr"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_GetShortVersionFromLong --> func_GetRHCOSMappedVersions +``` + +#### Functions calling `GetShortVersionFromLong` + +```mermaid +graph TD + func_Node.GetRHCOSVersion --> func_GetShortVersionFromLong +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetShortVersionFromLong +short, err := operatingsystem.GetShortVersionFromLong("410.84.202205031645-0") +if err != nil { + log.Fatalf("mapping failed: %v", err) +} +fmt.Printf("short version: %s\n", short) // prints the matching short tag or "version-not-found" +``` + +--- diff --git a/docs/tests/platform/platform.md b/docs/tests/platform/platform.md new file mode 100644 index 000000000..3f9383ba1 --- /dev/null +++ b/docs/tests/platform/platform.md @@ -0,0 +1,1226 @@ +# Package platform + +**Path**: `tests/platform` + +## Table of Contents + +- [Overview](#overview) +- [Exported Functions](#exported-functions) + - [LoadChecks](#loadchecks) +- [Local Functions](#local-functions) + - [testClusterOperatorHealth](#testclusteroperatorhealth) + - [testContainersFsDiff](#testcontainersfsdiff) + - [testHugepages](#testhugepages) + - [testHyperThreadingEnabled](#testhyperthreadingenabled) + - [testIsRedHatRelease](#testisredhatrelease) + - [testIsSELinuxEnforcing](#testisselinuxenforcing) + - [testNodeOperatingSystemStatus](#testnodeoperatingsystemstatus) + - 
[testOCPStatus](#testocpstatus) + - [testPodHugePagesSize](#testpodhugepagessize) + - [testServiceMesh](#testservicemesh) + - [testSysctlConfigs](#testsysctlconfigs) + - [testTainted](#testtainted) + - [testUnalteredBootParams](#testunalteredbootparams) + +## Overview + +The `platform` package registers a collection of platform‑alteration tests (e.g., hyper‑threading, kernel taints, SELinux state, OS compatibility) into the checks database for use by Certsuite’s test runner. + +### Key Features + +- Registers a suite of platform‑specific compliance checks under the common platform alteration group +- Provides helper functions to validate node and container configurations against OpenShift and Red Hat requirements +- Integrates with Certsuite’s reporting system to record compliant or non‑compliant objects + +### Design Notes + +- All tests are defined as standalone functions that accept a `*checksdb.Check` and `*provider.TestEnvironment`; this keeps the test logic decoupled from registration logic +- The package relies on external helpers (e.g., `testhelper`, `clientsholder`) to gather runtime data, so it is intentionally thin and focused on orchestration rather than low‑level implementation +- Skips are applied per check via skip functions derived from environment state or node attributes, ensuring irrelevant checks do not run in unsupported contexts + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks() ()](#loadchecks) | Registers a collection of platform‑alteration tests (hyper‑threading, kernel taints, SELinux, etc.) into the checks database under the `common.PlatformAlterationTestKey` group. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func (*checksdb.Check, *provider.TestEnvironment)()](#testclusteroperatorhealth) | Determines whether every ClusterOperator on the cluster reports an `Available` status. 
If any operator is not available, the check fails and records non‑compliant objects. | +| [func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment)](#testcontainersfsdiff) | Verifies each container in the environment has an unmodified file system by running a diff against a probe pod and records compliance status. | +| [func testHugepages(check *checksdb.Check, env *provider.TestEnvironment)()](#testhugepages) | Verifies that each worker node’s hugepage settings are unchanged and reports compliance. | +| [func testHyperThreadingEnabled(check *checksdb.Check, env *provider.TestEnvironment)](#testhyperthreadingenabled) | Checks each bare‑metal node in the environment to determine whether hyper‑threading is enabled, logs the outcome, and records compliant/non‑compliant results. | +| [func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment)](#testisredhatrelease) | Validates each container in the test environment is built from a Red Hat Enterprise Linux base image. It records compliance or non‑compliance for reporting. | +| [func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment)](#testisselinuxenforcing) | Executes `getenforce` inside each probe pod to confirm SELinux is in *enforcing* state on the host node. Reports compliance per node and aggregates results into the test check. | +| [func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment)](#testnodeoperatingsystemstatus) | Ensures every control‑plane node uses RHCOS or CentOS Stream CoreOS and that worker nodes use a supported OS (RHCOS, RHEL, or CSCC). Checks version compatibility against the OpenShift release. | +| [func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment)](#testocpstatus) | Inspects `env.OCPStatus` to decide if the cluster’s OpenShift version is in end‑of‑life (EOL). Logs an appropriate message and creates a compliance report object. 
| +| [func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, size string)](#testpodhugepagessize) | Iterates over all huge‑pages pods in the test environment and verifies each pod’s resource requests/limits match the expected page size (`size`). Logs results and records compliant/non‑compliant objects. | +| [func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment)](#testservicemesh) | Verifies each pod has at least one Istio‑proxy container. Pods lacking the proxy are flagged as non‑compliant; those with it are marked compliant. | +| [func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment)](#testsysctlconfigs) | Ensures that each node’s runtime sysctl settings match the corresponding kernel arguments defined in its machine config. Non‑compliant nodes are recorded for reporting. | +| [func testTainted(check *checksdb.Check, env *provider.TestEnvironment)](#testtainted) | Determines whether each node in the environment has kernel taints that are either unapproved or caused by modules not on an allow‑list. It records compliant and non‑compliant findings for reporting. | +| [func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironment)](#testunalteredbootparams) | For every distinct node in the test environment, it runs `bootparams.TestBootParamsHelper` to ensure that kernel command‑line arguments are unchanged from their configured MachineConfig values. It records compliant and non‑compliant nodes for reporting. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Registers a collection of platform‑alteration tests (hyper‑threading, kernel taints, SELinux, etc.) into the checks database under the `common.PlatformAlterationTestKey` group. 
+
+#### Signature (Go)
+
+```go
+func LoadChecks()
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Registers a collection of platform‑alteration tests (hyper‑threading, kernel taints, SELinux, etc.) into the checks database under the `common.PlatformAlterationTestKey` group. |
+| **Parameters** | None |
+| **Return value** | None |
+| **Key dependencies** | • `log.Debug`<br>
• `WithBeforeEachFn`
• `checksdb.NewChecksGroup`
• `checksdb.Check.Add`, `WithCheckFn`, `WithSkipCheckFn`
• Test identifiers (`identifiers.GetTestIDAndLabels`)
• Skip functions from `testhelper` (e.g., `GetNoBareMetalNodesSkipFn`)
• Individual test implementations (e.g., `testHyperThreadingEnabled`, `testTainted`, etc.) | +| **Side effects** | Adds checks to the global checks database; logs a debug message. No I/O beyond logging. | +| **How it fits the package** | Called by `certsuite.LoadInternalChecksDB` during test suite initialization, populating the platform alteration group of checks that will be executed in tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["log.Debug(Loading %s suite checks, common.PlatformAlterationTestKey)"] + B --> C["Create checksGroup = checksdb.NewChecksGroup(common.PlatformAlterationTestKey).WithBeforeEachFn(beforeEachFn)"] + C --> D["Add HyperThreading check"] + D --> E["Add Unaltered Base Image check"] + E --> F["Add Non‑tainted Node Kernels check"] + F --> G["Add RedHat Release check"] + G --> H["Add SELinux Enforcing check"] + H --> I["Add Hugepages not manually manipulated check"] + I --> J["Add Unaltered Startup Boot Params check"] + J --> K["Add Sysctl Configs check"] + K --> L["Add Service Mesh check"] + L --> M["Add OCP Lifecycle check"] + M --> N["Add Node Operating System check"] + N --> O["Add Pod HugePages (2Mi) check"] + O --> P["Add Pod HugePages (1Gi) check"] + P --> Q["Add Cluster Operator Health check"] + Q --> R["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> log_Debug + func_LoadChecks --> WithBeforeEachFn + func_LoadChecks --> checksdb_NewChecksGroup + func_LoadChecks --> checksdb_Check_Add + func_LoadChecks --> checksdb_WithCheckFn + func_LoadChecks --> checksdb_WithSkipCheckFn + func_LoadChecks --> identifiers_GetTestIDAndLabels + func_LoadChecks --> testhelper_GetNoBareMetalNodesSkipFn + func_LoadChecks --> testhelper_GetNonOCPClusterSkipFn + func_LoadChecks --> testhelper_GetDaemonSetFailedToSpawnSkipFn + func_LoadChecks --> testhelper_GetNoContainersUnderTestSkipFn + func_LoadChecks --> testhelper_GetNoIstioSkipFn + func_LoadChecks --> 
testhelper_GetNoPodsUnderTestSkipFn
+ func_LoadChecks --> testhelper_GetNoHugepagesPodsSkipFn
+ func_LoadChecks --> testHyperThreadingEnabled
+ func_LoadChecks --> testContainersFsDiff
+ func_LoadChecks --> testTainted
+ func_LoadChecks --> testIsRedHatRelease
+ func_LoadChecks --> testIsSELinuxEnforcing
+ func_LoadChecks --> testHugepages
+ func_LoadChecks --> testUnalteredBootParams
+ func_LoadChecks --> testSysctlConfigs
+ func_LoadChecks --> testServiceMesh
+ func_LoadChecks --> testOCPStatus
+ func_LoadChecks --> testNodeOperatingSystemStatus
+ func_LoadChecks --> testPodHugePagesSize
+ func_LoadChecks --> testClusterOperatorHealth
+```
+
+#### Functions calling `LoadChecks` (Mermaid)
+
+```mermaid
+graph TD
+ certsuite_LoadInternalChecksDB --> func_LoadChecks
+```
+
+#### Usage example (Go)
+
+```go
+// Minimal example invoking LoadChecks
+func main() {
+ // Initialise the checks database (usually done during test setup)
+ platform.LoadChecks()
+}
+```
+
+---
+
+## Local Functions
+
+### testClusterOperatorHealth
+
+**testClusterOperatorHealth** - Determines whether every ClusterOperator on the cluster reports an `Available` status. If any operator is not available, the check fails and records non‑compliant objects.
+
+#### Signature (Go)
+
+```go
+func testClusterOperatorHealth(check *checksdb.Check, env *provider.TestEnvironment)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Determines whether every ClusterOperator on the cluster reports an `Available` status. If any operator is not available, the check fails and records non‑compliant objects. |
+| **Parameters** | `check *checksdb.Check` – the test instance.<br>
`env *provider.TestEnvironment` – holds discovered ClusterOperator objects. | +| **Return value** | None (the result is stored via `SetResult`). | +| **Key dependencies** | • `log.LogInfo`
• `clusteroperator.IsClusterOperatorAvailable`
• `testhelper.NewClusterOperatorReportObject`
• `check.SetResult` | +| **Side effects** | • Writes log messages.
• Mutates the `Check` object by setting its result. | +| **How it fits the package** | Part of the platform test suite; executed when the “TestClusterOperatorHealth” check is loaded in `LoadChecks`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start["Start"] --> Loop{"for each ClusterOperator"} + Loop --> LogInfo["LogInfo: testing %q"] + LogInfo --> IsAvailable["IsClusterOperatorAvailable"] + IsAvailable -- true --> CompliantObj["Append compliant ReportObject"] + IsAvailable -- false --> NonCompliantObj["Append non‑compliant ReportObject"] + Loop --> End{"end loop"} + End --> SetResult["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testClusterOperatorHealth --> log_LogInfo + func_testClusterOperatorHealth --> clusteroperator_IsClusterOperatorAvailable + func_testClusterOperatorHealth --> testhelper_NewClusterOperatorReportObject + func_testClusterOperatorHealth --> check_SetResult +``` + +#### Functions calling `testClusterOperatorHealth` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testClusterOperatorHealth +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testClusterOperatorHealth +func ExampleTest() { + // Assume env is a populated TestEnvironment with ClusterOperators. + var check *checksdb.Check + var env provider.TestEnvironment + // The function mutates the check’s result internally. + testClusterOperatorHealth(check, &env) +} +``` + +--- + +### testContainersFsDiff + +**testContainersFsDiff** - Verifies each container in the environment has an unmodified file system by running a diff against a probe pod and records compliance status. 
+ +#### Signature (Go) + +```go +func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies each container in the environment has an unmodified file system by running a diff against a probe pod and records compliance status. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result storage.
`env *provider.TestEnvironment` – holds containers, probe pods, OpenShift version, etc. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `clientsholder.NewContext`
• `cnffsdiff.NewFsDiffTester`
• `fsDiffTester.RunTest`, `GetResults`, fields `ChangedFolders`, `DeletedFolders`, `Error`
• `testhelper.NewContainerReportObject` and its methods (`AddField`)
• Logging helpers: `check.LogInfo`, `check.LogError` | +| **Side effects** | Creates report objects, logs information/errors, updates the test result in `check`. No external I/O beyond logging. | +| **How it fits the package** | Called by `LoadChecks` as one of many checks for the platform suite; contributes to overall compliance assessment. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Containers"} + B --> C["Log container name"] + C --> D{"Probe pod exists?"} + D -- No --> E["Log error, create non‑compliant report"] + D -- Yes --> F{"Probe pod has containers?"} + F -- No --> G["Log error, create non‑compliant report"] + F -- Yes --> H["Create client context"] + H --> I["Instantiate FsDiffTester"] + I --> J["Run fs-diff on container UID"] + J --> K{"Result"} + K -- SUCCESS --> L["Log success, create compliant report"] + K -- FAILURE --> M["Log failure, add changed/deleted folders to report"] + K -- ERROR --> N["Log error, include err field in report"] + L & M & N --> O["Append to corresponding slice"] + O --> P{"Next container?"} + P -- Yes --> B + P -- No --> Q["Set final result via check.SetResult"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testContainersFsDiff --> clientsholder.NewContext + testContainersFsDiff --> cnffsdiff.NewFsDiffTester + testContainersFsDiff --> fsDiffTester.RunTest + testContainersFsDiff --> fsDiffTester.GetResults + testContainersFsDiff --> testhelper.NewContainerReportObject + testContainersFsDiff --> check.LogInfo + testContainersFsDiff --> check.LogError +``` + +#### Functions calling `testContainersFsDiff` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testContainersFsDiff +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testContainersFsDiff +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + 
"github.com/redhat-best-practices-for-k8s/certsuite/provider"
+)
+
+func main() {
+ // Assume we have a populated Check and TestEnvironment
+ var check *checksdb.Check
+ var env *provider.TestEnvironment
+
+ platform.testContainersFsDiff(check, env)
+}
+```
+
+---
+
+### testHugepages
+
+**testHugepages** - Verifies that each worker node’s hugepage settings are unchanged and reports compliance.
+
+#### Signature (Go)
+
+```go
+func testHugepages(check *checksdb.Check, env *provider.TestEnvironment)
+```
+
+#### Summary Table
+
+| Aspect | Details |
+|--------|---------|
+| **Purpose** | Verifies that each worker node’s hugepage settings are unchanged and reports compliance. |
+| **Parameters** | `check *checksdb.Check` – the test check object;<br>`env *provider.TestEnvironment` – environment containing nodes, pods, and other context. |
+| **Return value** | None (updates the `Check` result). |
+| **Key dependencies** | • `log.LogInfo`, `log.LogError`<br>
• `node.IsWorkerNode()`
• `env.ProbePods[nodeName]` lookup
• `hugepages.NewTester` and its `Run` method
• `testhelper.NewNodeReportObject` | +| **Side effects** | • Emits log messages.
• Appends compliant/non‑compliant `ReportObject`s to the check result. | +| **How it fits the package** | Part of the platform test suite; invoked by `LoadChecks()` for the “Hugepages not manually manipulated” check. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> IterateNodes["for each node in env.Nodes"] + IterateNodes --> CheckWorker["node.IsWorkerNode() ?"] + CheckWorker -- No --> LogInfo["Log non‑worker"] + CheckWorker -- Yes --> FindProbe["probePod, exist := env.ProbePods(nodeName)"] + FindProbe -- Not found --> LogError["Missing probe pod"] + FindProbe -- Found --> CreateTester["hugepages.NewTester(&node, probePod, clientsholder.GetClientsHolder())"] + CreateTester -- Error --> LogError["Tester creation failed"] + CreateTester -- Success --> RunTester["hpTester.Run()"] + RunTester -- Error --> LogError["Hugepages check failed"] + RunTester -- Success --> LogInfo["Node passed hugepages check"] + AllPaths --> AppendReport["append to compliant/non‑compliant list"] + AppendReport --> SetResult["check.SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testHugepages --> func_LogInfo + func_testHugepages --> func_IsWorkerNode + func_testHugepages --> func_Append + func_testHugepages --> testhelper.NewNodeReportObject + func_testHugepages --> func_LogError + func_testHugepages --> hugepages.NewTester + func_testHugepages --> clientsholder.GetClientsHolder + func_testHugepages --> hugepages.Tester.Run +``` + +#### Functions calling `testHugepages` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testHugepages +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testHugepages +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/provider" +) + +func main() { + // Assume env and check are initialized elsewhere + var env *provider.TestEnvironment 
+ var check *checksdb.Check + + platform.testHugepages(check, env) +} +``` + +--- + +### testHyperThreadingEnabled + +**testHyperThreadingEnabled** - Checks each bare‑metal node in the environment to determine whether hyper‑threading is enabled, logs the outcome, and records compliant/non‑compliant results. + +#### Signature (Go) + +```go +func testHyperThreadingEnabled(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks each bare‑metal node in the environment to determine whether hyper‑threading is enabled, logs the outcome, and records compliant/non‑compliant results. | +| **Parameters** | `check *checksdb.Check` – The check instance used for logging and result reporting.
`env *provider.TestEnvironment` – Test environment providing node information. | +| **Return value** | None (the function reports via `check.SetResult`). | +| **Key dependencies** | • `env.GetBaremetalNodes()`
• `node.IsHyperThreadNode(env)`
• `check.LogInfo` / `check.LogError`
• `testhelper.NewNodeReportObject`
• `check.SetResult` | +| **Side effects** | *Mutates the supplied `check` by calling `SetResult` with lists of compliant and non‑compliant report objects.
* Emits log entries via the check’s logger. | +| **How it fits the package** | Implements the “Hyper‑Threading Enabled” test within the platform test suite; invoked from `LoadChecks` during test registration. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Retrieve bare‑metal nodes"] --> B{"Iterate over nodes"} + B -->|"For each node"| C["Log “Testing node”"] + C --> D["Call IsHyperThreadNode(env)"] + D --> E{"Result"} + E -- enabled --> F["Create compliant report object"] + E -- error --> G["Create non‑compliant report object with error reason"] + E -- disabled --> H["Create non‑compliant report object"] + F & G & H --> I["Append to respective slice"] + I --> J["After loop: set check result"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testHyperThreadingEnabled --> func_GetBaremetalNodes + func_testHyperThreadingEnabled --> func_LogInfo + func_testHyperThreadingEnabled --> func_IsHyperThreadNode + func_testHyperThreadingEnabled --> func_NewNodeReportObject + func_testHyperThreadingEnabled --> func_LogError + func_testHyperThreadingEnabled --> func_SetResult +``` + +#### Functions calling `testHyperThreadingEnabled` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testHyperThreadingEnabled +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testHyperThreadingEnabled +func ExampleTest() { + // Assume env and check are already initialized + var env provider.TestEnvironment + var check checksdb.Check + + // Run the hyper‑threading check + testHyperThreadingEnabled(&check, &env) + + // Inspect results + fmt.Println("Compliant nodes:", len(check.CompliantObjects)) + fmt.Println("Non‑compliant nodes:", len(check.NonCompliantObjects)) +} +``` + +--- + +--- + +### testIsRedHatRelease + +**testIsRedHatRelease** - Validates each container in the test environment is built from a Red Hat Enterprise Linux base image. It records compliance or non‑compliance for reporting. 
+ +#### Signature (Go) + +```go +func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Validates each container in the test environment is built from a Red Hat Enterprise Linux base image. It records compliance or non‑compliance for reporting. | +| **Parameters** | `check *checksdb.Check` – The check instance used to log and store results.
`env *provider.TestEnvironment` – Holds metadata about containers under test. | +| **Return value** | None (void). Results are stored via `check.SetResult`. | +| **Key dependencies** | • `isredhat.NewBaseImageTester` – creates a tester for a container’s base image.
• `clientsholder.GetClientsHolder()` & `clientsholder.NewContext(...)` – provide Kubernetes client context.
• `testhelper.NewContainerReportObject` – constructs report objects.
• Logging methods on the check (`LogInfo`, `LogError`). | +| **Side effects** | • Logs information and errors to the check’s logger.
• Appends compliant/non‑compliant containers to result slices.
• Calls `check.SetResult` to store final lists. No external I/O or concurrency is performed. | +| **How it fits the package** | Part of the *platform* test suite, this function implements the logic for the “Is Red Hat Release” check (TestID: `TestIsRedHatReleaseIdentifier`). It is invoked by `LoadChecks` when registering the check. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over env.Containers"} + B -->|"For each container"| C["Log “Testing Container”"] + C --> D["Create BaseImageTester"] + D --> E["TestContainerIsRedHatRelease()"] + E --> F{"Result?"} + F -->|"true"| G["Log “passed”, add to compliantObjects"] + F -->|"false"| H["Log “failed”, add to nonCompliantObjects"] + G & H --> I["Continue loop"] + I --> J{"Loop finished?"} + J -->|"yes"| K["check.SetResult(compliant, nonCompliant)"] + K --> L["End"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testIsRedHatRelease --> func_LogInfo + func_testIsRedHatRelease --> func_NewBaseImageTester + func_testIsRedHatRelease --> func_GetClientsHolder + func_testIsRedHatRelease --> func_NewContext + func_testIsRedHatRelease --> func_TestContainerIsRedHatRelease + func_testIsRedHatRelease --> func_LogError + func_testIsRedHatRelease --> func_NewContainerReportObject + func_testIsRedHatRelease --> func_SetResult +``` + +#### Functions calling `testIsRedHatRelease` + +```mermaid +graph TD + func_LoadChecks --> func_testIsRedHatRelease +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testIsRedHatRelease +env := &provider.TestEnvironment{ + Containers: []*provider.Container{ + {Namespace: "default", Podname: "pod-1", Name: "app"}, + }, +} +check := checksdb.NewCheck(...) +testIsRedHatRelease(check, env) +``` + +--- + +### testIsSELinuxEnforcing + +**testIsSELinuxEnforcing** - Executes `getenforce` inside each probe pod to confirm SELinux is in *enforcing* state on the host node. 
Reports compliance per node and aggregates results into the test check. + +#### 1) Signature (Go) + +```go +func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes `getenforce` inside each probe pod to confirm SELinux is in *enforcing* state on the host node. Reports compliance per node and aggregates results into the test check. | +| **Parameters** | `check *checksdb.Check` – the test definition that receives results.
`env *provider.TestEnvironment` – contains the list of probe pods used to reach each node. | +| **Return value** | None; results are stored via `check.SetResult`. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()`
• `clientsholder.NewContext(...)`
• `o.ExecCommandContainer(ctx, cmd)`
• `testhelper.NewPodReportObject`, `testhelper.NewNodeReportObject`
• `check.LogError`, `check.LogInfo`, `check.SetResult` | +| **Side effects** | Executes a command inside each probe pod; logs errors and info; mutates the test check with compliance data. No external state is altered. | +| **How it fits the package** | Part of the platform tests suite, specifically the SELinux enforcement check that ensures cluster nodes run with correct security posture. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over probe pods"} + B --> C["Create exec context"] + C --> D["Execute `getenforce` in pod"] + D --> E{"Command succeeded?"} + E -- No --> F["Log error, record non‑compliant pod, increment nodesError"] + E -- Yes --> G{"Output == Enforcing\n?"} + G -- No --> H["Log node not enforcing, record non‑compliant node, increment nodesFailed"] + G -- Yes --> I["Record compliant node"] + F & H & I --> J["Continue loop"] + J --> K{"All pods processed?"} + K -- No --> B + K -- Yes --> L["Set result on check"] + L --> M["End"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testIsSELinuxEnforcing --> clientsholder.GetClientsHolder + func_testIsSELinuxEnforcing --> clientsholder.NewContext + func_testIsSELinuxEnforcing --> ExecCommandContainer + func_testIsSELinuxEnforcing --> LogError + func_testIsSELinuxEnforcing --> LogInfo + func_testIsSELinuxEnforcing --> append + func_testIsSELinuxEnforcing --> testhelper.NewPodReportObject + func_testIsSELinuxEnforcing --> testhelper.NewNodeReportObject + func_testIsSELinuxEnforcing --> SetResult +``` + +#### 5) Functions calling `testIsSELinuxEnforcing` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testIsSELinuxEnforcing +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testIsSELinuxEnforcing +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/provider" +) + +// Assume `check` and `env` are already 
initialized. +func runExample() { + check := checksdb.NewCheck(...) + env := &provider.TestEnvironment{ProbePods: ...} + testIsSELinuxEnforcing(check, env) +} +``` + +--- + +### testNodeOperatingSystemStatus + +**testNodeOperatingSystemStatus** - Ensures every control‑plane node uses RHCOS or CentOS Stream CoreOS and that worker nodes use a supported OS (RHCOS, RHEL, or CSCC). Checks version compatibility against the OpenShift release. + +#### Signature (Go) + +```go +func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures every control‑plane node uses RHCOS or CentOS Stream CoreOS and that worker nodes use a supported OS (RHCOS, RHEL, or CSCC). Checks version compatibility against the OpenShift release. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result handling.
`env *provider.TestEnvironment` – environment containing node list and OpenShift version. | +| **Return value** | None (results are stored via `check.SetResult`). | +| **Key dependencies** | *Logging*: `LogInfo`, `LogError`, `LogDebug`.
*Node checks*: `IsControlPlaneNode`, `IsWorkerNode`, `IsRHCOS`, `IsCSCOS`, `IsRHEL`.
*Version retrieval*: `GetRHCOSVersion`, `GetCSCOSVersion`, `GetRHELVersion`.
*Compatibility checks*: `compatibility.IsRHCOSCompatible`, `compatibility.IsRHELCompatible`.
*Reporting*: `testhelper.NewNodeReportObject`, `AddField`. | +| **Side effects** | Modifies the test result via `check.SetResult`; logs diagnostic messages; appends to internal slices of compliant/non‑compliant report objects. | +| **How it fits the package** | Implements the *TestNodeOperatingSystemIdentifier* check within the platform test suite, ensuring cluster nodes comply with Red Hat operating system requirements before proceeding with further tests. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Iterate over env.Nodes"] --> B{"node.IsControlPlaneNode()"} + B -- yes --> C{"node.IsRHCOS() || node.IsCSCOS()"} + C -- no --> D["Log error, record non‑compliant"] + C -- yes --> E["continue"] + B -- no --> F{"node.IsWorkerNode()"} + F -- yes --> G{"node.IsRHCOS()"} + G -- yes --> H["Get RHCOS version"] + H --> I{"error"} + I -- yes --> J["Log error, record non‑compliant"] + I -- no --> K{"shortVersion == NotFoundStr"} + K -- yes --> L["Skip node"] + K -- no --> M["Check compatibility via IsRHCOSCompatible"] + M -- incompatible --> N["Log error, record non‑compliant"] + M -- compatible --> O["Record compliant"] + G -- no --> P{"node.IsCSCOS()"} + P -- yes --> Q["Get CSCC version"] --> R["Log debug (unreleased)"] + P -- no --> S{"node.IsRHEL()"} + S -- yes --> T["Get RHEL version"] --> U{"error"} + U -- yes --> V["Log error, record non‑compliant"] + U -- no --> W["Check compatibility via IsRHELCompatible"] + W -- incompatible --> X["Log error, record non‑compliant"] + W -- compatible --> Y["Record compliant"] + S -- no --> Z["Log error, record non‑compliant"] + ... 
+``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testNodeOperatingSystemStatus --> LogInfo + func_testNodeOperatingSystemStatus --> LogError + func_testNodeOperatingSystemStatus --> LogDebug + func_testNodeOperatingSystemStatus --> IsControlPlaneNode + func_testNodeOperatingSystemStatus --> IsWorkerNode + func_testNodeOperatingSystemStatus --> IsRHCOS + func_testNodeOperatingSystemStatus --> IsCSCOS + func_testNodeOperatingSystemStatus --> IsRHEL + func_testNodeOperatingSystemStatus --> GetRHCOSVersion + func_testNodeOperatingSystemStatus --> GetCSCOSVersion + func_testNodeOperatingSystemStatus --> GetRHELVersion + func_testNodeOperatingSystemStatus --> compatibility.IsRHCOSCompatible + func_testNodeOperatingSystemStatus --> compatibility.IsRHELCompatible + func_testNodeOperatingSystemStatus --> testhelper.NewNodeReportObject + func_testNodeOperatingSystemStatus --> AddField +``` + +#### Functions calling `testNodeOperatingSystemStatus` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testNodeOperatingSystemStatus +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testNodeOperatingSystemStatus +check := checksdb.NewCheck(...) +env := &provider.TestEnvironment{ /* populate Nodes and OpenshiftVersion */ } +testNodeOperatingSystemStatus(check, env) +// check.Result now contains compliant/non‑compliant node reports +``` + +--- + +### testOCPStatus + +**testOCPStatus** - Inspects `env.OCPStatus` to decide if the cluster’s OpenShift version is in end‑of‑life (EOL). Logs an appropriate message and creates a compliance report object. + +#### 1) Signature (Go) + +```go +func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Inspects `env.OCPStatus` to decide if the cluster’s OpenShift version is in end‑of‑life (EOL). Logs an appropriate message and creates a compliance report object. 
| +| **Parameters** | • `check *checksdb.Check` – test instance used for logging and setting results.
• `env *provider.TestEnvironment` – environment data containing the cluster’s OpenShift version and lifecycle status. | +| **Return value** | None (void). The function records its result via `check.SetResult`. | +| **Key dependencies** | • `LogError`, `LogInfo` on the check instance.
• `testhelper.NewClusterVersionReportObject` to build report objects.
• `SetResult` to store compliant/non‑compliant results. | +| **Side effects** | • Emits log messages.
• Mutates the check’s result state via `SetResult`. | +| **How it fits the package** | Part of the OpenShift platform test suite; called by `LoadChecks` when registering the “OCPLifecycle” check. It validates that a cluster is not running an EOL version. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"env.OCPStatus"} + B -- OCPStatusEOL --> C["LogError, set non‑compliant"] + B -- OCPStatusMS --> D["LogInfo"] + B -- OCPStatusGA --> D + B -- OCPStatusPreGA --> D + B -- default --> E["LogInfo"] + C --> F["Create non‑compliant report object"] + D --> G["Create compliant report object"] + E --> G + F & G --> H["check.SetResult(compliant, nonCompliant)"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testOCPStatus --> func_LogError + func_testOCPStatus --> func_LogInfo + func_testOCPStatus --> testhelper.NewClusterVersionReportObject + func_testOCPStatus --> func_SetResult +``` + +#### 5) Functions calling `testOCPStatus` (Mermaid) + +```mermaid +graph TD + checksdb.NewCheck --> func_testOCPStatus +``` + +> *Note: The only caller in the package is the registration of the “OCPLifecycle” check inside `LoadChecks`.* + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testOCPStatus +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" +) + +func main() { + // Create a dummy check and environment + c := &checksdb.Check{} + env := &provider.TestEnvironment{ + OpenshiftVersion: "4.12", + OCPStatus: compatibility.OCPStatusGA, // assume GA for illustration + } + + // Run the status test + platform.testOCPStatus(c, env) + + // Results are now stored in c.Result +} +``` + +*The example demonstrates how to call `testOCPStatus` directly with a check instance and environment data.* + +--- + +### testPodHugePagesSize + 
+**testPodHugePagesSize** - Iterates over all huge‑pages pods in the test environment and verifies each pod’s resource requests/limits match the expected page size (`size`). Logs results and records compliant/non‑compliant objects. + +#### Signature (Go) + +```go +func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, size string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Iterates over all huge‑pages pods in the test environment and verifies each pod’s resource requests/limits match the expected page size (`size`). Logs results and records compliant/non‑compliant objects. | +| **Parameters** | `check *checksdb.Check` – test check context.
`env *provider.TestEnvironment` – execution environment providing huge‑pages pod list.
`size string` – expected huge‑pages size (e.g., `"2Mi"` or `"1Gi"`). | +| **Return value** | None. Side effects are recorded via `check.SetResult`. | +| **Key dependencies** | • `env.GetHugepagesPods()`
• `put.CheckResourceHugePagesSize(size)`
• `check.LogInfo`, `check.LogError`
• `testhelper.NewPodReportObject`
• `check.SetResult` | +| **Side effects** | Logs informational or error messages; builds slices of report objects and stores them in the check result. No external I/O beyond logging. | +| **How it fits the package** | Used by platform test suite to assert correct huge‑pages configuration for pods, supporting tests `TestPodHugePages2M` and `TestPodHugePages1G`. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.GetHugepagesPods()"} + B -->|"for each pod"| C["LogInfo “Testing Pod …”"] + C --> D["CheckResourceHugePagesSize(size)"] + D --> E{"Result"} + E -- false --> F["LogError “Pod … incorrect size”"] + F --> G["Append to nonCompliantObjects"] + E -- true --> H["LogInfo “Pod … correct size”"] + H --> I["Append to compliantObjects"] + I --> J["Continue loop"] + J --> B + B -- end --> K["SetResult(compliant, nonCompliant)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPodHugePagesSize --> func_GetHugepagesPods + func_testPodHugePagesSize --> func_CheckResourceHugePagesSize + func_testPodHugePagesSize --> func_LogInfo + func_testPodHugePagesSize --> func_LogError + func_testPodHugePagesSize --> func_NewPodReportObject + func_testPodHugePagesSize --> func_SetResult +``` + +#### Functions calling `testPodHugePagesSize` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testPodHugePagesSize +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPodHugePagesSize +func ExampleTest() { + // Assume check and env are already created by the testing framework. + var check *checksdb.Check + var env *provider.TestEnvironment + + // Validate that all huge‑pages pods use 2Mi pages. + testPodHugePagesSize(check, env, provider.HugePages2Mi) +} +``` + +--- + +--- + +### testServiceMesh + +**testServiceMesh** - Verifies each pod has at least one Istio‑proxy container. Pods lacking the proxy are flagged as non‑compliant; those with it are marked compliant. 
+ +#### Signature (Go) + +```go +func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Verifies each pod has at least one Istio‑proxy container. Pods lacking the proxy are flagged as non‑compliant; those with it are marked compliant. | +| **Parameters** | `check *checksdb.Check` – test context for logging and result reporting.
`env *provider.TestEnvironment` – runtime information, including the list of pods to inspect. | +| **Return value** | None – results are stored via `check.SetResult`. | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `Container.IsIstioProxy()`
• `testhelper.NewPodReportObject`
• `check.SetResult` | +| **Side effects** | Emits log messages; creates report objects; updates the check result. No external I/O or state mutation beyond the supplied structures. | +| **How it fits the package** | Implements the “Service Mesh” test in the platform suite, ensuring that an Istio installation is present on all pods when required. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate env.Pods"} + B -->|"for each pod"| C["Log pod name"] + C --> D{"Search containers"} + D -->|"found Istio proxy"| E["Mark compliant"] + D -->|"not found"| F["Mark non‑compliant"] + E --> G["Append to compliant list"] + F --> H["Append to non‑compliant list"] + G & H --> I["Continue loop"] + I --> J{"Loop finished?"} + J -- yes --> K["SetResult(compliant, nonCompliant)"] + K --> L["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testServiceMesh --> func_LogInfo + func_testServiceMesh --> func_IsIstioProxy + func_testServiceMesh --> func_LogError + func_testServiceMesh --> func_append + func_testServiceMesh --> func_NewPodReportObject + func_testServiceMesh --> func_SetResult +``` + +#### Functions calling `testServiceMesh` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testServiceMesh +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testServiceMesh +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func main() { + // Assume `c` and `env` are initialized appropriately. + var c *checksdb.Check + var env *provider.TestEnvironment + + platform.testServiceMesh(c, env) +} +``` + +--- + +--- + +### testSysctlConfigs + +**testSysctlConfigs** - Ensures that each node’s runtime sysctl settings match the corresponding kernel arguments defined in its machine config. 
Non‑compliant nodes are recorded for reporting. + +#### Signature (Go) + +```go +func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Ensures that each node’s runtime sysctl settings match the corresponding kernel arguments defined in its machine config. Non‑compliant nodes are recorded for reporting. | +| **Parameters** | `check *checksdb.Check` – test context used for logging and result aggregation.
`env *provider.TestEnvironment` – collection of cluster metadata, including containers, probe pods, and node configuration. | +| **Return value** | None; the function records results via `check.SetResult`. | +| **Key dependencies** | • `sysctlconfig.GetSysctlSettings` (executes `sysctl --system` inside a probe pod).
• `bootparams.GetMcKernelArguments` (extracts kernel arguments from machine config).
• `testhelper.NewNodeReportObject` (creates report entries).
• Logging helpers: `LogInfo`, `LogError`. | +| **Side effects** | • Logs informational and error messages.
• Modifies the internal slices of compliant/non‑compliant objects held by `check`. | +| **How it fits the package** | Implements the “TestSysctlConfigs” check registered in `LoadChecks`. It verifies kernel configuration consistency across nodes, a core part of platform integrity validation. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Iterate over all containers"] --> B{"First container on node?"} + B -- Yes --> C["Get probe pod"] + C --> D{"Probe pod exists?"} + D -- No --> E["Log error, add non‑compliant report"] + D -- Yes --> F["Retrieve sysctl settings"] + F --> G{"Error retrieving?"} + G -- Yes --> H["Log error, add non‑compliant report"] + G -- No --> I["Get kernel arguments map"] + I --> J["Compare each key/value pair"] + J --> K{"Mismatch found?"} + K -- Yes --> L["Log mismatch, add non‑compliant report, mark invalid"] + K -- No --> M["Continue comparison"] + J --> N{"All keys valid?"} + N -- Yes --> O["Mark node compliant, add report"] + N -- No --> P["Skip compliance for this node"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testSysctlConfigs --> func_GetSysctlSettings + func_testSysctlConfigs --> func_GetMcKernelArguments + func_testSysctlConfigs --> func_NewNodeReportObject + func_testSysctlConfigs --> func_LogInfo + func_testSysctlConfigs --> func_LogError +``` + +#### Functions calling `testSysctlConfigs` + +```mermaid +graph TD + func_LoadChecks --> func_testSysctlConfigs +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testSysctlConfigs +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform/provider" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" +) + +func example() { + // Assume env is prepared elsewhere with containers and probe pods + var env *provider.TestEnvironment + + // Create a dummy check context + check := checksdb.NewCheck(nil) // In real code, provide proper 
metadata + + platform.testSysctlConfigs(check, env) + + // Results are now available via check.Result() +} +``` + +--- + +### testTainted + +**testTainted** - Determines whether each node in the environment has kernel taints that are either unapproved or caused by modules not on an allow‑list. It records compliant and non‑compliant findings for reporting. + +#### Signature (Go) + +```go +func testTainted(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Determines whether each node in the environment has kernel taints that are either unapproved or caused by modules not on an allow‑list. It records compliant and non‑compliant findings for reporting. | +| **Parameters** | `check *checksdb.Check` – test harness object;
`env *provider.TestEnvironment` – runtime data (nodes, pods, config). | +| **Return value** | none (the function records results via `check.SetResult`). | +| **Key dependencies** | - `clientsholder.NewContext`, `nodetainted.NewNodeTaintedTester` for per‑node taint access.
- `GetKernelTaintsMask`, `GetTainterModules`, `DecodeKernelTaintsFromBitMask`, `DecodeKernelTaintsFromLetters`, `GetOtherTaintedBits`, `RemoveAllExceptNumbers`, `GetTaintMsg`.
- Logging helpers (`LogInfo`, `LogError`).
- Report constructors (`testhelper.NewNodeReportObject`, `testhelper.NewTaintReportObject`). | +| **Side effects** | Emits log messages; collects and sets test results via `check.SetResult`; may mutate slices of report objects but does not alter global state. | +| **How it fits the package** | This function is invoked by the platform test loader (`LoadChecks`) as part of the “Non‑Tainted Node Kernels” check, ensuring that cluster nodes do not contain unexpected kernel taints. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["testTainted"] --> B["Iterate env.Nodes"] + B --> C["Check workload deployed?"] + C -->|"yes"| D["Create probe context & tester"] + D --> E["Retrieve taints mask"] + E --> F{"mask==0"} + F -->|"yes"| G["Mark node compliant"] + F -->|"no"| H["Decode mask to taint list"] + H --> I["Check allow‑list presence"] + I -->|"none"| J["Log error, mark non‑compliant"] + I -->|"present"| K["Get tainter modules"] + K --> L{"error"} + L -->|"yes"| M["Mark node non‑compliant, record error"] + L -->|"no"| N["Process each module’s taints"] + N --> O["Log errors per taint"] + N --> P["Set compliantNode=false"] + N --> Q["Collect other kernel taints"] + Q --> R["Log errors for unmodule taints"] + R --> S["Set compliantNode=false"] + P --> T{"compliantNode"} + T -->|"yes"| U["Mark node compliant"] + T -->|"no"| V["Continue next node"] + U & V --> W["Finish loop"] + W --> X["Check errNodes, badModules, otherTaints"] + X --> Y["Log aggregated errors"] + Y --> Z["check.SetResult(compliantObjects, nonCompliantObjects)"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_testTainted --> func_LogInfo + func_testTainted --> func_HasWorkloadDeployed + func_testTainted --> func_NewContext + func_testTainted --> func_NewNodeTaintedTester + func_testTainted --> func_GetKernelTaintsMask + func_testTainted --> func_DecodeKernelTaintsFromBitMask + func_testTainted --> func_GetTainterModules + func_testTainted --> 
func_RemoveAllExceptNumbers + func_testTainted --> func_DecodeKernelTaintsFromLetters + func_testTainted --> func_GetOtherTaintedBits + func_testTainted --> func_GetTaintMsg + func_testTainted --> func_LogError + func_testTainted --> func_AddField + func_testTainted --> func_NewNodeReportObject + func_testTainted --> func_NewTaintReportObject + func_testTainted --> func_SetResult +``` + +#### Functions calling `testTainted` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testTainted +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testTainted +func ExampleTestTainted() { + // Setup a mock environment and check object. + env := &provider.TestEnvironment{ /* populate Nodes, Pods, Config */ } + chk := checksdb.NewCheck("example-test") + + // Execute the taint verification logic. + testTainted(chk, env) + + // Inspect results (for demonstration purposes). + fmt.Println("Compliant:", len(chk.CompliantObjects)) + fmt.Println("Non‑compliant:", len(chk.NonCompliantObjects)) +} +``` + +--- + +### testUnalteredBootParams + +**testUnalteredBootParams** - For every distinct node in the test environment, it runs `bootparams.TestBootParamsHelper` to ensure that kernel command‑line arguments are unchanged from their configured MachineConfig values. It records compliant and non‑compliant nodes for reporting. + +#### Signature (Go) + +```go +func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | For every distinct node in the test environment, it runs `bootparams.TestBootParamsHelper` to ensure that kernel command‑line arguments are unchanged from their configured MachineConfig values. It records compliant and non‑compliant nodes for reporting. | +| **Parameters** | `check *checksdb.Check` – the check context used for logging and result aggregation.
`env *provider.TestEnvironment` – contains containers, probe pods, and helper methods needed to query node state. | +| **Return value** | None (the function updates the check’s result via `SetResult`). | +| **Key dependencies** | • `check.LogInfo`, `check.LogError`
• `bootparams.TestBootParamsHelper`
• `check.GetLogger()`
• `testhelper.NewNodeReportObject`
• `check.SetResult` | +| **Side effects** | • Logs information and errors.
• Creates report objects for compliant/non‑compliant nodes.
• Marks nodes as checked to avoid duplicate processing. | +| **How it fits the package** | It is one of the platform‑alteration tests loaded by `LoadChecks`. It ensures boot parameter integrity across all nodes in an OCP cluster. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckContainers + CheckContainers -->|"For each container"| NodeLoop + NodeLoop --> DedupCheck + DedupCheck -->|"Not checked yet"| CallBootParamsHelper + DedupCheck -->|"Already checked"| SkipNode + CallBootParamsHelper -->|"Error"| RecordNonCompliant + CallBootParamsHelper -->|"Success"| RecordCompliant + RecordNonCompliant --> SetResult + RecordCompliant --> SetResult + SetResult --> End +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + testUnalteredBootParams --> LogInfo + testUnalteredBootParams --> TestBootParamsHelper + testUnalteredBootParams --> GetLogger + testUnalteredBootParams --> LogError + testUnalteredBootParams --> NewNodeReportObject + testUnalteredBootParams --> SetResult +``` + +#### Functions calling `testUnalteredBootParams` (Mermaid) + +```mermaid +graph TD + LoadChecks --> testUnalteredBootParams +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testUnalteredBootParams +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/platform" + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/provider" +) + +func main() { + check := checksdb.NewCheck("testUnalteredBootParams") + env := provider.NewTestEnvironment() + platform.testUnalteredBootParams(check, env) +} +``` + +--- diff --git a/docs/tests/platform/sysctlconfig/sysctlconfig.md b/docs/tests/platform/sysctlconfig/sysctlconfig.md new file mode 100644 index 000000000..a0b69df1f --- /dev/null +++ b/docs/tests/platform/sysctlconfig/sysctlconfig.md @@ -0,0 +1,192 @@ +# Package sysctlconfig + +**Path**: `tests/platform/sysctlconfig` + +## Table of Contents + +- 
[Overview](#overview) +- [Exported Functions](#exported-functions) + - [GetSysctlSettings](#getsysctlsettings) +- [Local Functions](#local-functions) + - [parseSysctlSystemOutput](#parsesysctlsystemoutput) + +## Overview + +The sysctlconfig package provides utilities for retrieving and parsing kernel sysctl settings from a Kubernetes node via a probe pod, enabling tests to verify system configuration. + +### Key Features + +- Executes `sysctl --system` inside a probe pod and returns the output as a map of key‑value pairs +- Parses multiline sysctl output while filtering comments and malformed lines +- Integrates with CertSuite’s client holder for pod communication + +### Design Notes + +- Assumes the test environment can run privileged pods to execute sysctl commands +- Relies on regex pattern matching; non‑matching lines are silently ignored +- Best practice: call GetSysctlSettings early in a platform test to capture baseline configuration + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func GetSysctlSettings(env *provider.TestEnvironment, nodeName string) (map[string]string, error)](#getsysctlsettings) | Executes `sysctl --system` inside a probe pod to collect kernel sysctl settings for a specific node and returns them as a key‑value map. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func parseSysctlSystemOutput(string) map[string]string](#parsesysctlsystemoutput) | Parses the multiline string returned by `sysctl --system` and builds a key‑value map of sysctl settings. Lines that are comments or do not match the expected pattern are ignored. | + +## Exported Functions + +### GetSysctlSettings + +**GetSysctlSettings** - Executes `sysctl --system` inside a probe pod to collect kernel sysctl settings for a specific node and returns them as a key‑value map. 
+ +#### 1) Signature (Go) + +```go +func GetSysctlSettings(env *provider.TestEnvironment, nodeName string) (map[string]string, error) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes `sysctl --system` inside a probe pod to collect kernel sysctl settings for a specific node and returns them as a key‑value map. | +| **Parameters** | `env *provider.TestEnvironment –` test environment containing probe pods.
`nodeName string –` name of the target node. | +| **Return value** | `map[string]string –` mapping from sysctl keys to their current values.
`error –` non‑nil if command execution fails or output parsing encounters an issue. | +| **Key dependencies** | • `clientsholder.GetClientsHolder()` – obtains Kubernetes client holder.
• `clientsholder.NewContext(...)` – creates exec context for the probe pod.
• `ExecCommandContainer(ctx, cmd)` – runs the command inside the container.
• `parseSysctlSystemOutput(outStr)` – parses raw sysctl output into a map.
• `fmt.Errorf` – formats error messages. | +| **Side effects** | No state mutation; performs I/O by executing a shell command inside a pod and reads its stdout/stderr. | +| **How it fits the package** | Provides core functionality for sysctl configuration checks, enabling tests to compare runtime settings against expected machine‑config values. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["GetClientsHolder"] --> B["NewContext"] + B --> C["ExecCommandContainer"] + C --> D{"Success?"} + D -- yes --> E["parseSysctlSystemOutput"] + D -- no --> F["Return error"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetSysctlSettings --> func_GetClientsHolder + func_GetSysctlSettings --> func_NewContext + func_GetSysctlSettings --> func_ExecCommandContainer + func_GetSysctlSettings --> func_parseSysctlSystemOutput + func_GetSysctlSettings --> fmt_Errorf +``` + +#### 5) Functions calling `GetSysctlSettings` (Mermaid) + +```mermaid +graph TD + func_testSysctlConfigs --> func_GetSysctlSettings +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking GetSysctlSettings +env := &provider.TestEnvironment{ /* initialized elsewhere */ } +nodeName := "worker-1" + +settings, err := sysctlconfig.GetSysctlSettings(env, nodeName) +if err != nil { + log.Fatalf("Failed to get sysctl settings: %v", err) +} + +for k, v := range settings { + fmt.Printf("%s = %s\n", k, v) +} +``` + +--- + +## Local Functions + +### parseSysctlSystemOutput + +**parseSysctlSystemOutput** - Parses the multiline string returned by `sysctl --system` and builds a key‑value map of sysctl settings. Lines that are comments or do not match the expected pattern are ignored. + +Creates a map of sysctl keys to their values from the output of `sysctl --system`. 
+ +#### Signature (Go) + +```go +func parseSysctlSystemOutput(string) map[string]string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Parses the multiline string returned by `sysctl --system` and builds a key‑value map of sysctl settings. Lines that are comments or do not match the expected pattern are ignored. | +| **Parameters** | `sysctlSystemOutput string –` raw output from `sysctl --system`. | +| **Return value** | `map[string]string` – mapping of sysctl keys to their corresponding values. | +| **Key dependencies** | • `make` (to create the map)
• `strings.Split` and `strings.HasPrefix` (to iterate lines)
• `regexp.MustCompile`, `MatchString`, `FindStringSubmatch` (regex parsing of key‑value pairs). | +| **Side effects** | None – purely functional. No I/O, no state mutation outside the returned map. | +| **How it fits the package** | Utility used by `GetSysctlSettings` to interpret command output and expose sysctl configuration as a Go data structure for further analysis or validation. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Split output into lines"} + B --> C{"For each line"} + C --> D{"Is line a comment?"} + D -- Yes --> E["Skip"] + D -- No --> F{"Matches key=val pattern?"} + F -- No --> G["Continue to next line"] + F -- Yes --> H["Extract key and value via regex"] + H --> I["Store in map"] + I --> J["End loop"] + J --> K["Return map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_parseSysctlSystemOutput --> make + func_parseSysctlSystemOutput --> strings.Split + func_parseSysctlSystemOutput --> strings.HasPrefix + func_parseSysctlSystemOutput --> regexp.MustCompile + func_parseSysctlSystemOutput --> MatchString + func_parseSysctlSystemOutput --> FindStringSubmatch +``` + +#### Functions calling `parseSysctlSystemOutput` (Mermaid) + +```mermaid +graph TD + GetSysctlSettings --> parseSysctlSystemOutput +``` + +#### Usage example (Go) + +```go +// Minimal example invoking parseSysctlSystemOutput +package main + +import ( + "fmt" +) + +func main() { + raw := ` +kernel.yama.ptrace_scope = 0 +*some comment line* +net.ipv4.ip_forward = 1` + settings := parseSysctlSystemOutput(raw) + fmt.Println(settings) // map[net.ipv4.ip_forward:1 kernel.yama.ptrace_scope:0] +} +``` + +--- diff --git a/docs/tests/preflight/preflight.md b/docs/tests/preflight/preflight.md new file mode 100644 index 000000000..961de784d --- /dev/null +++ b/docs/tests/preflight/preflight.md @@ -0,0 +1,745 @@ +# Package preflight + +**Path**: `tests/preflight` + +## Table of Contents + +- [Overview](#overview) +- [Exported 
Functions](#exported-functions) + - [LoadChecks](#loadchecks) + - [ShouldRun](#shouldrun) +- [Local Functions](#local-functions) + - [generatePreflightContainerCnfCertTest](#generatepreflightcontainercnfcerttest) + - [generatePreflightOperatorCnfCertTest](#generatepreflightoperatorcnfcerttest) + - [getUniqueTestEntriesFromContainerResults](#getuniquetestentriesfromcontainerresults) + - [getUniqueTestEntriesFromOperatorResults](#getuniquetestentriesfromoperatorresults) + - [labelsAllowTestRun](#labelsallowtestrun) + - [testPreflightContainers](#testpreflightcontainers) + - [testPreflightOperators](#testpreflightoperators) + +## Overview + +The preflight package orchestrates security scans for containers and operators in a test environment, converting the results into CNF‑certified checks that can be reported and evaluated. + +### Key Features + +- Runs container and operator pre‑flight diagnostics while caching image results to avoid duplicate work +- Aggregates unique test entries across all subjects and registers them as checks in a checks group +- Provides helper logic for determining if tests should run based on environment labels and configuration + +### Design Notes + +- Preflight execution is gated by ShouldRun, which inspects both environment state and label filters before any scans are performed +- Results are aggregated into maps keyed by test name to ensure each distinct test appears only once in the catalog +- Checks are added via a ChecksGroup; best practice is to create a new group per suite run so that results remain isolated + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func LoadChecks()](#loadchecks) | Initiates the pre‑flight test suite, retrieves the test environment, creates a checks group for *Preflight*, runs container and operator tests, and logs progress. | +| [func ShouldRun(labelsExpr string) bool](#shouldrun) | Returns `true` if the current environment and labels permit running preflight checks. 
It prevents unnecessary execution when no relevant tags are present or required configuration is missing. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container)](#generatepreflightcontainercnfcerttest) | Creates a catalog entry for a specific pre‑flight test and registers a check that evaluates the results of that test across all supplied container objects. | +| [func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {}](#generatepreflightoperatorcnfcerttest) | Registers a CNF pre‑flight test into the results catalog and adds a corresponding check that evaluates each operator’s pre‑flight outcomes for that specific test. | +| [func getUniqueTestEntriesFromContainerResults(containers []*provider.Container) map[string]provider.PreflightTest](#getuniquetestentriesfromcontainerresults) | Builds a map keyed by test name containing the first encountered `PreflightTest` result for each unique test across all provided containers. | +| [func getUniqueTestEntriesFromOperatorResults(operators []*provider.Operator) map[string]provider.PreflightTest](#getuniquetestentriesfromoperatorresults) | Aggregates all pre‑flight test entries (`Passed`, `Failed`, `Errors`) from a slice of operators into a single map keyed by test name, ensuring each test appears only once. | +| [func(labelsFilter string, allowedLabels []string) bool](#labelsallowtestrun) | Checks whether any of the *allowed* labels appear within the supplied label filter string. If at least one match is found, it permits the test run. 
| +| [func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment)](#testpreflightcontainers) | Executes Preflight security scans for every container in the supplied `TestEnvironment`, caches image results to avoid duplicate work, logs progress, and converts each unique test result into a CNF‑certified check within the provided checks group. | +| [func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment)](#testpreflightoperators) | Executes pre‑flight diagnostics on every operator present in `env.Operators`, records the results into the checks group, and generates CNF‑certification tests for each unique pre‑flight test discovered. | + +## Exported Functions + +### LoadChecks + +**LoadChecks** - Initiates the pre‑flight test suite, retrieves the test environment, creates a checks group for *Preflight*, runs container and operator tests, and logs progress. + +#### Signature (Go) + +```go +func LoadChecks() +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Initiates the pre‑flight test suite, retrieves the test environment, creates a checks group for *Preflight*, runs container and operator tests, and logs progress. | +| **Parameters** | None | +| **Return value** | None (side effects only) | +| **Key dependencies** | `log.Debug`, `provider.GetTestEnvironment`, `checksdb.NewChecksGroup`, `WithBeforeEachFn`, `testPreflightContainers`, `provider.IsOCPCluster`, `log.Info`, `testPreflightOperators` | +| **Side effects** | Writes log entries, mutates global variable `env`, registers checks in the database via `NewChecksGroup`. | +| **How it fits the package** | Acts as the entry point for the pre‑flight test suite; called by higher‑level test orchestration (`LoadChecksDB`). 
| + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start LoadChecks"] --> B["log.Debug"] + B --> C["get Test Environment via provider.GetTestEnvironment"] + C --> D["Create ChecksGroup with NewChecksGroup"] + D --> E["Attach beforeEachFn via WithBeforeEachFn"] + E --> F["testPreflightContainers"] + F --> G{"IsOCPCluster?"} + G -- Yes --> H["testPreflightOperators"] + G -- No --> I["Log skip message"] + H & I --> J["End LoadChecks"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_Log_Debug + func_LoadChecks --> func_Provider_GetTestEnvironment + func_LoadChecks --> func_Checksdb_NewChecksGroup + func_LoadChecks --> func_WithBeforeEachFn + func_LoadChecks --> func_testPreflightContainers + func_LoadChecks --> func_Provider_IsOCPCluster + func_LoadChecks --> func_Log_Info + func_LoadChecks --> func_testPreflightOperators +``` + +#### Functions calling `LoadChecks` (Mermaid) + +```mermaid +graph TD + func_Certsuite_LoadChecksDB --> func_LoadChecks +``` + +#### Usage example (Go) + +```go +// Minimal example invoking LoadChecks +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" +) + +func main() { + // Trigger the pre‑flight test suite + preflight.LoadChecks() +} +``` + +--- + +### ShouldRun + +**ShouldRun** - Returns `true` if the current environment and labels permit running preflight checks. It prevents unnecessary execution when no relevant tags are present or required configuration is missing. + +Determines whether the preflight test suite should be executed for a given label expression. + +--- + +#### Signature (Go) + +```go +func ShouldRun(labelsExpr string) bool +``` + +--- + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Returns `true` if the current environment and labels permit running preflight checks. It prevents unnecessary execution when no relevant tags are present or required configuration is missing. 
| +| **Parameters** | `labelsExpr string –` label expression supplied by the caller (e.g., a comma‑separated list of test tags). | +| **Return value** | `bool –` `true` if preflight tests should run, otherwise `false`. | +| **Key dependencies** | • `provider.GetTestEnvironment()`
• `labelsAllowTestRun(labelsExpr, allowedLabels)`
• `configuration.GetTestParameters().PfltDockerconfig`
• `log.Warn(...)` | +| **Side effects** | Sets the global test environment flag `SkipPreflight` to `true` when Docker config is missing; logs a warning. No other mutable state is altered. | +| **How it fits the package** | Used by `certsuite.LoadChecksDB` to decide whether to load and run preflight checks, thereby optimizing startup time and avoiding unnecessary work. | + +--- + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Get test environment"] --> B{"Check allowed labels"} + B -- false --> C["Return false"] + B -- true --> D["Retrieve Docker config path"] + D -- missing or NA --> E["Log warning & set SkipPreflight"] + D -- present --> F["Return true"] +``` + +--- + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_ShouldRun --> func_GetTestEnvironment + func_ShouldRun --> func_labelsAllowTestRun + func_ShouldRun --> func_GetTestParameters + func_ShouldRun --> func_Warn +``` + +--- + +#### Functions calling `ShouldRun` (Mermaid) + +```mermaid +graph TD + func_LoadChecksDB --> func_ShouldRun +``` + +--- + +#### Usage example (Go) + +```go +// Minimal example invoking ShouldRun +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" +) + +func main() { + labels := "preflight,security" // example label expression + if preflight.ShouldRun(labels) { + fmt.Println("Preflight checks will run.") + } else { + fmt.Println("Preflight checks skipped.") + } +} +``` + +--- + +--- + +## Local Functions + +### generatePreflightContainerCnfCertTest + +**generatePreflightContainerCnfCertTest** - Creates a catalog entry for a specific pre‑flight test and registers a check that evaluates the results of that test across all supplied container objects. 
+ +#### Signature (Go) + +```go +func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Creates a catalog entry for a specific pre‑flight test and registers a check that evaluates the results of that test across all supplied container objects. | +| **Parameters** | `checksGroup *checksdb.ChecksGroup` – group to which the check will be added.
`testName string` – name of the pre‑flight test.
`description string` – human readable description.
`remediation string` – suggested remediation action.
`containers []*provider.Container` – list of containers whose pre‑flight results are evaluated. | +| **Return value** | None (void). | +| **Key dependencies** | `identifiers.AddCatalogEntry`, `identifiers.GetTestIDAndLabels`, `testhelper.GetNoContainersUnderTestSkipFn`, `testhelper.NewContainerReportObject`, `fmt.Sprintf`, `check.SetResult` and several logging helpers. | +| **Side effects** | Modifies the supplied `checksGroup` by adding a new check; writes to internal catalog maps via `AddCatalogEntry`; produces log output for each container processed. No external I/O. | +| **How it fits the package** | Called from `testPreflightContainers` to translate per‑container pre‑flight results into the generic test framework used by CertSuite. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Create catalog entry via identifiers.AddCatalogEntry"] + B --> C["Add new check to checksGroup using checksdb.NewCheck"] + C --> D["Attach SkipCheckFn from testhelper.GetNoContainersUnderTestSkipFn"] + D --> E["Define CheckFn that iterates containers and pre‑flight results"] + E --> F["Populate compliantObjects / nonCompliantObjects"] + F --> G["SetResult on the check"] + G --> H["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_generatePreflightContainerCnfCertTest --> identifiers.AddCatalogEntry + func_generatePreflightContainerCnfCertTest --> checksdb.NewCheck + func_generatePreflightContainerCnfCertTest --> testhelper.GetNoContainersUnderTestSkipFn + func_generatePreflightContainerCnfCertTest --> testhelper.NewContainerReportObject + func_generatePreflightContainerCnfCertTest --> fmt.Sprintf + func_generatePreflightContainerCnfCertTest --> check.SetResult +``` + +#### Functions calling `generatePreflightContainerCnfCertTest` (Mermaid) + +```mermaid +graph TD + func_testPreflightContainers --> func_generatePreflightContainerCnfCertTest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking 
generatePreflightContainerCnfCertTest +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" + // assume provider.Container is defined elsewhere +) + +func main() { + checksGroup := &checksdb.ChecksGroup{} + containers := []*provider.Container{ /* populated container objects */ } + + preflight.generatePreflightContainerCnfCertTest( + checksGroup, + "ExampleTest", + "Checks that the image contains a non‑root user", + "Run `preflight run --tests=container.user` to verify", + containers, + ) +} +``` + +--- + +### generatePreflightOperatorCnfCertTest + +**generatePreflightOperatorCnfCertTest** - Registers a CNF pre‑flight test into the results catalog and adds a corresponding check that evaluates each operator’s pre‑flight outcomes for that specific test. + +#### Signature (Go) + +```go +func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) {} +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Registers a CNF pre‑flight test into the results catalog and adds a corresponding check that evaluates each operator’s pre‑flight outcomes for that specific test. | +| **Parameters** | `checksGroup *checksdb.ChecksGroup` – container of checks.
`testName string` – unique identifier of the test.
`description string` – human‑readable description.
`remediation string` – suggested fix.
`operators []*provider.Operator` – list of operators to evaluate. | +| **Return value** | none (void) | +| **Key dependencies** | • `identifiers.AddCatalogEntry`
• `checksdb.NewCheck` & its builder methods (`WithSkipCheckFn`, `WithCheckFn`)
• `identifiers.GetTestIDAndLabels`
• `testhelper.GetNoOperatorsSkipFn`
• Logging helpers (`LogInfo`, `LogError`)
• `testhelper.NewOperatorReportObject`
• `fmt.Sprintf`
• `Check.SetResult` | +| **Side effects** | Adds a catalog entry, creates and registers a new check in `checksGroup`, logs information or errors during evaluation, and records compliance results. | +| **How it fits the package** | Implements the core logic that translates operator pre‑flight outcomes into structured test cases used by the CNF testing framework. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Add catalog entry"] --> B["Create new check"] + B --> C["Set skip function"] + B --> D["Define check function"] + D --> E{"For each operator"} + E --> F{"Check Passed results"} + E --> G{"Check Failed results"} + E --> H{"Check Error results"} + F --> I["LogInfo & append compliant object"] + G --> J["LogError & append non‑compliant object"] + H --> K["LogError with fmt.Sprintf & append non‑compliant object"] + I & J & K --> L["SetResult on check"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_generatePreflightOperatorCnfCertTest --> identifiers.AddCatalogEntry + func_generatePreflightOperatorCnfCertTest --> checksdb.NewCheck + func_generatePreflightOperatorCnfCertTest --> identifiers.GetTestIDAndLabels + func_generatePreflightOperatorCnfCertTest --> testhelper.GetNoOperatorsSkipFn + func_generatePreflightOperatorCnfCertTest --> testhelper.LogInfo + func_generatePreflightOperatorCnfCertTest --> testhelper.LogError + func_generatePreflightOperatorCnfCertTest --> testhelper.NewOperatorReportObject + func_generatePreflightOperatorCnfCertTest --> fmt.Sprintf + func_generatePreflightOperatorCnfCertTest --> checksdb.Check.SetResult +``` + +#### Functions calling `generatePreflightOperatorCnfCertTest` (Mermaid) + +```mermaid +graph TD + testPreflightOperators --> generatePreflightOperatorCnfCertTest +``` + +#### Usage example (Go) + +```go +// Minimal example invoking generatePreflightOperatorCnfCertTest +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/checksdb" + 
"github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" +) + +func main() { + var checksGroup *checksdb.ChecksGroup // assume initialized elsewhere + testName := "Operator-ConfigValidation" + description := "Checks operator configuration validity." + remediation := "Verify operator config files." + + // Example operators slice (normally obtained from the environment) + var operators []*provider.Operator + + preflight.generatePreflightOperatorCnfCertTest(checksGroup, testName, description, remediation, operators) +} +``` + +--- + +### getUniqueTestEntriesFromContainerResults + +**getUniqueTestEntriesFromContainerResults** - Builds a map keyed by test name containing the first encountered `PreflightTest` result for each unique test across all provided containers. + +#### Signature (Go) + +```go +func getUniqueTestEntriesFromContainerResults(containers []*provider.Container) map[string]provider.PreflightTest +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a map keyed by test name containing the first encountered `PreflightTest` result for each unique test across all provided containers. | +| **Parameters** | *containers* `[]*provider.Container` – list of container objects to inspect. | +| **Return value** | `map[string]provider.PreflightTest` – a lookup of test names to their corresponding `PreflightTest` entry (passed, failed or error). | +| **Key dependencies** | • `make(map[string]provider.PreflightTest)`
• Iteration over `cut.PreflightResults.Passed`, `.Failed`, and `.Errors`. | +| **Side effects** | None – pure function; no mutation of input slices or external state. | +| **How it fits the package** | Used by `testPreflightContainers` to deduplicate test entries when generating container‑based pre‑flight certificates, ensuring each distinct test is processed once even if multiple containers share the same image. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> InitializeMap["Create empty map"] + InitializeMap --> LoopContainers{"For each container"} + LoopContainers --> CheckPassed["Iterate Passed results"] + CheckPassed --> StorePassed["Store in map by Name"] + StorePassed --> CheckFailed{"Next: Failed results"} + CheckFailed --> StoreFailed["Store in map by Name"] + StoreFailed --> CheckErrors{"Next: Error results"} + CheckErrors --> StoreErrors["Store in map by Name"] + StoreErrors --> EndLoop + EndLoop --> ReturnMap["Return constructed map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getUniqueTestEntriesFromContainerResults --> make +``` + +#### Functions calling `getUniqueTestEntriesFromContainerResults` (Mermaid) + +```mermaid +graph TD + testPreflightContainers --> getUniqueTestEntriesFromContainerResults +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getUniqueTestEntriesFromContainerResults +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" +) + +func main() { + var containers []*provider.Container // assume populated elsewhere + + testMap := preflight.getUniqueTestEntriesFromContainerResults(containers) + for name, test := range testMap { + fmt.Printf("Test %q: %+v\n", name, test) + } +} +``` + +--- + +### getUniqueTestEntriesFromOperatorResults + +**getUniqueTestEntriesFromOperatorResults** - Aggregates all pre‑flight test entries (`Passed`, `Failed`, `Errors`) from a slice of operators into a single map keyed by test name, ensuring each 
test appears only once. + +#### Signature (Go) + +```go +func getUniqueTestEntriesFromOperatorResults(operators []*provider.Operator) map[string]provider.PreflightTest +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Aggregates all pre‑flight test entries (`Passed`, `Failed`, `Errors`) from a slice of operators into a single map keyed by test name, ensuring each test appears only once. | +| **Parameters** | `operators []*provider.Operator` – list of operator instances whose pre‑flight results are to be examined. | +| **Return value** | `map[string]provider.PreflightTest` – mapping from test names to their most recent `PreflightTest` entry (from any operator). | +| **Key dependencies** | • `make(map[string]provider.PreflightTest)`
• Iteration over `op.PreflightResults.Passed`, `.Failed`, `.Errors`. | +| **Side effects** | None; purely functional. | +| **How it fits the package** | Used by higher‑level test orchestration to collate operator‑specific pre‑flight results before generating consolidated certificates or logs. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"Iterate over operators"} + B -->|"for each op"| C["Collect Passed tests"] + C --> D["Store in map by name"] + B -->|"continue"| E["Collect Failed tests"] + E --> F["Store/overwrite in map"] + B -->|"continue"| G["Collect Errors"] + G --> H["Store/overwrite in map"] + H --> I["Return aggregated map"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_getUniqueTestEntriesFromOperatorResults --> make +``` + +#### Functions calling `getUniqueTestEntriesFromOperatorResults` (Mermaid) + +```mermaid +graph TD + func_testPreflightOperators --> func_getUniqueTestEntriesFromOperatorResults +``` + +#### Usage example (Go) + +```go +// Minimal example invoking getUniqueTestEntriesFromOperatorResults +operators := []*provider.Operator{op1, op2, op3} +uniqueTests := getUniqueTestEntriesFromOperatorResults(operators) + +for name, test := range uniqueTests { + fmt.Printf("Test %s: %v\n", name, test) +} +``` + +--- + +### labelsAllowTestRun + +**labelsAllowTestRun** - Checks whether any of the *allowed* labels appear within the supplied label filter string. If at least one match is found, it permits the test run. + +#### Signature (Go) + +```go +func(labelsFilter string, allowedLabels []string) bool +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Checks whether any of the *allowed* labels appear within the supplied label filter string. If at least one match is found, it permits the test run. | +| **Parameters** | `labelsFilter` – a comma‑separated or space‑separated string representing labels applied to a test.
`allowedLabels` – slice of strings containing labels that grant permission to execute the suite. | +| **Return value** | `true` if any allowed label is present in `labelsFilter`; otherwise `false`. | +| **Key dependencies** | • `strings.Contains` from the standard library.
• No external packages are imported beyond `strings`. | +| **Side effects** | None – purely functional; no state mutation, I/O, or concurrency. | +| **How it fits the package** | Used by the internal `ShouldRun` helper to gate execution of preflight tests based on environmental labeling conventions. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> CheckEachLabel + CheckEachLabel -->|"Match Found"| ReturnTrue + CheckEachLabel -->|"No Match"| ReturnFalse +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_labelsAllowTestRun --> strings.Contains +``` + +#### Functions calling `labelsAllowTestRun` (Mermaid) + +```mermaid +graph TD + func_ShouldRun --> func_labelsAllowTestRun +``` + +#### Usage example (Go) + +```go +// Minimal example invoking labelsAllowTestRun +package main + +import ( + "fmt" +) + +func main() { + filter := "preflight,performance" + allowed := []string{"preflight", "security"} + + if labelsAllowTestRun(filter, allowed) { + fmt.Println("Test run permitted.") + } else { + fmt.Println("Test run denied.") + } +} + +// Output: +// Test run permitted. +``` + +--- + +### testPreflightContainers + +**testPreflightContainers** - Executes Preflight security scans for every container in the supplied `TestEnvironment`, caches image results to avoid duplicate work, logs progress, and converts each unique test result into a CNF‑certified check within the provided checks group. + +#### 1) Signature (Go) + +```go +func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes Preflight security scans for every container in the supplied `TestEnvironment`, caches image results to avoid duplicate work, logs progress, and converts each unique test result into a CNF‑certified check within the provided checks group. 
| +| **Parameters** | `checksGroup *checksdb.ChecksGroup` – Target collection for generated checks.
`env *provider.TestEnvironment` – Environment containing containers and related metadata. | +| **Return value** | None (void). The function mutates `checksGroup` and logs status. | +| **Key dependencies** | • `cut.SetPreflightResults(preflightImageCache, env)`
• `log.Fatal`, `log.Info`
• `getUniqueTestEntriesFromContainerResults(env.Containers)`
• `generatePreflightContainerCnfCertTest(checksGroup, testName, description, remediation, env.Containers)` | +| **Side effects** | • Populates `preflightImageCache` per image.
• Calls `SetPreflightResults` on each container, which may perform network I/O and modify the container’s result state.
• Emits fatal logs on error and info logs for progress.
• Adds checks to `checksGroup`. | +| **How it fits the package** | This helper is invoked by the preflight test loader (`LoadChecks`) to bridge raw Preflight results with Cert‑Suite's CNF certification framework. It centralizes result collection, caching, and transformation logic for container‑level tests. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> Cache["Create empty image cache"] + Cache --> ForEachContainer{"For each container in env.Containers"} + ForEachContainer --> SetResults["SetPreflightResults(cache, env)"] + SetResults -->|"Error?"| Fail["log.Fatal & exit"] + SetResults --> Continue["Proceed to next container"] + Continue --> EndLoop{{End of loop}} + EndLoop --> LogInfo["log.Info(Completed running Preflight container tests)"] + LogInfo --> ForEachTest{"For each unique test entry"} + ForEachTest --> GenerateTest["generatePreflightContainerCnfCertTest(checksGroup, …)"] + GenerateTest --> NextTest + NextTest --> End{{End of function}} +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_testPreflightContainers --> func_SetPreflightResults + func_testPreflightContainers --> func_log.Fatal + func_testPreflightContainers --> func_log.Info + func_testPreflightContainers --> func_getUniqueTestEntriesFromContainerResults + func_testPreflightContainers --> func_generatePreflightContainerCnfCertTest +``` + +#### 5) Functions calling `testPreflightContainers` (Mermaid) + +```mermaid +graph TD + func_LoadChecks --> func_testPreflightContainers +``` + +#### 6) Usage example (Go) + +```go +// Minimal example invoking testPreflightContainers +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + // Assume env has been populated elsewhere + var env provider.TestEnvironment + + checksGroup := 
checksdb.NewChecksGroup("preflight") + preflight.testPreflightContainers(checksGroup, &env) + + // Now checksGroup contains CNF‑certified tests derived from Preflight results +} +``` + +--- + +### testPreflightOperators + +**testPreflightOperators** - Executes pre‑flight diagnostics on every operator present in `env.Operators`, records the results into the checks group, and generates CNF‑certification tests for each unique pre‑flight test discovered. + +#### Signature (Go) + +```go +func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Executes pre‑flight diagnostics on every operator present in `env.Operators`, records the results into the checks group, and generates CNF‑certification tests for each unique pre‑flight test discovered. | +| **Parameters** | `checksGroup *checksdb.ChecksGroup` – container where check definitions are stored.
`env *provider.TestEnvironment` – runtime information including operator list. | +| **Return value** | None (void). Errors during pre‑flight execution terminate the process via a fatal log. | +| **Key dependencies** | • `op.SetPreflightResults(env)`
• `log.Fatal`, `log.Info` from internal/log
• `getUniqueTestEntriesFromOperatorResults`
• `generatePreflightOperatorCnfCertTest` | +| **Side effects** | • Mutates each operator’s pre‑flight result state.
• Emits log messages (info/fatal).
• Adds new checks to `checksGroup`. | +| **How it fits the package** | This function is called by `LoadChecks()` when an OpenShift cluster is detected. It bridges raw operator diagnostics with the CNF certification test framework. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Iterate over env.Operators"] --> B["op.SetPreflightResults(env)"] + B --> C{"err?"} + C -- yes --> D["log.Fatal"] + C -- no --> E["continue"] + E --> F["Collect unique test entries"] + F --> G["For each entry: generatePreflightOperatorCnfCertTest"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_testPreflightOperators --> func_SetPreflightResults + func_testPreflightOperators --> func_log.Fatal + func_testPreflightOperators --> func_log.Info + func_testPreflightOperators --> func_getUniqueTestEntriesFromOperatorResults + func_testPreflightOperators --> func_generatePreflightOperatorCnfCertTest +``` + +#### Functions calling `testPreflightOperators` + +```mermaid +graph TD + func_LoadChecks --> func_testPreflightOperators +``` + +#### Usage example (Go) + +```go +// Minimal example invoking testPreflightOperators +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/checksdb" + "github.com/redhat-best-practices-for-k8s/certsuite/internal/provider" +) + +func main() { + checksGroup := checksdb.NewChecksGroup("preflight") + env := provider.GetTestEnvironment() + preflight.testPreflightOperators(checksGroup, env) +} +``` + +--- diff --git a/docs/tests/suite.md b/docs/tests/suite.md new file mode 100644 index 000000000..8737516ee --- /dev/null +++ b/docs/tests/suite.md @@ -0,0 +1,23 @@ +# Package suite + +**Path**: `tests` + +## Table of Contents + +- [Overview](#overview) + +## Overview + +The `suite` package provides a lightweight framework for executing certificate‑related test cases against Kubernetes clusters. 
It is intended to be used by integration tests in the CertSuite project and offers convenient orchestration of individual test functions, result aggregation, and basic reporting. + +### Key Features + +- Centralized execution engine that runs a collection of test functions with setup/teardown hooks +- Automatic discovery and registration of test cases based on naming conventions or explicit registration +- Structured result collection including pass/fail status, error messages, and optional metrics + +### Design Notes + +- Test execution is driven by the Kubernetes client configuration available in the environment; if no kubeconfig is found tests are skipped +- The framework deliberately keeps side‑effects minimal – it performs only the actions required for each test case and cleans up immediately afterward +- Users should register tests via the exported `Register` function or by embedding them in a type that implements the `TestCase` interface to ensure deterministic ordering diff --git a/docs/webserver/webserver.md b/docs/webserver/webserver.md new file mode 100644 index 000000000..a4ba2a383 --- /dev/null +++ b/docs/webserver/webserver.md @@ -0,0 +1,863 @@ +# Package webserver + +**Path**: `webserver` + +## Table of Contents + +- [Overview](#overview) +- [Structs](#structs) + - [Entry](#entry) + - [RequestedData](#requesteddata) + - [ResponseData](#responsedata) +- [Exported Functions](#exported-functions) + - [CreatePrintableCatalogFromIdentifiers](#createprintablecatalogfromidentifiers) + - [GetSuitesFromIdentifiers](#getsuitesfromidentifiers) + - [StartServer](#startserver) +- [Local Functions](#local-functions) + - [installReqHandlers](#installreqhandlers) + - [logStreamHandler](#logstreamhandler) + - [outputTestCases](#outputtestcases) + - [runHandler](#runhandler) + - [toJSONString](#tojsonstring) + - [updateTnf](#updatetnf) + +## Overview + +The webserver package provides an HTTP server that serves static client assets and exposes endpoints to submit test 
runs, stream logs via WebSocket, and deliver a catalog of test cases for the certsuite tool. + +### Key Features + +- Serves embedded HTML/JS/CSS files and supports dynamic JSON responses for test case data +- Handles multipart form uploads with kubeconfig and configuration overrides, then triggers CERTSUITE execution +- Streams log output in real‑time over WebSockets converting ANSI to HTML + +### Design Notes + +- Embedded assets are compiled into the binary using Go's embed package, simplifying distribution +- Log streaming upgrades HTTP connections via gorilla/websocket; each line is processed before sending +- Configuration updates combine YAML defaults with user overrides from RequestedData; errors abort execution + +### Structs Summary + +| Name | Purpose | +|------|----------| +| [**Entry**](#entry) | One-line purpose | +| [**RequestedData**](#requesteddata) | Struct definition | +| [**ResponseData**](#responsedata) | One-line purpose | + +### Exported Functions Summary + +| Name | Purpose | +|------|----------| +| [func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry](#createprintablecatalogfromidentifiers) | Organises identifiers by their associated test suite into a printable structure. | +| [func GetSuitesFromIdentifiers(keys []claim.Identifier) []string](#getsuitesfromidentifiers) | Collects all suite names referenced by a slice of `claim.Identifier` values and returns them as a de‑duplicated list. | +| [func StartServer(outputFolder string)](#startserver) | Starts an HTTP server on port 8084 that serves static content and exposes endpoints to run tests, stream logs, and provide test results. The server’s context is enriched with the output folder path for downstream handlers. | + +### Local Functions Summary + +| Name | Purpose | +|------|----------| +| [func installReqHandlers() ()](#installreqhandlers) | Binds several HTTP endpoints (`/`, `/submit.js`, `/logs.js`, etc.) 
to handler functions that return embedded static content or dynamic data. | +| [func(logStreamHandler)(w http.ResponseWriter, r *http.Request)](#logstreamhandler) | Upgrades an HTTP connection to a WebSocket and streams the contents of a log file in real‑time to the client. Each line is converted from ANSI escape codes to HTML before transmission. | +| [func outputTestCases() (outString string)](#outputtestcases) | Builds a string representation of the test case catalog, formatted as a JavaScript object literal. The string is later written to an HTTP response for client‑side consumption. | +| [func runHandler(w http.ResponseWriter, r *http.Request)](#runhandler) | Processes a multipart/form‑data request containing a kubeconfig file and JSON options, updates the CERTSUITE configuration, runs the test suite, and returns a JSON status message. | +| [func(toJSONString data map[string]string) string](#tojsonstring) | Produces an indented JSON representation of a `map[string]string`. Useful for embedding classification details in output. | +| [func updateTnf(tnfConfig []byte, data *RequestedData) []byte](#updatetnf) | Reads a YAML configuration, applies user‑supplied overrides from `*RequestedData`, and returns the updated YAML. | + +## Structs + +### Entry + +Represents a single test entry in a printable catalog, containing the test name and its identifying information. + +#### Fields + +| Field | Type | Description | +|------------|-------------------|-------------| +| `testName` | `string` | The human‑readable name of the test. | +| `identifier` | `claim.Identifier` | Metadata that uniquely identifies the test, typically including a URL and version information. | + +#### Purpose + +The `Entry` struct is used to collect and organize tests by suite when generating printable catalogs. Each entry holds the test’s display name (`testName`) along with its underlying identifier (`identifier`) so that downstream processes can reference or link to the specific test definition. 
+ +#### Related functions (if any) + +| Function | Purpose | +|----------|---------| +| `CreatePrintableCatalogFromIdentifiers` | Builds a map of suite names to slices of `Entry`, grouping tests by their `Suite` field extracted from each `claim.Identifier`. | + +--- + +--- + +### RequestedData + + +**Purpose**: + +**Fields**: + +| Field | Type | Description | +|-------|------|--------------| +| `ManagedDeployments` | `[]string` | Field documentation | +| `SkipScalingTestStatefulsetsname` | `[]string` | Field documentation | +| `ValidProtocolNames` | `[]string` | Field documentation | +| `PartnerName` | `[]string` | Field documentation | +| `ConnectAPIKey` | `[]string` | Field documentation | +| `ConnectProjectID` | `[]string` | Field documentation | +| `ConnectAPIProxyPort` | `[]string` | Field documentation | +| `TargetNameSpaces` | `[]string` | Field documentation | +| `PodsUnderTestLabels` | `[]string` | Field documentation | +| `SkipScalingTestDeploymentsname` | `[]string` | Field documentation | +| `SkipHelmChartList` | `[]string` | Field documentation | +| `CollectorAppEndPoint` | `[]string` | Field documentation | +| `ConnectAPIBaseURL` | `[]string` | Field documentation | +| `OperatorsUnderTestLabels` | `[]string` | Field documentation | +| `TargetCrdFiltersnameSuffix` | `[]string` | Field documentation | +| `TargetCrdFiltersscalable` | `[]string` | Field documentation | +| `ConnectAPIProxyURL` | `[]string` | Field documentation | +| `ManagedStatefulsets` | `[]string` | Field documentation | +| `SkipScalingTestDeploymentsnamespace` | `[]string` | Field documentation | +| `SkipScalingTestStatefulsetsnamespace` | `[]string` | Field documentation | +| `AcceptedKernelTaints` | `[]string` | Field documentation | +| `Servicesignorelist` | `[]string` | Field documentation | +| `ProbeDaemonSetNamespace` | `[]string` | Field documentation | +| `ExecutedBy` | `[]string` | Field documentation | +| `CollectorAppPassword` | `[]string` | Field documentation | +| 
`SelectedOptions` | `[]string` | Field documentation | + +--- + +### ResponseData + +A lightweight container for sending textual responses from the web server. + +#### Fields + +| Field | Type | Description | +|-------|--------|-------------| +| `Message` | `string` | Human‑readable message returned in JSON format (`"message"` key). | + +#### Purpose + +`ResponseData` encapsulates a simple string payload that the web server marshals into JSON for HTTP responses. It is used to convey status or error messages back to clients. + +#### Related functions (none) + +| Function | Purpose | +|----------|---------| +| — | — | + +--- + +## Exported Functions + +### CreatePrintableCatalogFromIdentifiers + +**CreatePrintableCatalogFromIdentifiers** - Organises identifiers by their associated test suite into a printable structure. + +#### Signature (Go) + +```go +func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Organises identifiers by their associated test suite into a printable structure. | +| **Parameters** | `keys` – slice of `claim.Identifier`; each identifier contains an ID and a suite name. | +| **Return value** | A map where the key is a suite name (`string`) and the value is a slice of `Entry` objects representing individual tests. | +| **Key dependencies** | • `make` to initialise the map.
• `append` to accumulate entries per suite. | +| **Side effects** | None – purely functional; no global state or I/O. | +| **How it fits the package** | Used by `outputTestCases` to generate a human‑readable classification string from the catalog of test identifiers. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph BuildMap["Create map"] + A1["Initialise empty map"] --> B1["Iterate over keys"] + B1 --> C1{"For each identifier"} + C1 --> D1["Append Entry to corresponding suite slice"] + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_CreatePrintableCatalogFromIdentifiers --> make + func_CreatePrintableCatalogFromIdentifiers --> append +``` + +#### Functions calling `CreatePrintableCatalogFromIdentifiers` (Mermaid) + +```mermaid +graph TD + outputTestCases --> CreatePrintableCatalogFromIdentifiers +``` + +#### Usage example (Go) + +```go +// Minimal example invoking CreatePrintableCatalogFromIdentifiers +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/webserver" +) + +func main() { + // Example identifiers – in practice these would come from the catalog + ids := []claim.Identifier{ + {Id: "test1", Suite: "suiteA"}, + {Id: "test2", Suite: "suiteB"}, + {Id: "test3", Suite: "suiteA"}, + } + + // Build printable map + catalog := webserver.CreatePrintableCatalogFromIdentifiers(ids) + + fmt.Printf("%+v\n", catalog) +} +``` + +--- + +### GetSuitesFromIdentifiers + +**GetSuitesFromIdentifiers** - Collects all suite names referenced by a slice of `claim.Identifier` values and returns them as a de‑duplicated list. + +#### Signature (Go) + +```go +func GetSuitesFromIdentifiers(keys []claim.Identifier) []string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Collects all suite names referenced by a slice of `claim.Identifier` values and returns them as a de‑duplicated list. 
| **Parameters** | `keys []claim.Identifier` – identifiers whose `Suite` fields are to be extracted. | +| **Return value** | `[]string` – unique suite names in the order first encountered. | +| **Key dependencies** | • `append` (built‑in)
• `arrayhelper.Unique` from `github.com/redhat-best-practices-for-k8s/certsuite/pkg/arrayhelper` | +| **Side effects** | None – pure function; does not modify inputs or global state. | +| **How it fits the package** | Used by the HTTP handler to build a printable catalog of test cases grouped by suite name. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + Start --> ExtractSuites + ExtractSuites --> UniqueFilter + UniqueFilter --> ReturnResult +``` + +- **Start**: function receives `keys`. +- **ExtractSuites**: iterate over each identifier, append its `Suite` field to a slice. +- **UniqueFilter**: call `arrayhelper.Unique` to remove duplicates while preserving order. +- **ReturnResult**: return the de‑duplicated slice. + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_GetSuitesFromIdentifiers --> func_Append + func_GetSuitesFromIdentifiers --> func_Unique +``` + +#### Functions calling `GetSuitesFromIdentifiers` (Mermaid) + +```mermaid +graph TD + func_outputTestCases --> func_GetSuitesFromIdentifiers +``` + +#### Usage example (Go) + +```go +// Minimal example invoking GetSuitesFromIdentifiers +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/pkg/claim" + "github.com/redhat-best-practices-for-k8s/certsuite/webserver" +) + +func main() { + ids := []claim.Identifier{ + {Id: "1", Suite: "alpha"}, + {Id: "2", Suite: "beta"}, + {Id: "3", Suite: "alpha"}, + } + + suites := webserver.GetSuitesFromIdentifiers(ids) + // suites == []string{"alpha", "beta"} +} +``` + +--- + +### StartServer + +**StartServer** - Starts an HTTP server on port 8084 that serves static content and exposes endpoints to run tests, stream logs, and provide test results. The server’s context is enriched with the output folder path for downstream handlers. 
+ +#### 1) Signature (Go) + +```go +func StartServer(outputFolder string) +``` + +#### 2) Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Starts an HTTP server on port 8084 that serves static content and exposes endpoints to run tests, stream logs, and provide test results. The server’s context is enriched with the output folder path for downstream handlers. | +| **Parameters** | `outputFolder string` – Directory where test artifacts are written; stored in request‑context under key `outputFolderCtxKey`. | +| **Return value** | None (the function blocks until the server exits). | +| **Key dependencies** | • `net/http.HandleFunc` for routing.
• `http.Server.ListenAndServe` to bind and serve.
• `context.WithValue` to inject output folder into base context.
• `installReqHandlers()` – registers static file handlers.
• `runHandler` – endpoint `/runFunction`. | +| **Side effects** | • Starts a blocking HTTP server.
• Logs startup message via `log.Info`.
• Panics if the server fails to start. | +| **How it fits the package** | Serves as the entry point for web‑server mode, enabling external tools or browsers to trigger test runs and view results through a browser UI. | + +#### 3) Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["StartServer"] --> B{"Create http.Server"} + B --> C["Set Addr & ReadTimeout"] + B --> D["Define BaseContext"] + D --> E["Inject outputFolder into ctx"] + A --> F["installReqHandlers()"] + F --> G["Register /runFunction handler"] + G --> H["Log startup message"] + H --> I["ListenAndServe()"] + I --> J{"Server error?"} + J -->|"yes"| K["panic(err)"] +``` + +#### 4) Function dependencies (Mermaid) + +```mermaid +graph TD + func_StartServer --> context.WithValue + func_StartServer --> http.HandleFunc + func_StartServer --> installReqHandlers + func_StartServer --> runHandler + func_StartServer --> log.Info + func_StartServer --> http.Server.ListenAndServe +``` + +#### 5) Functions calling `StartServer` (Mermaid) + +```mermaid +graph TD + runTestSuite --> func_StartServer +``` + +#### 6) Usage example (Go) + +```go +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/webserver" +) + +func main() { + // Start the web server and serve test output from "./results" + webserver.StartServer("./results") +} +``` + +--- + +--- + +## Local Functions + +### installReqHandlers + +**installReqHandlers** - Binds several HTTP endpoints (`/`, `/submit.js`, `/logs.js`, etc.) to handler functions that return embedded static content or dynamic data. + +#### Signature (Go) + +```go +func installReqHandlers() () +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Binds several HTTP endpoints (`/`, `/submit.js`, `/logs.js`, etc.) to handler functions that return embedded static content or dynamic data. 
| +| **Parameters** | None | +| **Return value** | None (side‑effecting) | +| **Key dependencies** | `net/http.HandleFunc`, `http.ResponseWriter.Header().Set`, `ResponseWriter.Write`, `http.Error` | +| **Side effects** | Registers global handlers in the default HTTP serve mux; writes static content to responses; sets appropriate MIME types. | +| **How it fits the package** | Invoked by `StartServer`; establishes the endpoints needed for the web UI that interacts with the rest of Certsuite’s server logic. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + subgraph Handlers["Register handlers"] + A["/"] --> B["Set Content‑Type: text/html"] + B --> C["Write indexHTML to response"] + D["/submit.js"] --> E["Set Content‑Type: application/javascript"] + E --> F["Write submit JS"] + G["/logs.js"] --> H["Set Content‑Type: application/javascript"] + H --> I["Write logs JS"] + J["/toast.js"] --> K["Set Content‑Type: application/javascript"] + K --> L["Write toast JS"] + M["/index.js"] --> N["Set Content‑Type: application/javascript"] + N --> O["Write index JS"] + P["/classification.js"] --> Q["Generate classification via outputTestCases()"] + Q --> R["Set Content‑Type: application/javascript"] + R --> S["Write classification JS"] + T["/logstream"] --> U["Invoke logStreamHandler"] + end +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + installReqHandlers --> HandleFunc + HandleFunc --> Set + Set --> Header + Header --> Write + Write --> Error +``` + +#### Functions calling `installReqHandlers` (Mermaid) + +```mermaid +graph TD + StartServer --> installReqHandlers +``` + +#### Usage example (Go) + +```go +// Minimal example invoking installReqHandlers +package main + +import ( + "github.com/redhat-best-practices-for-k8s/certsuite/webserver" + "net/http" +) + +func main() { + // Register handlers before starting the server. 
+	webserver.StartServer("/tmp/output") // StartServer registers all handlers and blocks, serving on :8084 +} +``` + +--- + +--- + +### logStreamHandler + +**logStreamHandler** - Upgrades an HTTP connection to a WebSocket and streams the contents of a log file in real‑time to the client. Each line is converted from ANSI escape codes to HTML before transmission. + +#### Signature (Go) + +```go +func logStreamHandler(w http.ResponseWriter, r *http.Request) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Upgrades an HTTP connection to a WebSocket and streams the contents of a log file in real‑time to the client. Each line is converted from ANSI escape codes to HTML before transmission. | +| **Parameters** | `w http.ResponseWriter` – original HTTP response writer.
`r *http.Request` – incoming request containing context for upgrade. | +| **Return value** | None (function has no return). | +| **Key dependencies** | • `upgrader.Upgrade` – converts HTTP to WebSocket.
• `log.Info` – logs status and errors.
• `bufio.NewScanner`, `scanner.Scan()`, `scanner.Bytes()` – read log file line by line.
• `ansihtml.ConvertToHTML` – translate ANSI to HTML.
• `conn.WriteMessage` – send data over WebSocket.
• `time.Sleep(logTimeout)` – throttle message sending. | +| **Side effects** | • Creates a WebSocket connection that remains open until an error occurs or the client disconnects.
• Logs errors and informational messages via the package logger.
• Writes each log line (after conversion) to the WebSocket, appending `
` for HTML formatting. | +| **How it fits the package** | Part of the `webserver` module’s debugging interface; exposes a live log view that can be embedded in web dashboards or accessed via browser consoles. | + +#### Internal workflow + +```mermaid +flowchart TD + A["HTTP request received"] --> B{"Upgrade to WebSocket"} + B -->|"Success"| C["Open scanner on log file"] + C --> D{"Read next line?"} + D -->|"Yes"| E["Convert ANSI → HTML +
"] + E --> F["Send via conn.WriteMessage"] + F --> G["Sleep for logTimeout"] + G --> D + D -->|"No (EOF)"| H["Check scanner.Err()"] + H -->|"Error"| I["Log error & return"] + H -->|"OK"| J["Loop back to C"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_logStreamHandler --> func_upgrader.Upgrade + func_logStreamHandler --> func_Log.Info + func_logStreamHandler --> func_bufio.NewScanner + func_logStreamHandler --> func_ansihtml.ConvertToHTML + func_logStreamHandler --> func_websocket.Conn.WriteMessage + func_logStreamHandler --> func_time.Sleep +``` + +#### Functions calling `logStreamHandler` + +None – this function is currently not referenced elsewhere in the package. + +#### Usage example (Go) + +```go +// Minimal example invoking logStreamHandler +package main + +import ( + "net/http" + "github.com/redhat-best-practices-for-k8s/certsuite/webserver" +) + +func main() { + http.HandleFunc("/logs", webserver.logStreamHandler) + _ = http.ListenAndServe(":8080", nil) +} +``` + +--- + +### outputTestCases + +**outputTestCases** - Builds a string representation of the test case catalog, formatted as a JavaScript object literal. The string is later written to an HTTP response for client‑side consumption. + +#### Signature (Go) + +```go +func outputTestCases() (outString string) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Builds a string representation of the test case catalog, formatted as a JavaScript object literal. The string is later written to an HTTP response for client‑side consumption. | +| **Parameters** | None | +| **Return value** | `outString` – a single string containing the serialized classification data (e.g., `classification= {"testID": [...], ...}`). | +| **Key dependencies** | • `make`, `append`, `len` – slice construction
• `sort.Slice`, `sort.Strings` – ordering of identifiers and suites
• `CreatePrintableCatalogFromIdentifiers(keys)` – groups entries by suite
• `GetSuitesFromIdentifiers(keys)` – extracts unique suite names
• `fmt.Sprintf`, `strings.ReplaceAll` – string formatting and sanitisation
• `toJSONString(map[string]string)` – serialises the `CategoryClassification` map | +| **Side effects** | No global state changes. Produces output only via the return value. | +| **How it fits the package** | Used by the HTTP handler for `/classification.js` to supply the front‑end with test case metadata in a format that can be embedded directly into JavaScript code. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B["Collect identifiers from catalog"] + B --> C["Sort identifiers by ID"] + C --> D["Group entries per suite via CreatePrintableCatalogFromIdentifiers"] + D --> E["Extract and sort unique suite names via GetSuitesFromIdentifiers & sort.Strings"] + E --> F["Iterate suites → entries"] + F --> G["Format each entry into JSON‑like string"] + G --> H["Append to outString"] + H --> I["Close object literal"] + I --> J["Return outString"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_outputTestCases --> func_CreatePrintableCatalogFromIdentifiers + func_outputTestCases --> func_GetSuitesFromIdentifiers + func_outputTestCases --> fmt.Sprintf + func_outputTestCases --> strings.ReplaceAll + func_outputTestCases --> toJSONString +``` + +#### Functions calling `outputTestCases` (Mermaid) + +```mermaid +graph TD + func_installReqHandlers --> func_outputTestCases +``` + +#### Usage example (Go) + +```go +// Minimal example invoking outputTestCases +package main + +import ( + "fmt" + "github.com/redhat-best-practices-for-k8s/certsuite/webserver" +) + +func main() { + classificationJS := webserver.OutputTestCases() + fmt.Println(classificationJS) +} +``` + +> **Note**: `outputTestCases` is unexported; in practice it is accessed via the HTTP handler defined in `installReqHandlers`. + +--- + +### runHandler + +**runHandler** - Processes a multipart/form‑data request containing a kubeconfig file and JSON options, updates the CERTSUITE configuration, runs the test suite, and returns a JSON status message. 
+ +#### Signature (Go) + +```go +func runHandler(w http.ResponseWriter, r *http.Request) +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Processes a multipart/form‑data request containing a kubeconfig file and JSON options, updates the CERTSUITE configuration, runs the test suite, and returns a JSON status message. | +| **Parameters** | `w http.ResponseWriter` – HTTP response writer.
`r *http.Request` – incoming HTTP request. | +| **Return value** | None (writes directly to `w`). | +| **Key dependencies** | - `bytes.NewBufferString`,
- `log.SetLogger`, `log.GetMultiLogger`,
- `json.Unmarshal`,
- `r.FormValue`, `r.FormFile`,
- `os.CreateTemp`, `io.Copy`, `os.Remove`,
- `updateTnf`, `os.WriteFile`,
- `clientsholder.GetNewClientsHolder`,
- `certsuite.LoadChecksDB`,
- `checksdb.InitLabelsExprEvaluator`,
- `log.CreateGlobalLogFile`,
- `certsuite.Run` | +| **Side effects** | Creates temporary file for kubeconfig, writes logs to a buffer and log file, modifies configuration YAML on disk, runs the entire CERTSUITE test suite (which may spawn goroutines), removes temp file, returns JSON response. | +| **How it fits the package** | Serves as the core HTTP endpoint of the webserver, orchestrating user input handling, configuration updates, test execution, and result reporting in web mode. | + +#### Internal workflow + +```mermaid +flowchart TD + A["Receive HTTP request"] --> B["Initialize log buffer"] + B --> C["Parse JSON options"] + C --> D["Handle kubeconfig file upload"] + D --> E["Create temp kubeconfig file"] + E --> F["Copy uploaded data to temp file"] + F --> G["Update certsuite_config.yml via updateTnf"] + G --> H["Write modified config back"] + H --> I["Prepare labels filter string"] + I --> J["Instantiate clients holder with temp kubeconfig"] + J --> K["Load checks database with labels filter"] + K --> L["Initialize label evaluator"] + L --> M["Create global log file in output folder"] + M --> N["Run certsuite.Run(labelsFilter, outputFolder)"] + N --> O["Build JSON success response"] + O --> P["Write response to client"] +``` + +#### Function dependencies + +```mermaid +graph TD + func_runHandler --> func_NewBufferString + func_runHandler --> func_SetLogger + func_runHandler --> func_GetMultiLogger + func_runHandler --> func_Unmarshal + func_runHandler --> func_FormValue + func_runHandler --> func_FormFile + func_runHandler --> func_CreateTemp + func_runHandler --> func_Copy + func_runHandler --> func_Remove + func_runHandler --> func_updateTnf + func_runHandler --> func_WriteFile + func_runHandler --> func_GetNewClientsHolder + func_runHandler --> func_LoadChecksDB + func_runHandler --> func_InitLabelsExprEvaluator + func_runHandler --> func_CreateGlobalLogFile + func_runHandler --> func_Run +``` + +#### Functions calling `runHandler` + +None – this function is currently not referenced 
elsewhere in the package. + +#### Usage example (Go) + +```go +package main + +import ( +	"net/http" +	"github.com/redhat-best-practices-for-k8s/certsuite/webserver" +) + +func main() { +	http.HandleFunc("/", webserver.runHandler) // register handler (note: runHandler is unexported; usable only inside the webserver package) +	http.ListenAndServe(":8080", nil) +} +``` + +--- + +### toJSONString + +**toJSONString** - Produces an indented JSON representation of a `map[string]string`. Useful for embedding classification details in output. + +#### Signature (Go) + +```go +func toJSONString(data map[string]string) string +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Produces an indented JSON representation of a `map[string]string`. Useful for embedding classification details in output. | +| **Parameters** | `data` (map[string]string) – key‑value pairs to serialize. | +| **Return value** | `string` – UTF‑8 JSON text; empty string if marshalling fails. | +| **Key dependencies** | • `encoding/json.MarshalIndent`
• Built‑in `string()` conversion | +| **Side effects** | None. The function is pure: it only reads the input map and returns a new string. | +| **How it fits the package** | Used by `outputTestCases` to embed category classification data into a larger JSON‑like report. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Start"] --> B{"MarshalIndent"} + B --> C["Success"] + B -->|"Error"| D["Return empty string"] + C --> E["Convert bytes to string"] + E --> F["End"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_toJSONString --> func_MarshalIndent + func_toJSONString --> func_string +``` + +#### Functions calling `toJSONString` (Mermaid) + +```mermaid +graph TD + func_outputTestCases --> func_toJSONString +``` + +#### Usage example (Go) + +```go +// Minimal example invoking toJSONString +package main + +import ( + "fmt" +) + +func main() { + data := map[string]string{ + "category": "Security", + "description": "Ensures proper security configuration.", + } + jsonStr := toJSONString(data) + fmt.Println(jsonStr) +} +``` + +--- + +### updateTnf + +**updateTnf** - Reads a YAML configuration, applies user‑supplied overrides from `*RequestedData`, and returns the updated YAML. + +#### Signature (Go) + +```go +func updateTnf(tnfConfig []byte, data *RequestedData) []byte +``` + +#### Summary Table + +| Aspect | Details | +|--------|---------| +| **Purpose** | Reads a YAML configuration, applies user‑supplied overrides from `*RequestedData`, and returns the updated YAML. | +| **Parameters** | `tnfConfig` – original YAML bytes; `data` – pointer to `RequestedData` containing override values. | +| **Return value** | Updated YAML as a byte slice. The function never returns an error; fatal errors terminate the process via logging. | +| **Key dependencies** | *`gopkg.in/yaml.v3`* for unmarshalling/marshalling
*`github.com/redhat-best-practices-for-k8s/certsuite/internal/log`* for fatal logging | +| **Side effects** | Calls `log.Fatal` on any unmarshal/marshal error, which writes to stderr and exits the program. No other I/O or global state is modified. | +| **How it fits the package** | Used by the HTTP handler (`runHandler`) to persist user‑supplied configuration changes back into the main test suite YAML file before execution. | + +#### Internal workflow (Mermaid) + +```mermaid +flowchart TD + A["Unmarshal tnfConfig"] --> B{"Success?"} + B -- No --> C["log.Fatal Error unmarshalling YAML"] + B -- Yes --> D["Build namespace slice"] + D --> E["Set config.TargetNameSpaces"] + E --> F["Set PodsUnderTestLabels"] + F --> G["Build managedDeployments slice"] + G --> H["Set config.ManagedDeployments"] + H --> I["Build managedStatefulsets slice"] + I --> J["Set config.ManagedStatefulsets"] + J --> K["Build crdFilter slice"] + K --> L["Set config.CrdFilters"] + L --> M["Build acceptedKernelTaints slice"] + M --> N["Set config.AcceptedKernelTaints"] + N --> O["Build skipHelmChartList slice"] + O --> P["Set config.SkipHelmChartList"] + P --> Q["Build skipScalingTestDeployments slice"] + Q --> R["Set config.SkipScalingTestDeployments"] + R --> S["Build skipScalingTestStatefulSets slice"] + S --> T["Set config.SkipScalingTestStatefulSets"] + T --> U["Copy scalar fields (ServicesIgnoreList, ValidProtocolNames)"] + U --> V["Conditional assignments for password, names, API config"] + V --> W["Marshal updated config to YAML"] + W -- No --> X["log.Fatal Error marshaling YAML"] + W -- Yes --> Y["Return newData"] +``` + +#### Function dependencies (Mermaid) + +```mermaid +graph TD + func_updateTnf --> func_yaml.Unmarshal + func_updateTnf --> func_log.Logger.Fatal + func_updateTnf --> func_append + func_updateTnf --> func_len + func_updateTnf --> func_yaml.Marshal +``` + +#### Functions calling `updateTnf` (Mermaid) + +```mermaid +graph TD + func_runHandler --> func_updateTnf +``` + +#### 
Usage example (Go) + +```go +// Minimal example invoking updateTnf + +import ( + "fmt" + + "github.com/redhat-best-practices-for-k8s/certsuite/webserver" +) + +// Assume we have the original YAML and a RequestedData instance. +originalYAML := []byte(`targetNamespaces: []`) + +data := &webserver.RequestedData{ + TargetNameSpaces: []string{"default"}, + PodsUnderTestLabels: []string{"app=demo"}, + OperatorsUnderTestLabels: []string{}, + ManagedDeployments: []string{"my-app"}, + // ... other fields as needed +} + +updatedYAML := webserver.updateTnf(originalYAML, data) +fmt.Printf("Updated YAML:\n%s\n", string(updatedYAML)) +``` + +--- + +--- diff --git a/internal/cli/cli.go b/internal/cli/cli.go index d8f3d17ba..62f6b2152 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -56,16 +56,44 @@ var ( stopChan chan bool ) +// PrintBanner Displays a banner at startup +// +// This function writes the predefined banner string to standard output using +// the fmt package. It is invoked during application initialization to show +// branding or version information. No parameters are taken, and it does not +// return a value. func PrintBanner() { fmt.Print(banner) } +// cliCheckLogSniffer forwards terminal output to a logging channel +// +// This type implements an io.Writer that captures data written by the CLI when +// running in a TTY environment. When Write is called, it attempts to send the +// byte slice as a string over a dedicated channel; if the channel is not ready +// or closed, the data is silently dropped to avoid blocking execution. In +// non‑TTY scenarios, all writes are simply acknowledged without any side +// effects. type cliCheckLogSniffer struct{} +// isTTY determines whether standard input is a terminal +// +// The function checks if the current process’s stdin corresponds to an +// interactive terminal device by converting its file descriptor to an integer +// and using the external library’s IsTerminal call. 
It returns true when +// output can be formatted for a tty, otherwise false. This value influences how +// log lines are printed or suppressed in the CLI. func isTTY() bool { return term.IsTerminal(int(os.Stdin.Fd())) } +// updateRunningCheckLine updates the running check status line with elapsed time and latest log +// +// This routine starts a ticker that triggers every to refresh the console +// output for a running test. It listens on a channel for new log messages, +// updating the displayed line accordingly, and stops when a stop signal is +// received. The function prints the check name, elapsed time, and optionally a +// cropped latest log if terminal width permits. func updateRunningCheckLine(checkName string, stopChan <-chan bool) { startTime := time.Now() @@ -93,11 +121,23 @@ func updateRunningCheckLine(checkName string, stopChan <-chan bool) { } } +// getTerminalWidth Determines the current terminal width in columns +// +// It calls a system routine to query the size of the standard input device, +// returning the number of columns available for output. The value is used to +// format log lines so they fit within the terminal without wrapping or +// truncating unexpectedly. func getTerminalWidth() int { width, _, _ := term.GetSize(int(os.Stdin.Fd())) return width } +// cropLogLine Trims a log line to fit terminal width +// +// The function removes newline characters from the input string and then +// truncates it if its length exceeds the specified maximum width. It returns +// the processed string, which is safe to display in a single-line CLI output +// without breaking formatting. func cropLogLine(line string, maxAvailableWidth int) string { // Remove line feeds to avoid the log line to break the cli output. 
filteredLine := strings.ReplaceAll(line, "\n", " ") @@ -108,6 +148,12 @@ func cropLogLine(line string, maxAvailableWidth int) string { return filteredLine } +// printRunningCheckLine Displays the progress of a running check +// +// It prints a status line that includes the check name, elapsed time since +// start, and optionally a cropped log message when running in a terminal. If +// output is not a TTY it simply writes the line with a newline. The function +// clears the current terminal line before printing to keep the display updated. func printRunningCheckLine(checkName string, startTime time.Time, logLine string) { // Minimum space on the right needed to show the current last log line. const minColsNeededForLogLine = 40 @@ -129,7 +175,14 @@ func printRunningCheckLine(checkName string, startTime time.Time, logLine string fmt.Print(ClearLineCode + line) } -// Implements the io.Write for the checks' custom handler for slog. +// cliCheckLogSniffer.Write Writes log data to a channel when running in a terminal +// +// When the process is attached to a TTY, this method attempts to send the +// provided byte slice as a string into a dedicated logger channel without +// blocking; if the channel is not ready or closed, the data is silently +// dropped. In non‑TTY environments it simply returns the length of the input +// and no error, effectively discarding output. The function always reports the +// full number of bytes processed. func (c *cliCheckLogSniffer) Write(p []byte) (n int, err error) { if !isTTY() { return len(p), nil @@ -144,6 +197,12 @@ func (c *cliCheckLogSniffer) Write(p []byte) (n int, err error) { return len(p), nil } +// PrintResultsTable Displays a formatted summary of test suite outcomes +// +// The function accepts a mapping from group names to integer slices that +// represent passed, failed, and skipped counts. It outputs a neatly aligned +// table with column headers and separators, iterating over each group to show +// its results. 
After listing all groups, it adds blank lines for readability. func PrintResultsTable(results map[string][]int) { fmt.Printf("\n") fmt.Println("-----------------------------------------------------------") @@ -159,6 +218,13 @@ func PrintResultsTable(results map[string][]int) { fmt.Printf("\n") } +// stopCheckLineGoroutine Signals the check line goroutine to stop +// +// This function checks whether a global channel used for signalling is set, +// sends a true value to that channel if it exists, then clears the reference so +// subsequent calls have no effect. It is called by various print functions when +// a check completes or is aborted, ensuring any ongoing line output goroutine +// terminates cleanly. func stopCheckLineGoroutine() { if stopChan == nil { // This may happen for checks that were skipped if no compliant nor non-compliant objects found. @@ -170,6 +236,13 @@ func stopCheckLineGoroutine() { stopChan = nil } +// PrintCheckSkipped Logs a skipped check with its reason +// +// This function stops the ongoing check line goroutine, then prints a formatted +// message indicating that the specified check was skipped along with the +// provided reason. The output includes control codes to clear the current +// terminal line and displays the skip tag followed by the check name and +// explanation. No value is returned. func PrintCheckSkipped(checkName, reason string) { // It shouldn't happen too often, but some checks might be set as skipped inside the checkFn // if neither compliant objects nor non-compliant objects were found. @@ -178,6 +251,13 @@ func PrintCheckSkipped(checkName, reason string) { fmt.Print(ClearLineCode + "[ " + CheckResultTagSkip + " ] " + checkName + " (" + reason + ")\n") } +// PrintCheckRunning Displays a running check status message +// +// The function prints an initial line indicating that a specific check is in +// progress, appending a newline when output is not a terminal to keep the +// display clean. 
It then starts a background goroutine that updates this line +// every second with elapsed time and any new log messages until the check +// completes and signals the stop channel. func PrintCheckRunning(checkName string) { stopChan = make(chan bool) checkLoggerChan = make(chan string) @@ -192,30 +272,60 @@ func PrintCheckRunning(checkName string) { go updateRunningCheckLine(checkName, stopChan) } +// PrintCheckPassed Shows a passed check with formatted output +// +// The function stops any active line‑printing goroutine, then writes a clear +// line indicator followed by a pass tag and the provided check name to standard +// output. It uses predefined constants for formatting and ensures the display +// is updated correctly before returning. func PrintCheckPassed(checkName string) { stopCheckLineGoroutine() fmt.Print(ClearLineCode + "[ " + CheckResultTagPass + " ] " + checkName + "\n") } +// PrintCheckFailed Displays a failed check status line +// +// The function stops the running goroutine that updates the check progress, +// then prints a formatted message indicating failure for the given check name. +// It writes the output directly to standard output with escape codes to clear +// the previous line and show a red "FAIL" tag followed by the check identifier. func PrintCheckFailed(checkName string) { stopCheckLineGoroutine() fmt.Print(ClearLineCode + "[ " + CheckResultTagFail + " ] " + checkName + "\n") } +// PrintCheckAborted Notifies the user that a check has been aborted +// +// This routine stops any ongoing line‑printing goroutine, then outputs a +// formatted message indicating the check’s name and the reason for abortion. +// The output includes special control codes to clear the current terminal line +// before displaying the status tag. No value is returned. 
func PrintCheckAborted(checkName, reason string) { stopCheckLineGoroutine() fmt.Print(ClearLineCode + "[ " + CheckResultTagAborted + " ] " + checkName + " (" + reason + ")\n") } +// PrintCheckErrored Stops the progress display and shows an error line +// +// This routine halts any ongoing check‑line goroutine, clears the current +// terminal line, and prints a formatted message indicating that the specified +// check has failed with an error. The output includes a clear line code, an +// error tag, and the check identifier. func PrintCheckErrored(checkName string) { stopCheckLineGoroutine() fmt.Print(ClearLineCode + "[ " + CheckResultTagError + " ] " + checkName + "\n") } +// WrapLines Breaks a string into lines that fit within a maximum width +// +// The function splits the input text on newline characters, then examines each +// line to see if it exceeds the specified width. Lines longer than the limit +// are broken into words and reassembled so no resulting line surpasses the +// maximum length. The wrapped lines are returned as a slice of strings. func WrapLines(text string, maxWidth int) []string { lines := strings.Split(text, "\n") wrappedLines := make([]string, 0, len(lines)) @@ -243,14 +353,34 @@ func WrapLines(text string, maxWidth int) []string { return wrappedLines } +// LineAlignLeft left‑justifies a string to a given column width +// +// The function takes an input string and a desired width, returning the string +// padded with spaces on the right so that its total length equals the specified +// width. It uses formatted printing with a negative field width to achieve left +// alignment. If the original string exceeds the requested width, it is returned +// unchanged without truncation. 
func LineAlignLeft(s string, w int) string { return fmt.Sprintf("%[1]*s", -w, s) } +// LineAlignCenter Centers a string within a specified width +// +// The function takes an input string and a target width, then returns the +// string padded with spaces so it appears centered when printed. It calculates +// padding by determining how many leading spaces are needed to shift the +// original text toward the middle of the given width. The resulting string is +// always exactly the specified length. func LineAlignCenter(s string, w int) string { return fmt.Sprintf("%[1]*s", -w, fmt.Sprintf("%[1]*s", (w+len(s))/2, s)) //nolint:mnd // magic number } +// LineColor Adds ANSI color codes around text +// +// This function takes a plain string and a color code, prefixes the string with +// the color escape sequence, appends the reset code, and returns the resulting +// colored string. It is used to display terminal output in different colors +// without altering the original content. func LineColor(s, color string) string { return color + s + Reset } diff --git a/internal/clientsholder/clientsholder.go b/internal/clientsholder/clientsholder.go index 4375028ba..bc5000a9a 100644 --- a/internal/clientsholder/clientsholder.go +++ b/internal/clientsholder/clientsholder.go @@ -60,6 +60,14 @@ const ( DefaultTimeout = 10 * time.Second ) +// ClientsHolder Holds configured Kubernetes API clients for cluster interaction +// +// This structure aggregates multiple client interfaces, including core, +// dynamic, extension, networking, and OLM clients, along with configuration +// data such as the REST config and kubeconfig bytes. It provides a single point +// from which tests or utilities can execute commands inside pods, query +// resources, or manipulate cluster objects. The ready flag indicates whether +// the holder has been fully initialized. 
type ClientsHolder struct { RestConfig *rest.Config DynamicClient dynamic.Interface @@ -81,16 +89,22 @@ type ClientsHolder struct { var clientsHolder = ClientsHolder{} -// SetupFakeOlmClient Overrides the OLM client with the fake interface object for unit testing. Loads -// the mocking objects so olmv interface methods can find them. +// SetupFakeOlmClient Replaces the real OLM client with a fake for testing +// +// This function takes a slice of Kubernetes objects that represent mocked OLM +// resources. It constructs a new fake client set containing those objects and +// assigns it to the package's client holder, enabling tests to interact with +// OLM APIs without contacting an actual cluster. func SetupFakeOlmClient(olmMockObjects []runtime.Object) { clientsHolder.OlmClient = olmFakeClient.NewSimpleClientset(olmMockObjects...) } -// GetTestClientHolder Overwrites the existing clientholders with a mocked version for unit testing. -// Only pure k8s interfaces will be available. The runtime objects must be pure k8s ones. -// For other (OLM, ) -// runtime mocking objects loading, use the proper clientset mocking function. +// GetTestClientsHolder Creates a mocked client holder for unit tests +// +// This function accepts a slice of runtime objects that represent Kubernetes +// resources and builds separate slices for each supported client type. It then +// initializes fake clients with these objects, marks the holder as ready, and +// returns it for use in testing scenarios. // //nolint:funlen,gocyclo func GetTestClientsHolder(k8sMockObjects []runtime.Object) *ClientsHolder { @@ -160,26 +174,57 @@ func GetTestClientsHolder(k8sMockObjects []runtime.Object) *ClientsHolder { return &clientsHolder } +// SetTestK8sClientsHolder Stores a Kubernetes client for test usage +// +// This function assigns the provided Kubernetes interface to an internal holder +// and marks it as ready. 
It is intended for tests that require a mock or real +// client without interacting with a live cluster. After execution, other +// components can retrieve the stored client from the holder. func SetTestK8sClientsHolder(k8sClient kubernetes.Interface) { clientsHolder.K8sClient = k8sClient clientsHolder.ready = true } +// SetTestK8sDynamicClientsHolder Assigns a test Kubernetes dynamic client to the internal holder +// +// This function stores the provided dynamic client instance in an internal +// structure used by tests, marking the holder as ready for use. It replaces any +// existing client reference and enables subsequent code that relies on the +// dynamic client to operate against this test instance. func SetTestK8sDynamicClientsHolder(dynamicClient dynamic.Interface) { clientsHolder.DynamicClient = dynamicClient clientsHolder.ready = true } +// SetTestClientGroupResources Stores a list of API resource group definitions +// +// This function receives an array of API resource lists and assigns it to the +// internal holder used by the client package. It updates the shared state that +// other components reference when interacting with Kubernetes groups. No value +// is returned, and the operation replaces any previously stored resources. func SetTestClientGroupResources(groupResources []*metav1.APIResourceList) { clientsHolder.GroupResources = groupResources } +// ClearTestClientsHolder Resets the Kubernetes client and marks holder as not ready +// +// This function clears the stored Kubernetes client reference, setting it to +// nil, and updates an internal flag to indicate that the holder is no longer +// ready for use. It does not return a value and has no parameters. After +// calling this, any attempt to access the client will need reinitialization. func ClearTestClientsHolder() { clientsHolder.K8sClient = nil clientsHolder.ready = false } -// GetClientsHolder returns the singleton ClientsHolder object. 
+// GetClientsHolder Returns a cached instance of the Kubernetes clients holder +// +// This function checks whether the global ClientsHolder has already been +// initialized and ready; if so, it returns that instance immediately. If not, +// it attempts to create a new holder by calling the internal constructor with +// any provided configuration filenames. Errors during creation are logged as +// fatal, terminating the program. The resulting holder is returned for use by +// other parts of the application. func GetClientsHolder(filenames ...string) *ClientsHolder { if clientsHolder.ready { return &clientsHolder @@ -191,6 +236,12 @@ func GetClientsHolder(filenames ...string) *ClientsHolder { return clientsHolder } +// GetNewClientsHolder Creates a Kubernetes clients holder from the provided kubeconfig +// +// The function takes a file path to a kubeconfig, uses an internal constructor +// to instantiate a ClientsHolder with all necessary API clients, and logs a +// fatal error if construction fails. On success it returns a pointer to the +// fully initialized holder for use by other components. func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder { _, err := newClientsHolder(kubeconfigFile) if err != nil { @@ -200,6 +251,13 @@ func GetNewClientsHolder(kubeconfigFile string) *ClientsHolder { return &clientsHolder } +// createByteArrayKubeConfig Converts a Kubernetes configuration into YAML byte array +// +// The function takes a pointer to a client configuration structure and +// serializes it into its YAML representation using the client-go library. It +// returns the resulting bytes along with any error that occurs during +// serialization, allowing callers to use the data as a kubeconfig file in +// memory. 
func createByteArrayKubeConfig(kubeConfig *clientcmdapi.Config) ([]byte, error) { yamlBytes, err := clientcmd.Write(*kubeConfig) if err != nil { @@ -208,8 +266,13 @@ func createByteArrayKubeConfig(kubeConfig *clientcmdapi.Config) ([]byte, error) return yamlBytes, nil } -// Creates a clientcmdapi.Config object from a rest.Config. -// Based on https://github.com/kubernetes/client-go/issues/711#issuecomment-1666075787 +// GetClientConfigFromRestConfig Creates a kubeconfig configuration from a REST client +// +// It accepts a Kubernetes rest.Config pointer and builds an equivalent +// clientcmdapi.Config structure containing cluster, context, and authentication +// information. The resulting config includes the server URL, certificate +// authority path, bearer token, and sets a default cluster and context for use +// by other components. func GetClientConfigFromRestConfig(restConfig *rest.Config) *clientcmdapi.Config { return &clientcmdapi.Config{ Kind: "Config", @@ -235,6 +298,15 @@ func GetClientConfigFromRestConfig(restConfig *rest.Config) *clientcmdapi.Config } } +// getClusterRestConfig Retrieves a Kubernetes REST configuration from in‑cluster or kubeconfig files +// +// The function first attempts to obtain an in‑cluster configuration; if +// successful it converts that config into a kubeconfig byte array for +// downstream use and returns the rest.Config. If not running inside a cluster, +// it requires one or more kubeconfig file paths, merges them with precedence +// rules, creates a temporary kubeconfig representation, extracts the REST +// client configuration from it, and returns that configuration along with any +// error encountered. 
func getClusterRestConfig(filenames ...string) (*rest.Config, error) { restConfig, err := rest.InClusterConfig() if err == nil { @@ -288,7 +360,14 @@ func getClusterRestConfig(filenames ...string) (*rest.Config, error) { return restConfig, nil } -// GetClientsHolder instantiate an ocp client +// newClientsHolder Creates a holder of Kubernetes client interfaces based on provided kubeconfig files +// +// It loads a rest configuration from the given kubeconfig paths or in-cluster +// settings, then initializes numerous typed and dynamic clients for API +// extensions, OLM, OpenShift, networking, scaling, and CNCF networking. The +// function also retrieves cluster resource listings and prepares a REST mapper +// for scale operations. Upon successful setup, it marks the holder as ready and +// returns it; otherwise an error is returned. func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:funlen // this is a special function with lots of assignments log.Info("Creating k8s go-clients holder.") @@ -369,12 +448,25 @@ func newClientsHolder(filenames ...string) (*ClientsHolder, error) { //nolint:fu return &clientsHolder, nil } +// Context Represents a target container within a pod +// +// This structure holds the namespace, pod name, and container name used when +// executing commands inside Kubernetes pods. It provides accessor methods to +// retrieve each field value. The context is typically created with NewContext +// and passed to command execution functions. type Context struct { namespace string podName string containerName string } +// NewContext Creates a context for running commands inside a specific pod container +// +// This function takes the namespace, pod name, and container name of a probe +// pod and returns a Context object that holds those values. The returned +// Context is used by other components to target the correct container when +// executing shell commands via the client holder. 
No additional processing or +// validation occurs; it simply packages the identifiers into the struct. func NewContext(namespace, podName, containerName string) Context { return Context{ namespace: namespace, @@ -383,14 +475,30 @@ func NewContext(namespace, podName, containerName string) Context { } } +// Context.GetNamespace retrieves the namespace from the context +// +// This method accesses the internal namespace field of a Context instance and +// returns it as a string. It does not modify any state or perform additional +// logic, simply exposing the value stored during context creation. func (c *Context) GetNamespace() string { return c.namespace } +// Context.GetPodName returns the pod name stored in the context +// +// This method retrieves and returns the podName field from a Context instance. +// It takes no arguments and always yields a string representing the current pod +// identifier used for Kubernetes API calls. func (c *Context) GetPodName() string { return c.podName } +// Context.GetContainerName Returns the current pod's container name +// +// This method retrieves the container name stored in the Context object. It +// accesses an internal field that holds the name of the container to which +// commands will be executed or operations will target. The returned string is +// used by other components when interacting with Kubernetes pods. func (c *Context) GetContainerName() string { return c.containerName } diff --git a/internal/clientsholder/command.go b/internal/clientsholder/command.go index 07d378007..e99a70c7d 100644 --- a/internal/clientsholder/command.go +++ b/internal/clientsholder/command.go @@ -27,12 +27,25 @@ import ( "k8s.io/kubectl/pkg/scheme" ) +// Command Executes a command inside a container +// +// This method runs the given shell command within a specified container context +// and captures both its standard output and error streams. 
It returns the +// captured stdout, stderr, and an error value that indicates whether the +// execution succeeded or failed. +// //go:generate moq -out command_moq.go . Command type Command interface { ExecCommandContainer(Context, string) (string, string, error) } -// ExecCommand runs command in the pod and returns buffer output. +// ClientsHolder.ExecCommandContainer Runs a shell command inside a specific pod container +// +// The function builds an exec request to the Kubernetes API, targeting the +// namespace, pod, and container provided by the context. It streams the command +// output into buffers, returning both standard output and error as strings +// along with any execution error. Logging is performed for debugging and error +// tracing. func (clientsholder *ClientsHolder) ExecCommandContainer( ctx Context, command string) (stdout, stderr string, err error) { commandStr := []string{"sh", "-c", command} diff --git a/internal/clientsholder/command_moq.go b/internal/clientsholder/command_moq.go index 63d050e2b..fed937f96 100644 --- a/internal/clientsholder/command_moq.go +++ b/internal/clientsholder/command_moq.go @@ -11,21 +11,12 @@ import ( // If this is not the case, regenerate this file with moq. var _ Command = &CommandMock{} -// CommandMock is a mock implementation of Command. +// CommandMock Provides a mock implementation of Command for testing // -// func TestSomethingThatUsesCommand(t *testing.T) { -// -// // make and configure a mocked Command -// mockedCommand := &CommandMock{ -// ExecCommandContainerFunc: func(context Context, s string) (string, string, error) { -// panic("mock out the ExecCommandContainer method") -// }, -// } -// -// // use mockedCommand in code that requires Command -// // and then make assertions. -// -// } +// The struct holds a function field that replaces the real ExecCommandContainer +// method, allowing tests to supply custom behavior. 
It records each call with +// its context and string arguments in an internal slice protected by a +// read‑write mutex. A helper returns the recorded calls for assertions. type CommandMock struct { // ExecCommandContainerFunc mocks the ExecCommandContainer method. ExecCommandContainerFunc func(context Context, s string) (string, string, error) @@ -43,7 +34,12 @@ type CommandMock struct { lockExecCommandContainer sync.RWMutex } -// ExecCommandContainer calls ExecCommandContainerFunc. +// CommandMock.ExecCommandContainer invokes a user-defined function to execute container commands +// +// This method records the call arguments, ensures thread safety with locks, and +// then delegates execution to the mock's ExecCommandContainerFunc. If no +// implementation is provided it panics to signal misuse. The return values are +// the stdout, stderr output strings and an error from the underlying function. func (mock *CommandMock) ExecCommandContainer(context Context, s string) (string, string, error) { if mock.ExecCommandContainerFunc == nil { panic("CommandMock.ExecCommandContainerFunc: method is nil but Command.ExecCommandContainer was just called") @@ -61,10 +57,14 @@ func (mock *CommandMock) ExecCommandContainer(context Context, s string) (string return mock.ExecCommandContainerFunc(context, s) } -// ExecCommandContainerCalls gets all the calls that were made to ExecCommandContainer. -// Check the length with: +// CommandMock.ExecCommandContainerCalls retrieves every ExecCommandContainer call that has been logged // -// len(mockedCommand.ExecCommandContainerCalls()) +// This method gathers all the calls made to ExecCommandContainer into a slice +// of structures containing the execution context and the string argument used. +// It acquires a read lock on the internal mutex to safely access the stored +// calls, then releases the lock before returning the slice. 
The result allows +// callers to inspect or assert how many times and with what parameters +// ExecCommandContainer was invoked. func (mock *CommandMock) ExecCommandContainerCalls() []struct { Context Context S string diff --git a/internal/crclient/crclient.go b/internal/crclient/crclient.go index 87785c11e..1f4a6736a 100644 --- a/internal/crclient/crclient.go +++ b/internal/crclient/crclient.go @@ -30,6 +30,13 @@ import ( const PsRegex = `(?m)^(\d+?)\s+?(\d+?)\s+?(\d+?)\s+?(.*?)$` +// Process Represents a running process inside a container +// +// This structure holds the identifier, parent identifier, namespace, and +// command line arguments for a single operating system process discovered +// within a container’s PID namespace. The fields enable callers to +// distinguish processes by their unique IDs and to trace relationships between +// child and parent processes during diagnostics. type Process struct { PidNs, Pid, PPid int Args string @@ -42,13 +49,24 @@ const ( RetrySleepSeconds = 3 ) +// Process.String Formats the process details into a readable string +// +// This method creates a human‑readable representation of a process by +// combining its command line arguments and identifiers. It uses string +// formatting to include the executable name, process ID, parent process ID, and +// PID namespace number in a single line. The resulting string is returned for +// logging or debugging purposes. func (p *Process) String() string { return fmt.Sprintf("cmd: %s, pid: %d, ppid: %d, pidNs: %d", p.Args, p.Pid, p.PPid, p.PidNs) } -// Helper function to create the clientsholder.Context of the first container of the probe pod -// that runs in the give node. This context is usually needed to run shell commands that get -// information from a node where a pod/container under test is running. 
+// GetNodeProbePodContext creates a context for the first container of a probe pod on a node +// +// The function looks up the probe pod assigned to the specified node from the +// test environment. If found, it constructs a clientsholder.Context using that +// pod’s namespace, name, and its first container’s name. The returned +// context is used to execute commands inside the probe pod; if no probe pod +// exists on the node an error is returned. func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clientsholder.Context, error) { probePod := env.ProbePods[node] if probePod == nil { @@ -58,6 +76,12 @@ func GetNodeProbePodContext(node string, env *provider.TestEnvironment) (clients return clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name), nil } +// GetPidFromContainer Retrieves the process ID of a container by executing a runtime-specific command +// +// The function determines which container runtime is in use and builds an +// appropriate shell command to query the container's PID, then runs that +// command inside a probe pod context. It returns the numeric PID if the command +// succeeds or an error if execution fails or the runtime is unsupported. func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (int, error) { var pidCmd string @@ -85,7 +109,13 @@ func GetPidFromContainer(cut *provider.Container, ctx clientsholder.Context) (in return strconv.Atoi(strings.TrimSuffix(outStr, "\n")) } -// To get the pid namespace of the container +// GetContainerPidNamespace Retrieves the PID namespace identifier for a container +// +// This function determines the process ID of a target container by executing an +// inspection command on its runtime environment. It then runs a namespace +// listing command against that PID to extract the namespace name, returning it +// as a string. 
Errors from context retrieval, PID extraction, or command +// execution are wrapped and returned with descriptive messages. func GetContainerPidNamespace(testContainer *provider.Container, env *provider.TestEnvironment) (string, error) { // Get the container pid ocpContext, err := GetNodeProbePodContext(testContainer.NodeName, env) @@ -108,6 +138,12 @@ func GetContainerPidNamespace(testContainer *provider.Container, env *provider.T return strings.Fields(stdout)[0], nil } +// GetContainerProcesses Retrieves all process information from a container's PID namespace +// +// The function first determines the PID namespace of the given container, then +// queries that namespace to list every running process. It returns a slice of +// Process structures containing each process's ID, parent ID, command line and +// namespace identifier, or an error if either step fails. func GetContainerProcesses(container *provider.Container, env *provider.TestEnvironment) ([]*Process, error) { pidNs, err := GetContainerPidNamespace(container, env) if err != nil { @@ -117,7 +153,13 @@ func GetContainerProcesses(container *provider.Container, env *provider.TestEnvi return GetPidsFromPidNamespace(pidNs, container) } -// ExecCommandContainerNSEnter executes a command in the specified container namespace using nsenter +// ExecCommandContainerNSEnter Executes a shell command inside a container’s namespace +// +// The function determines the PID of the target container, builds an nsenter +// command to run in that process’s namespace, and executes it on a probe pod +// with retry logic. It returns the standard output, standard error, and any +// execution error. If the probe context or PID retrieval fails, it reports an +// appropriate error. 
func ExecCommandContainerNSEnter(command string, aContainer *provider.Container) (outStr, errStr string, err error) { env := provider.GetTestEnvironment() @@ -154,6 +196,14 @@ func ExecCommandContainerNSEnter(command string, return outStr, errStr, err } +// GetPidsFromPidNamespace Retrieves processes running in a specific PID namespace +// +// The function runs a ps command inside the probe pod on the container's node +// to list all processes with their namespaces, then filters those whose pidns +// matches the supplied string. It parses each line of output, converting +// numeric fields to integers and constructs Process objects for matching +// entries. The resulting slice of Process pointers is returned; if any error +// occurs during execution or parsing, an error value is returned. func GetPidsFromPidNamespace(pidNamespace string, container *provider.Container) (p []*Process, err error) { const command = "trap \"\" SIGURG ; ps -e -o pidns,pid,ppid,args" env := provider.GetTestEnvironment() diff --git a/internal/datautil/data_util.go b/internal/datautil/data_util.go index 5936ed5e0..b2cf76663 100644 --- a/internal/datautil/data_util.go +++ b/internal/datautil/data_util.go @@ -1,5 +1,12 @@ package datautil +// IsMapSubset Determines if one map contains all key-value pairs of another +// +// It compares two generic maps, returning true only when every entry in the +// second map exists identically in the first. The function first checks that +// the second map is not larger than the first for efficiency. It then iterates +// through each key-value pair, verifying presence and equality; if any mismatch +// occurs, it returns false. 
func IsMapSubset[K, V comparable](m, s map[K]V) bool { if len(s) > len(m) { return false diff --git a/internal/log/custom_handler.go b/internal/log/custom_handler.go index e71edeafd..d6312cb07 100644 --- a/internal/log/custom_handler.go +++ b/internal/log/custom_handler.go @@ -18,6 +18,13 @@ var CustomLevelNames = map[slog.Leveler]string{ CustomLevelFatal: "FATAL", } +// CustomHandler Formats and writes structured log entries to an output stream +// +// The handler collects attributes and optional context information such as +// level, time, source file, and message. It serializes these into a single line +// using a custom attribute formatting routine before writing them atomically to +// the configured writer. The handler supports adding default attributes via +// WithAttrs while preserving thread safety with a mutex. type CustomHandler struct { opts slog.HandlerOptions attrs []slog.Attr @@ -25,6 +32,12 @@ type CustomHandler struct { out io.Writer } +// NewCustomHandler Creates a thread‑safe log handler that writes to an io.Writer +// +// This function constructs a CustomHandler with the supplied writer and +// optional slog.HandlerOptions. If options are nil or lack a level, it defaults +// to slog.LevelInfo. The resulting handler can be used by other components to +// emit structured logs in a concurrency‑safe manner. func NewCustomHandler(out io.Writer, opts *slog.HandlerOptions) *CustomHandler { h := &CustomHandler{out: out, mu: &sync.Mutex{}} if opts != nil { @@ -37,12 +50,23 @@ func NewCustomHandler(out io.Writer, opts *slog.HandlerOptions) *CustomHandler { return h } +// CustomHandler.Enabled Determines if a log level is enabled based on configuration +// +// The method compares the supplied logging level against the handler's +// configured threshold, returning true when the level meets or exceeds that +// threshold. It ignores the context parameter because the decision relies +// solely on static settings. 
func (h *CustomHandler) Enabled(_ context.Context, level slog.Level) bool { return level >= h.opts.Level.Level() } -// The Handle method will write a log line with the following format: -// LOG_LEVEL [TIME] [SOURCE_FILE] [CUSTOM_ATTRS] MSG +// CustomHandler.Handle writes a formatted log line to the output +// +// This method receives a context and a slog.Record, builds a byte buffer +// containing level, time, source file, custom attributes, and message in a +// specific format, then writes it to an underlying writer. It locks a mutex +// during the write to ensure thread safety and returns any error from the write +// operation. // //nolint:gocritic // r param is heavy but defined in the slog.Handler interface func (h *CustomHandler) Handle(_ context.Context, r slog.Record) error { @@ -78,11 +102,23 @@ func (h *CustomHandler) Handle(_ context.Context, r slog.Record) error { return err } -// Not implemented. Returns the nil handler. +// CustomHandler.WithGroup Returns a nil handler because grouping is not implemented +// +// When called, this method ignores the provided group name and simply returns a +// nil handler, indicating that grouping functionality is not implemented for +// CustomHandler. It satisfies the slog.Handler interface but does not create +// any new handler instance or modify state. func (h *CustomHandler) WithGroup(_ string) slog.Handler { return nil } +// CustomHandler.WithAttrs Creates a handler that includes additional attributes +// +// The method takes a slice of attributes, merges them with the handler’s +// existing ones, and returns a new handler instance containing the combined +// set. If no attributes are supplied it simply returns the original handler to +// avoid unnecessary copying. The returned handler is a copy of the receiver so +// that modifications do not affect the original. 
func (h *CustomHandler) WithAttrs(attrs []slog.Attr) slog.Handler { if len(attrs) == 0 { return h @@ -98,6 +134,13 @@ func (h *CustomHandler) WithAttrs(attrs []slog.Attr) slog.Handler { return &h2 } +// CustomHandler.appendAttr Formats a logging attribute for output +// +// The function resolves the attribute’s value, skips empty attributes, then +// formats the output based on the kind of value. String values are printed +// plainly or in brackets; time values use a millisecond timestamp; other kinds +// include level or key/value pairs with appropriate spacing. The resulting +// bytes are appended to the buffer and returned. func (h *CustomHandler) appendAttr(buf []byte, a slog.Attr) []byte { // Resolve the Attr's value before doing anything else. a.Value = a.Value.Resolve() diff --git a/internal/log/log.go b/internal/log/log.go index cfd48a56d..d01483d7c 100644 --- a/internal/log/log.go +++ b/internal/log/log.go @@ -25,6 +25,12 @@ const ( LevelFatal = "fatal" ) +// Logger Encapsulates a structured logger with convenience methods +// +// This type wraps an slog.Logger to provide simple debug, info, warn, error, +// fatal, and context‑aware logging functions. It also offers a With method +// that attaches key/value pairs to the underlying logger, returning a new +// Logger instance for fluent chaining. type Logger struct { l *slog.Logger } @@ -35,6 +41,13 @@ var ( globalLogFile *os.File ) +// CreateGlobalLogFile Creates or replaces the global log file for test output +// +// The function removes any existing log file in the specified directory, then +// opens a new one with read/write permissions. It configures the logger to +// write to this file at the requested level and stores the file handle +// globally. Errors during removal or opening are returned as formatted +// messages. 
func CreateGlobalLogFile(outputDir, logLevel string) error { logFilePath := outputDir + "/" + LogFileName err := os.Remove(logFilePath) @@ -53,10 +66,23 @@ func CreateGlobalLogFile(outputDir, logLevel string) error { return nil } +// CloseGlobalLogFile Closes the globally opened log file +// +// The function invokes the Close method on the global log file handle and +// returns any error that occurs during closure. It does not take any arguments +// and only provides an error result indicating success or failure of the +// operation. func CloseGlobalLogFile() error { return globalLogFile.Close() } +// SetupLogger configures global logging with a custom level and writer +// +// This function parses the supplied log level string, falling back to INFO if +// parsing fails, and sets the global logger to write formatted slog entries to +// the provided io.Writer. It uses a custom handler that replaces standard level +// strings with user‑defined names when necessary. The resulting Logger +// instance is stored globally for use throughout the application. func SetupLogger(logWriter io.Writer, level string) { logLevel, err := parseLevel(level) if err != nil { @@ -88,14 +114,34 @@ func SetupLogger(logWriter io.Writer, level string) { } } +// SetLogger Sets the package-wide logger instance +// +// This function assigns the provided Logger to a global variable used +// throughout the logging package, making it available for all subsequent log +// operations. It performs no validation or side effects beyond the assignment +// and does not return any value. func SetLogger(l *Logger) { globalLogger = l } +// GetLogger Retrieves the package-wide logger instance +// +// This function provides access to a globally shared Logger object that is used +// throughout the application for consistent logging behavior. It simply returns +// the reference stored in the internal variable, allowing other packages to +// obtain and use the same logger without creating new instances. 
func GetLogger() *Logger { return globalLogger } +// GetMultiLogger Creates a logger that writes to multiple destinations +// +// The function builds a set of slog handlers, including an optional global +// handler if one is configured, and wraps each supplied writer in a custom +// handler with the current log level settings. It then combines these handlers +// into a multi-handler so that every log record is emitted to all specified +// writers simultaneously. The resulting Logger instance is returned for use +// throughout the application. func GetMultiLogger(writers ...io.Writer) *Logger { opts := slog.HandlerOptions{ Level: globalLogLevel, @@ -126,58 +172,135 @@ func GetMultiLogger(writers ...io.Writer) *Logger { return &Logger{l: slog.New(NewMultiHandler(handlers...))} } -// Top-level log functions +// Debug Logs a message at the debug level +// +// This function forwards its arguments to the internal logging system, tagging +// them with a debug severity. It accepts a format string followed by any number +// of values, which are passed to the underlying logger for formatting and +// output. The global logger instance is used, ensuring consistent log +// configuration across the application. func Debug(msg string, args ...any) { Logf(globalLogger, LevelDebug, msg, args...) } +// Info Logs a message at the informational level +// +// This function sends a formatted log entry to the package's global logger with +// an informational severity. It accepts a message string and optional +// arguments, which are passed through to formatting before dispatching to the +// underlying logging system. The call is non‑blocking and does not return any +// value. func Info(msg string, args ...any) { Logf(globalLogger, LevelInfo, msg, args...) } +// Warn Logs a message at warning level +// +// The function forwards its arguments to Logf, supplying the global logger and +// a warning severity indicator. 
It accepts a format string followed by optional +// values, which are interpolated into the log entry. The resulting record is +// written using slog's handling mechanisms. func Warn(msg string, args ...any) { Logf(globalLogger, LevelWarn, msg, args...) } +// Error Logs an error message with optional formatting +// +// This function accepts a format string and optional arguments, then forwards +// the call to a lower-level logging routine that writes the message at the +// error severity level. It uses the global logger instance, ensuring +// consistency across the application. The formatted output is sent to the +// configured log handler. func Error(msg string, args ...any) { Logf(globalLogger, LevelError, msg, args...) } +// Fatal Logs a fatal error message and terminates the program +// +// This function writes the supplied formatted message to both the configured +// logger at the fatal level and directly to standard error. After logging, it +// exits the process with status code one, ensuring that the application stops +// immediately. func Fatal(msg string, args ...any) { Logf(globalLogger, LevelFatal, msg, args...) fmt.Fprintf(os.Stderr, "\nFATAL: "+msg+"\n", args...) os.Exit(1) } -// Log methods for a logger instance +// Logger.Debug Logs a formatted message at debug level +// +// The method calls the generic logging helper Logf, passing the logger instance +// and the debug log level together with the supplied format string and +// arguments. It formats the message using fmt.Sprintf before emitting it +// through the underlying slog handler, only if the current logger is enabled +// for debug logs. func (logger *Logger) Debug(msg string, args ...any) { Logf(logger, LevelDebug, msg, args...) } +// Logger.Info Logs an informational message +// +// This method forwards the supplied format string and arguments to the internal +// logging routine at the info level. 
It relies on Logf to create a log record +// with the appropriate severity, ensuring the message is emitted only if the +// logger’s configuration allows that level. No value is returned. func (logger *Logger) Info(msg string, args ...any) { Logf(logger, LevelInfo, msg, args...) } +// Logger.Warn Logs a warning message with optional formatting +// +// This method takes a format string and an arbitrary number of arguments, +// passes them to the underlying Logf function along with the warning level +// constant. It records the warning using the logger's handler if the warning +// level is enabled for the current context. The call does not return any value. func (logger *Logger) Warn(msg string, args ...any) { Logf(logger, LevelWarn, msg, args...) } +// Logger.Error Logs a formatted message at the error level +// +// This method receives a format string followed by optional arguments, then +// delegates to the generic logging helper passing the error severity. It uses +// the Logger instance if provided; otherwise it falls back to the default +// logger. The resulting entry is emitted immediately without returning any +// value. func (logger *Logger) Error(msg string, args ...any) { Logf(logger, LevelError, msg, args...) } +// Logger.Fatal Outputs a fatal error message, writes to stderr and exits the program +// +// The method logs a formatted fatal message using the Logger’s Logf helper, +// then prints the same message prefixed with "FATAL:" to standard error for +// visibility. After displaying the message it terminates the process by calling +// os.Exit(1). No return value is produced because execution stops immediately. func (logger *Logger) Fatal(msg string, args ...any) { Logf(logger, LevelFatal, msg, args...) fmt.Fprintf(os.Stderr, "\nFATAL: "+msg+"\n", args...) 
os.Exit(1) } +// Logger.With Creates a child logger with added contextual fields +// +// The method accepts any number of key-value pairs or structured arguments and +// forwards them to the underlying logger’s With function. It constructs a new +// Logger instance that preserves the original logger while extending its +// context, allowing subsequent log entries to include these additional fields. +// The returned logger can be used independently for further logging calls. func (logger *Logger) With(args ...any) *Logger { return &Logger{ l: logger.l.With(args...), } } +// parseLevel Converts a string into a slog logging level +// +// The function takes a textual log level, normalizes it to lowercase, and +// matches it against known levels such as debug, info, warn, error, and fatal. +// If the input corresponds to one of these names, the matching slog.Level +// constant is returned; otherwise an error is produced indicating the value is +// invalid. func parseLevel(level string) (slog.Level, error) { switch strings.ToLower(level) { case "debug": @@ -195,8 +318,13 @@ func parseLevel(level string) (slog.Level, error) { return 0, fmt.Errorf("not a valid slog Level: %q", level) } -// The Logf function should be called inside a log wrapper function. -// Otherwise the code source reference will be invalid. +// Logf Logs a formatted message at the specified level +// +// The function accepts a logger, a string representing the log level, a format +// string, and optional arguments. It parses the level, checks if logging is +// enabled for that level, retrieves the caller information, creates a slog +// record with a timestamp and formatted message, and passes it to the +// logger’s handler. 
func Logf(logger *Logger, level, format string, args ...any) { if logger == nil { logger = &Logger{ diff --git a/internal/log/multi_handler.go b/internal/log/multi_handler.go index 92b8f0e03..1b6e9622c 100644 --- a/internal/log/multi_handler.go +++ b/internal/log/multi_handler.go @@ -5,16 +5,37 @@ import ( "log/slog" ) +// MultiHandler combines multiple logging handlers into one +// +// It holds a slice of slog.Handler values and forwards each logging call to +// every handler in the slice. For enabled checks, it returns true if any +// underlying handler is enabled for the given level. When handling a record, it +// clones the record before passing it to each handler, stopping early only if +// an error occurs. Attribute and group additions are propagated by creating new +// handlers with the specified attributes or groups. type MultiHandler struct { handlers []slog.Handler } +// NewMultiHandler Creates a composite handler for multiple slog handlers +// +// This function takes any number of slog.Handler instances and returns a new +// MultiHandler that aggregates them. The returned object holds the provided +// handlers in order, enabling log records to be dispatched to each underlying +// handler when emitted. No additional processing or filtering is performed; it +// simply stores the handlers for later use. func NewMultiHandler(handlers ...slog.Handler) *MultiHandler { return &MultiHandler{ handlers: handlers, } } +// MultiHandler.Enabled True when any contained handler accepts the log level +// +// The method iterates over all handlers stored in the MultiHandler and queries +// each one to see if it would handle messages at the specified level. If any +// handler reports enabled, the function immediately returns true; otherwise it +// returns false after checking all handlers. 
func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool { for i := range h.handlers { if h.handlers[i].Enabled(ctx, level) { @@ -25,6 +46,14 @@ func (h *MultiHandler) Enabled(ctx context.Context, level slog.Level) bool { return false } +// MultiHandler.Handle distributes a log record to all registered handlers +// +// The method iterates over each handler stored in the MultiHandler, cloning the +// incoming record before passing it to ensure isolation between handlers. If +// any handler returns an error, that error is immediately returned and no +// further handlers are invoked. When all handlers succeed, the method completes +// without error. +// //nolint:gocritic func (h *MultiHandler) Handle(ctx context.Context, r slog.Record) error { for i := range h.handlers { @@ -36,6 +65,13 @@ func (h *MultiHandler) Handle(ctx context.Context, r slog.Record) error { return nil } +// MultiHandler.WithAttrs creates a new handler that adds attributes to all sub-handlers +// +// This method iterates over each contained handler, invoking its +// attribute-adding function with the supplied slice of attributes. It collects +// the resulting handlers into a new slice and constructs a fresh multi-handler +// from them. The returned handler behaves like the original but ensures every +// log record includes the provided attributes. func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler { handlersWithAttrs := make([]slog.Handler, len(h.handlers)) for i := range h.handlers { @@ -44,6 +80,13 @@ func (h *MultiHandler) WithAttrs(attrs []slog.Attr) slog.Handler { return NewMultiHandler(handlersWithAttrs...) } +// MultiHandler.WithGroup Adds a named group to all underlying handlers +// +// This method creates a new slice of slog.Handler by iterating over the +// existing handlers and invoking each one's WithGroup method with the provided +// name. The resulting handlers are then wrapped into a new MultiHandler +// instance, which is returned as a slog.Handler. 
This allows grouping log +// entries consistently across multiple output destinations. func (h *MultiHandler) WithGroup(name string) slog.Handler { handlersWithGroup := make([]slog.Handler, len(h.handlers)) for i := range h.handlers { diff --git a/internal/results/archiver.go b/internal/results/archiver.go index 82b4ad3dc..541d24628 100644 --- a/internal/results/archiver.go +++ b/internal/results/archiver.go @@ -18,11 +18,23 @@ const ( tarGzFileNameSuffix = "cnf-test-results.tar.gz" ) +// generateZipFileName creates a timestamped name for the archive file +// +// The function generates a string by formatting the current time with a +// predefined layout and appending a suffix to produce a unique filename. It +// uses the system clock to ensure each call returns a different value, suitable +// for naming compressed result artifacts. The returned string is later combined +// with a directory path to create the full file location. func generateZipFileName() string { return time.Now().Format(tarGzFileNamePrefixLayout) + "-" + tarGzFileNameSuffix } -// Helper function to get the tar file header from a file. +// getFileTarHeader Creates a tar header for a given file +// +// The function retrieves the file’s metadata using the operating system, then +// converts that information into a tar header structure suitable for archiving. +// It returns the header or an error if either the stat call or the conversion +// fails. func getFileTarHeader(file string) (*tar.Header, error) { info, err := os.Stat(file) if err != nil { @@ -37,7 +49,12 @@ func getFileTarHeader(file string) (*tar.Header, error) { return header, nil } -// Creates a zip file in the outputDir containing each file in the filePaths slice. +// CompressResultsArtifacts Creates a compressed archive of specified files +// +// The function builds a zip file in the given output directory, including each +// path from the slice. 
It streams each file into a tar writer wrapped by gzip +// for compression, handling errors during header creation or file access. The +// absolute path to the resulting archive is returned. func CompressResultsArtifacts(outputDir string, filePaths []string) (string, error) { zipFileName := generateZipFileName() zipFilePath := filepath.Join(outputDir, zipFileName) diff --git a/internal/results/html.go b/internal/results/html.go index 63a03ec22..f4436279e 100644 --- a/internal/results/html.go +++ b/internal/results/html.go @@ -17,7 +17,12 @@ const ( //go:embed html/results.html var htmlResultsFileContent []byte -// Creates the claimjson.js file from the claim.json file. +// createClaimJSFile Creates a JavaScript file containing the claim JSON data +// +// The function reads the contents of a specified claim.json file, prefixes it +// with a JavaScript variable declaration, and writes this combined string to a +// new file in the given output directory. It returns the path to the newly +// created file or an error if reading or writing fails. func createClaimJSFile(claimFilePath, outputDir string) (filePath string, err error) { // Read claim.json content. claimContent, err := os.ReadFile(claimFilePath) @@ -37,11 +42,13 @@ func createClaimJSFile(claimFilePath, outputDir string) (filePath string, err er return filePath, nil } -// Creates all the html/web related files needed for parsing the claim file in outputDir. -// - claimjson.js -// - results.html -// - classification.js -// Returns a slice with the paths of every file created. +// CreateResultsWebFiles Creates HTML web assets for claim data +// +// The function generates the JavaScript file that exposes the claim JSON +// content, writes a static results page, and returns their paths. It accepts an +// output directory and a claim file name, constructs the necessary files, +// handles any I/O errors, and collects the created file locations in a slice. 
+// The returned slice contains the paths to all web artifacts for later use. func CreateResultsWebFiles(outputDir, claimFileName string) (filePaths []string, err error) { type file struct { Path string diff --git a/internal/results/rhconnect.go b/internal/results/rhconnect.go index 9ab30c4ae..6ed480b94 100644 --- a/internal/results/rhconnect.go +++ b/internal/results/rhconnect.go @@ -20,6 +20,11 @@ const ( redHatConnectAPITimeout = 60 * time.Second ) +// createFormField Creates a single form field in a multipart payload +// +// The function accepts a multipart writer, a field name, and its value. It uses +// the writer to create the field and writes the provided string into it. Errors +// during creation or writing are wrapped with context and returned. func createFormField(w *multipart.Writer, field, value string) error { fw, err := w.CreateFormField(field) if err != nil { @@ -34,6 +39,12 @@ func createFormField(w *multipart.Writer, field, value string) error { return nil } +// CertIDResponse Represents a certification case response from RHConnect +// +// This struct holds information returned by the RHConnect API for a +// certification request, including the internal ID, external case number, +// status, level, URL, and whether the partner initiated it. It also embeds a +// nested type providing the certification category's identifier and name. type CertIDResponse struct { ID int `json:"id"` CaseNumber string `json:"caseNumber"` @@ -47,7 +58,14 @@ type CertIDResponse struct { } `json:"certificationType"` } -// GetCertIDFromConnectAPI gets the certification ID from the Red Hat Connect API +// GetCertIDFromConnectAPI Retrieves a certification identifier from the Red Hat Connect service +// +// The function builds a JSON payload containing a project ID, sends it as a +// POST request to the Connect API endpoint for certifications, and decodes the +// returned JSON to extract the numeric certification ID. 
It supports optional +// proxy configuration, sanitizes input strings, and applies a timeout to the +// HTTP client. The resulting ID is returned as a string; errors are reported if +// any step fails. func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, proxyPort string) (string, error) { log.Info("Getting certification ID from Red Hat Connect API") @@ -106,6 +124,13 @@ func GetCertIDFromConnectAPI(apiKey, projectID, connectAPIBaseURL, proxyURL, pro return fmt.Sprintf("%d", certIDResponse.ID), nil } +// UploadResult Details the outcome of a file upload operation +// +// This structure holds information about an uploaded artifact, including its +// unique identifier, type, name, size, MIME type, description, download link, +// uploader, upload timestamp, and related certificate ID. It is used to convey +// all relevant metadata back to clients or services that need to reference the +// stored file. type UploadResult struct { UUID string `json:"uuid"` Type string `json:"type"` @@ -119,7 +144,14 @@ type UploadResult struct { CertID int `json:"certId"` } -// SendResultsToConnectAPI sends the results to the Red Hat Connect API +// SendResultsToConnectAPI Uploads a ZIP file of test results to the Red Hat Connect API +// +// The function takes a zip file path, an API key, base URL, certification ID, +// and optional proxy settings. It builds a multipart/form‑data POST request +// containing the file and metadata fields, then sends it with a +// timeout‑limited HTTP client that can use a proxy if configured. On success +// it logs the returned download URL and upload date; otherwise it returns an +// error describing what failed. 
// //nolint:funlen func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, proxyPort string) error { @@ -210,6 +242,12 @@ func SendResultsToConnectAPI(zipFile, apiKey, connectBaseURL, certID, proxyURL, return nil } +// sendRequest Sends an HTTP request using a client and checks for success +// +// This function logs the target URL, executes the request with the provided +// http.Client, and returns the response if the status code is 200 OK. If an +// error occurs during execution or the status code differs from OK, it returns +// a formatted error describing the failure. func sendRequest(req *http.Request, client *http.Client) (*http.Response, error) { // print the request log.Debug("Sending request to %s", req.URL) @@ -227,6 +265,12 @@ func sendRequest(req *http.Request, client *http.Client) (*http.Response, error) return res, nil } +// setProxy configures an HTTP client to use a proxy when provided +// +// When both the proxy address and port are supplied, the function builds a +// proxy URL, parses it, logs the configuration, and assigns a transport with +// that proxy to the client. If parsing fails, an error is logged but no panic +// occurs. The client remains unchanged if either value is empty. func setProxy(client *http.Client, proxyURL, proxyPort string) { if proxyURL != "" && proxyPort != "" { log.Debug("Proxy is set. Using proxy %s:%s", proxyURL, proxyPort) diff --git a/pkg/arrayhelper/arrayhelper.go b/pkg/arrayhelper/arrayhelper.go index 6229e7e75..5528f806f 100644 --- a/pkg/arrayhelper/arrayhelper.go +++ b/pkg/arrayhelper/arrayhelper.go @@ -20,8 +20,12 @@ import ( "strings" ) -// ArgListToMap takes a list of strings of the form "key=value" and translate it into a map -// of the form {key: value} +// ArgListToMap Converts key=value strings into a map +// +// It receives an array of strings, each representing a kernel argument or +// configuration pair. 
For every entry it removes surrounding quotes, splits on +// the first equals sign, and stores the key with its corresponding value in a +// new map. The resulting map is returned for further processing. func ArgListToMap(lst []string) map[string]string { retval := make(map[string]string) for _, arg := range lst { @@ -36,7 +40,11 @@ func ArgListToMap(lst []string) map[string]string { return retval } -// FilterArray takes a list and a predicate and returns a list of all elements for whom the predicate returns true +// FilterArray Filters elements of a slice based on a predicate +// +// It iterates over each string in the input slice, applies the provided +// function to decide if an element should be kept, and collects those that +// satisfy the condition into a new slice which is then returned. func FilterArray(vs []string, f func(string) bool) []string { vsf := make([]string, 0) for _, v := range vs { @@ -47,6 +55,12 @@ func FilterArray(vs []string, f func(string) bool) []string { return vsf } +// Unique Eliminates duplicate strings from a slice +// +// The function receives a slice of strings and returns a new slice containing +// each distinct element exactly once. It builds a map to track seen values, +// then collects the unique keys into a result slice. The order of elements is +// not preserved. func Unique(slice []string) []string { // create a map with all the values as key uniqMap := make(map[string]struct{}) diff --git a/pkg/autodiscover/autodiscover.go b/pkg/autodiscover/autodiscover.go index ba0ecd06a..46efe1296 100644 --- a/pkg/autodiscover/autodiscover.go +++ b/pkg/autodiscover/autodiscover.go @@ -59,11 +59,26 @@ const ( labelTemplate = "%s/%s" ) +// PodStates Tracks pod counts before and after execution +// +// This structure holds two maps that record the number of pods per namespace or +// label set before an operation begins and after it completes. The keys +// represent identifiers such as namespace names, while the values are integer +// counters. 
By comparing these maps, callers can determine how many pods were +// added, removed, or remained unchanged during the execution phase. type PodStates struct { BeforeExecution map[string]int AfterExecution map[string]int } +// DiscoveredTestData Contains all resources discovered during test setup +// +// The structure holds metadata, configuration parameters, and collections of +// Kubernetes objects such as pods, services, CRDs, and operator information +// collected by the autodiscovery routine. It aggregates stateful data like pod +// statuses, resource quotas, network policies, and role bindings to provide a +// comprehensive snapshot of the cluster for testing purposes. The fields are +// used downstream to evaluate test conditions and report results. type DiscoveredTestData struct { Env configuration.TestParameters PodStates PodStates @@ -126,6 +141,12 @@ type DiscoveredTestData struct { ConnectAPIProxyPort string } +// labelObject Represents a single key/value pair used to identify Kubernetes resources +// +// This structure holds the label's key and its corresponding value, allowing +// code to match or filter objects such as Pods, Deployments, or Operators based +// on those labels. It is used throughout the discovery logic to build selectors +// for listing resources that satisfy one or more specified label conditions. type labelObject struct { LabelKey string LabelValue string @@ -136,6 +157,13 @@ var data = DiscoveredTestData{} const labelRegex = `(\S*)\s*:\s*(\S*)` const labelRegexMatches = 3 +// CreateLabels Parses label expressions into key-value objects +// +// The function iterates over a slice of strings, each representing a label in +// the form "key: value". It uses a regular expression to extract the key and +// value; if parsing fails it logs an error and skips that entry. Valid pairs +// are wrapped into labelObject structs and collected into a slice that is +// returned. 
func CreateLabels(labelStrings []string) (labelObjects []labelObject) { for _, label := range labelStrings { r := regexp.MustCompile(labelRegex) @@ -153,7 +181,14 @@ func CreateLabels(labelStrings []string) (labelObjects []labelObject) { return labelObjects } -// DoAutoDiscover finds objects under test +// DoAutoDiscover Collects comprehensive Kubernetes and OpenShift discovery data +// +// The function gathers a wide range of cluster information such as namespaces, +// pods, operators, subscriptions, CRDs, storage classes, network policies, role +// bindings, and more. It uses client holders to query the API, applies label +// filtering for test objects, handles errors with fatal logging, and populates +// a DiscoveredTestData structure that is later used to build the test +// environment. // //nolint:funlen,gocyclo func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData { @@ -349,6 +384,12 @@ func DoAutoDiscover(config *configuration.TestConfiguration) DiscoveredTestData return data } +// namespacesListToStringList Converts a list of namespace objects to a slice of their names +// +// The function iterates over each Namespace in the input slice, extracting its +// Name field and appending it to a new string slice. It returns this slice of +// strings representing all namespace names. This conversion is used to provide +// a simple list for further processing elsewhere in the package. func namespacesListToStringList(namespaceList []configuration.Namespace) (stringList []string) { for _, ns := range namespaceList { stringList = append(stringList, ns.Name) @@ -356,6 +397,13 @@ func namespacesListToStringList(namespaceList []configuration.Namespace) (string return stringList } +// getOpenshiftVersion retrieves the OpenShift version from the cluster +// +// The function queries the openshift-apiserver ClusterOperator resource to +// obtain its status versions. 
It searches for a version entry matching a +// specific label, logs the found version, and returns it. If the operator is +// missing or no matching version exists, it returns an error or a sentinel +// value indicating a non‑OpenShift cluster. func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, err error) { var clusterOperator *configv1.ClusterOperator clusterOperator, err = oClient.ClusterOperators().Get(context.TODO(), "openshift-apiserver", metav1.GetOptions{}) @@ -381,7 +429,13 @@ func getOpenshiftVersion(oClient clientconfigv1.ConfigV1Interface) (ver string, return "", errors.New("could not get openshift version from clusterOperator") } -// Get a map of csvs with its managed operator/controller pods from its installation namespace. +// getOperatorCsvPods Retrieves operator controller pods for each CSV +// +// For every ClusterServiceVersion in the list, it looks up the namespace +// annotation to locate where the operator runs. It then lists all pods in that +// namespace, filters those owned by the CSV, and builds a map keyed by the +// CSV’s namespaced name to its managed pods. Errors are returned if +// annotations or pod retrieval fail. func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types.NamespacedName][]*corev1.Pod, error) { const nsAnnotation = "olm.operatorNamespace" @@ -405,7 +459,13 @@ func getOperatorCsvPods(csvList []*olmv1Alpha.ClusterServiceVersion) (map[types. return csvToPodsMapping, nil } -// This function gets the operator/controller pods of the specified csv name in from the installation namespace. +// getPodsOwnedByCsv retrieves operator pods owned by a specified CSV +// +// The function lists all pods in the given namespace, then checks each pod’s +// top-level owner references to find those whose owner is a +// ClusterServiceVersion matching the provided name and namespace. Matching pods +// are collected into a slice that is returned. 
If any error occurs while +// listing pods or determining owners, it returns an error. func getPodsOwnedByCsv(csvName, operatorNamespace string, client *clientsholder.ClientsHolder) (managedPods []*corev1.Pod, err error) { // Get all pods from the target namespace podsList, err := client.K8sClient.CoreV1().Pods(operatorNamespace).List(context.TODO(), metav1.ListOptions{}) diff --git a/pkg/autodiscover/autodiscover_clusteroperators.go b/pkg/autodiscover/autodiscover_clusteroperators.go index b9aa0f0a4..d3edfe017 100644 --- a/pkg/autodiscover/autodiscover_clusteroperators.go +++ b/pkg/autodiscover/autodiscover_clusteroperators.go @@ -10,6 +10,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// findClusterOperators Retrieves all ClusterOperator resources from the cluster +// +// The function calls the client to list ClusterOperator objects, handling +// errors that may occur during the request. If the API returns a not‑found +// error, it logs a debug message and returns nil without error. On success, it +// returns a slice of the retrieved items. func findClusterOperators(client clientconfigv1.ClusterOperatorInterface) ([]configv1.ClusterOperator, error) { clusterOperators, err := client.List(context.TODO(), metav1.ListOptions{}) if err != nil && !k8serrors.IsNotFound(err) { diff --git a/pkg/autodiscover/autodiscover_crds.go b/pkg/autodiscover/autodiscover_crds.go index e4eaa73d6..ec931dd39 100644 --- a/pkg/autodiscover/autodiscover_crds.go +++ b/pkg/autodiscover/autodiscover_crds.go @@ -31,7 +31,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// getClusterCrdNames returns a list of crd names found in the cluster. +// getClusterCrdNames Retrieves all CustomResourceDefinition objects from the cluster +// +// The function obtains a client holder, lists CRDs via the API extensions +// client, and returns a slice of pointers to each CustomResourceDefinition. If +// listing fails it wraps the error with context. 
The result is used by +// autodiscovery to filter relevant CRDs. func getClusterCrdNames() ([]*apiextv1.CustomResourceDefinition, error) { oc := clientsholder.GetClientsHolder() crds, err := oc.APIExtClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{}) @@ -46,7 +51,12 @@ func getClusterCrdNames() ([]*apiextv1.CustomResourceDefinition, error) { return crdList, nil } -// FindTestCrdNames gets a list of CRD names based on configured groups. +// FindTestCrdNames Selects CRDs that match configured suffixes +// +// The function scans a list of cluster CRDs, comparing each name against a set +// of suffix filters defined in the configuration. When a CRD’s name ends with +// any specified suffix, it is added to the result slice. If no CRDs are +// present, an error is logged and an empty slice is returned. func FindTestCrdNames(clusterCrds []*apiextv1.CustomResourceDefinition, crdFilters []configuration.CrdFilter) (targetCrds []*apiextv1.CustomResourceDefinition) { if len(clusterCrds) == 0 { log.Error("Cluster does not have any CRDs") diff --git a/pkg/autodiscover/autodiscover_events.go b/pkg/autodiscover/autodiscover_events.go index d5c48bb25..70248cede 100644 --- a/pkg/autodiscover/autodiscover_events.go +++ b/pkg/autodiscover/autodiscover_events.go @@ -25,6 +25,12 @@ import ( corev1client "k8s.io/client-go/kubernetes/typed/core/v1" ) +// findAbnormalEvents collects non-normal events from specified namespaces +// +// The function iterates over each namespace provided, querying the Kubernetes +// API for events whose type is not Normal. It aggregates these events into a +// single slice, logging an error and skipping any namespace where the list +// operation fails. The resulting slice of corev1.Event objects is returned. 
func findAbnormalEvents(oc corev1client.CoreV1Interface, namespaces []string) (abnormalEvents []corev1.Event) { abnormalEvents = []corev1.Event{} for _, ns := range namespaces { diff --git a/pkg/autodiscover/autodiscover_nads.go b/pkg/autodiscover/autodiscover_nads.go index 5d7a670b1..5b017ea8a 100644 --- a/pkg/autodiscover/autodiscover_nads.go +++ b/pkg/autodiscover/autodiscover_nads.go @@ -9,6 +9,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// getNetworkAttachmentDefinitions Retrieves all network attachment definitions from specified namespaces +// +// The function iterates over a list of namespace names, querying each for its +// NetworkAttachmentDefinition resources via the CNCF networking client. It +// collects any found items into a single slice, handling missing namespaces +// gracefully by ignoring not‑found errors. The resulting slice and an error are +// returned to the caller. func getNetworkAttachmentDefinitions(client *clientsholder.ClientsHolder, namespaces []string) ([]nadClient.NetworkAttachmentDefinition, error) { var nadList []nadClient.NetworkAttachmentDefinition diff --git a/pkg/autodiscover/autodiscover_networkpolicies.go b/pkg/autodiscover/autodiscover_networkpolicies.go index 3c9236227..1d12df843 100644 --- a/pkg/autodiscover/autodiscover_networkpolicies.go +++ b/pkg/autodiscover/autodiscover_networkpolicies.go @@ -24,6 +24,12 @@ import ( networkingv1client "k8s.io/client-go/kubernetes/typed/networking/v1" ) +// getNetworkPolicies Retrieves all network policies in the cluster +// +// The function calls the NetworkingV1 client to list network policies across +// every namespace by using an empty string for the namespace parameter. It +// returns a slice of NetworkPolicy objects and any error encountered during the +// API call. 
func getNetworkPolicies(oc networkingv1client.NetworkingV1Interface) ([]networkingv1.NetworkPolicy, error) { nps, err := oc.NetworkPolicies("").List(context.TODO(), metav1.ListOptions{}) if err != nil { diff --git a/pkg/autodiscover/autodiscover_operators.go b/pkg/autodiscover/autodiscover_operators.go index 95fd5f176..cc19b9f27 100644 --- a/pkg/autodiscover/autodiscover_operators.go +++ b/pkg/autodiscover/autodiscover_operators.go @@ -46,6 +46,13 @@ const ( istioDeploymentName = "istiod" ) +// isIstioServiceMeshInstalled checks for an installed Istio service mesh +// +// The function verifies that the special Istio namespace exists in the cluster +// and then looks for a Deployment named istiod within that namespace. If either +// the namespace or deployment is missing, it logs appropriate messages and +// returns false; otherwise it confirms detection with an info log and returns +// true. func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs []string) bool { // The Istio namespace must be present if !stringhelper.StringInSlice(allNs, istioNamespace, false) { @@ -68,6 +75,14 @@ func isIstioServiceMeshInstalled(appClient appv1client.AppsV1Interface, allNs [] return true } +// findOperatorsMatchingAtLeastOneLabel Retrieves operators whose CSVs match any of the provided labels +// +// The function queries the OLM client for ClusterServiceVersions in a specific +// namespace, filtering by each label in turn and aggregating all matching CSV +// items into a single list. It logs debug information for each search attempt +// and records errors if a query fails, continuing with remaining labels. The +// returned list contains every CSV that satisfies at least one of the supplied +// label selectors. 
func findOperatorsMatchingAtLeastOneLabel(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespace configuration.Namespace) *olmv1Alpha.ClusterServiceVersionList { csvList := &olmv1Alpha.ClusterServiceVersionList{} for _, l := range labels { @@ -84,6 +99,14 @@ func findOperatorsMatchingAtLeastOneLabel(olmClient v1alpha1.OperatorsV1alpha1In return csvList } +// findOperatorsByLabels Retrieves operator CSVs matching given labels across specified namespaces +// +// The function iterates over each target namespace, collecting +// ClusterServiceVersions that either match provided label selectors or are +// listed without filters when no labels exist. It then verifies the +// operator’s controller pod resides in a configured test namespace by +// checking an annotation and includes only those CSVs in the result set. Each +// discovered CSV is logged for visibility before being returned as a slice. func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels []labelObject, namespaces []configuration.Namespace) (csvs []*olmv1Alpha.ClusterServiceVersion) { const nsAnnotation = "olm.operatorNamespace" @@ -129,6 +152,12 @@ func findOperatorsByLabels(olmClient v1alpha1.OperatorsV1alpha1Interface, labels return csvs } +// getAllNamespaces Retrieves the names of all namespaces in a cluster +// +// The function queries the Kubernetes API for every namespace, collects each +// name into a slice, and returns that list. If the list request fails, it wraps +// the error with context before returning. The returned slice contains plain +// string names and may be empty if no namespaces exist. 
func getAllNamespaces(oc corev1client.CoreV1Interface) (allNs []string, err error) { nsList, err := oc.Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -140,6 +169,12 @@ func getAllNamespaces(oc corev1client.CoreV1Interface) (allNs []string, err erro return allNs, nil } +// getAllOperators Retrieves all operator CSVs from every namespace +// +// The function queries the OLM client for ClusterServiceVersion objects across +// all namespaces, collecting them into a slice. It logs each found CSV name and +// namespace for visibility. Errors during listing are wrapped with context and +// returned to the caller. func getAllOperators(olmClient v1alpha1.OperatorsV1alpha1Interface) ([]*olmv1Alpha.ClusterServiceVersion, error) { csvs := []*olmv1Alpha.ClusterServiceVersion{} @@ -157,6 +192,14 @@ func getAllOperators(olmClient v1alpha1.OperatorsV1alpha1Interface) ([]*olmv1Alp return csvs, nil } +// findSubscriptions Collects operator subscriptions across specified namespaces +// +// This routine iterates over a list of namespace identifiers, querying the +// OpenShift Operator Lifecycle Manager for Subscription objects in each. It +// logs debug information for each namespace, handles errors by logging them and +// skipping problematic ones, and aggregates all found subscriptions into a +// single slice. After gathering, it emits informational logs detailing each +// subscription’s name and namespace before returning the compiled collection. 
func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces []string) []olmv1Alpha.Subscription { subscriptions := []olmv1Alpha.Subscription{} for _, ns := range namespaces { @@ -179,6 +222,12 @@ func findSubscriptions(olmClient v1alpha1.OperatorsV1alpha1Interface, namespaces return subscriptions } +// getHelmList Collects deployed Helm releases from given namespaces +// +// The function creates a Helm client for each namespace using the provided REST +// configuration, then retrieves all deployed releases in that namespace. +// Results are stored in a map keyed by namespace name. If client creation fails +// it panics; otherwise the mapping of namespace to release slices is returned. func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*release.Release { helmChartReleases := map[string][]*release.Release{} for _, ns := range namespaces { @@ -204,7 +253,12 @@ func getHelmList(restConfig *rest.Config, namespaces []string) map[string][]*rel return helmChartReleases } -// getAllInstallPlans is a helper function to get the all the installPlans in a cluster. +// getAllInstallPlans Retrieves all operator install plans from the cluster +// +// The function queries the OpenShift Operator Lifecycle Manager for every +// InstallPlan resource across all namespaces. If the API call fails, it logs an +// error and returns an empty slice; otherwise it collects pointers to each +// InstallPlan item into a new slice and returns that list. func getAllInstallPlans(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.InstallPlan) { installPlanList, err := olmClient.InstallPlans("").List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -217,7 +271,12 @@ func getAllInstallPlans(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*o return out } -// getAllCatalogSources is a helper function to get the all the CatalogSources in a cluster. 
+// getAllCatalogSources Retrieves all CatalogSource objects from the cluster +// +// The function queries the operator lifecycle manager for catalog sources in +// every namespace, handling any errors by logging them and returning an empty +// slice. It iterates over the returned list, appending pointers to each item +// into a result slice which is then returned. func getAllCatalogSources(olmClient v1alpha1.OperatorsV1alpha1Interface) (out []*olmv1Alpha.CatalogSource) { catalogSourcesList, err := olmClient.CatalogSources("").List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -230,7 +289,13 @@ func getAllCatalogSources(olmClient v1alpha1.OperatorsV1alpha1Interface) (out [] return out } -// getAllPackageManifests is a helper function to get the all the PackageManifests in a cluster. +// getAllPackageManifests Retrieves all PackageManifest resources from the cluster +// +// The function calls the client’s List method to obtain a list of +// PackageManifests, handling any error by logging it and returning an empty +// slice. It then iterates over the returned items, appending pointers to each +// manifest into a new slice. The resulting slice of pointers is returned to the +// caller. func getAllPackageManifests(olmPkgClient olmpkgclient.PackageManifestInterface) (out []*olmpkgv1.PackageManifest) { packageManifestsList, err := olmPkgClient.List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -243,7 +308,12 @@ func getAllPackageManifests(olmPkgClient olmpkgclient.PackageManifestInterface) return out } -// getOperandPodsFromTestCsvs returns a subset of pods whose owner CRs are managed by any of the testCsvs. +// getOperandPodsFromTestCsvs Identifies pods whose owner custom resources are managed by the provided operators +// +// The function scans each supplied operator CSV to build a map of the CRDs it +// owns, then iterates through all pods, retrieving their top‑level owners. 
If +// a pod’s owning CRD matches one in the map, that pod is added to the result +// list. It returns the filtered slice and an error if any step fails. func getOperandPodsFromTestCsvs(testCsvs []*olmv1Alpha.ClusterServiceVersion, pods []corev1.Pod) ([]*corev1.Pod, error) { // Helper var to store all the managed crds from the operators under test // They map key is "Kind.group/version" or "Kind.APIversion", which should be the same. diff --git a/pkg/autodiscover/autodiscover_pdbs.go b/pkg/autodiscover/autodiscover_pdbs.go index c355c5fd4..727bd5fa0 100644 --- a/pkg/autodiscover/autodiscover_pdbs.go +++ b/pkg/autodiscover/autodiscover_pdbs.go @@ -24,6 +24,13 @@ import ( policyv1client "k8s.io/client-go/kubernetes/typed/policy/v1" ) +// getPodDisruptionBudgets Collects pod disruption budgets across specified namespaces +// +// The function iterates over a list of namespace names, requesting the pod +// disruption budgets present in each one via the Kubernetes policy client. It +// aggregates all retrieved items into a single slice and returns them along +// with any error that occurs during listing. If an error is encountered for a +// namespace, the function aborts immediately and propagates the error. func getPodDisruptionBudgets(oc policyv1client.PolicyV1Interface, namespaces []string) ([]policyv1.PodDisruptionBudget, error) { podDisruptionBudgets := []policyv1.PodDisruptionBudget{} for _, ns := range namespaces { diff --git a/pkg/autodiscover/autodiscover_pods.go b/pkg/autodiscover/autodiscover_pods.go index 59339a22e..1f61805db 100644 --- a/pkg/autodiscover/autodiscover_pods.go +++ b/pkg/autodiscover/autodiscover_pods.go @@ -26,6 +26,13 @@ import ( corev1client "k8s.io/client-go/kubernetes/typed/core/v1" ) +// findPodsMatchingAtLeastOneLabel Retrieves pods that match any provided label in a namespace +// +// The function iterates over each supplied label, querying the Kubernetes API +// for pods that have the corresponding key-value pair. 
It accumulates all +// matching pod objects into a single list, logging errors but continuing on +// failures. The resulting list is returned, containing every pod that satisfies +// at least one of the specified labels. func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []labelObject, namespace string) *corev1.PodList { allPods := &corev1.PodList{} for _, l := range labels { @@ -42,6 +49,14 @@ func findPodsMatchingAtLeastOneLabel(oc corev1client.CoreV1Interface, labels []l return allPods } +// FindPodsByLabels Retrieves pods matching specified labels across namespaces +// +// The function queries each provided namespace for pods, optionally filtering +// by one or more label key/value pairs. It returns two slices: runningPods +// contains only those that are not marked for deletion and either in the +// Running phase or allowed non‑running per configuration; allPods includes +// every pod found regardless of status. Errors during listing are logged and +// skipped. func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, namespaces []string) (runningPods, allPods []corev1.Pod) { runningPods = []corev1.Pod{} allPods = []corev1.Pod{} @@ -75,6 +90,12 @@ func FindPodsByLabels(oc corev1client.CoreV1Interface, labels []labelObject, nam return runningPods, allPods } +// CountPodsByStatus Counts running versus non‑running pods +// +// The function iterates over a slice of pod objects, incrementing counters for +// those in the Running phase versus all others. It returns a map with keys +// "ready" and "non-ready" holding the respective counts. The result is used to +// track pod state before and after test execution. 
func CountPodsByStatus(allPods []corev1.Pod) map[string]int { podStates := map[string]int{ "ready": 0, diff --git a/pkg/autodiscover/autodiscover_podset.go b/pkg/autodiscover/autodiscover_podset.go index bc42e2a9d..c3d8050eb 100644 --- a/pkg/autodiscover/autodiscover_podset.go +++ b/pkg/autodiscover/autodiscover_podset.go @@ -29,6 +29,12 @@ import ( "k8s.io/client-go/scale" ) +// FindDeploymentByNameByNamespace Retrieves a deployment by name within a specified namespace +// +// The function queries the Kubernetes API for a Deployment object using the +// provided client, namespace, and name. If the query fails, it logs an error +// and returns the encountered error; otherwise it returns the retrieved +// Deployment pointer. func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.Deployment, error) { dp, err := appClient.Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { @@ -37,6 +43,13 @@ func FindDeploymentByNameByNamespace(appClient appv1client.AppsV1Interface, name } return dp, nil } + +// FindStatefulsetByNameByNamespace Retrieves a StatefulSet by name within a specified namespace +// +// The function calls the Kubernetes API to fetch a StatefulSet resource using +// the provided client, namespace, and name. If the retrieval fails, it logs an +// error message and returns nil along with the encountered error; otherwise, it +// returns the fetched StatefulSet and a nil error. 
func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, namespace, name string) (*appsv1.StatefulSet, error) { ss, err := appClient.StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { @@ -46,6 +59,11 @@ func FindStatefulsetByNameByNamespace(appClient appv1client.AppsV1Interface, nam return ss, nil } +// FindCrObjectByNameByNamespace Retrieves a scaling object for a given resource +// +// The function queries the Kubernetes API to obtain a Scale resource identified +// by namespace, name, and group‑resource schema. It returns the retrieved +// scale object or an error if the request fails, logging a message on failure. func FindCrObjectByNameByNamespace(scalesGetter scale.ScalesGetter, ns, name string, groupResourceSchema schema.GroupResource) (*scalingv1.Scale, error) { crScale, err := scalesGetter.Scales(ns).Get(context.TODO(), groupResourceSchema, name, metav1.GetOptions{}) if err != nil { @@ -55,6 +73,12 @@ func FindCrObjectByNameByNamespace(scalesGetter scale.ScalesGetter, ns, name str return crScale, nil } +// isDeploymentsPodsMatchingAtLeastOneLabel checks if a deployment’s pod template contains any of the specified labels +// +// The function iterates over each provided label object, comparing its +// key/value pair against the labels defined in the deployment’s pod template. +// If it finds a match, it logs the discovery and returns true immediately. If +// no labels match after examining all options, it returns false. 
func isDeploymentsPodsMatchingAtLeastOneLabel(labels []labelObject, namespace string, deployment *appsv1.Deployment) bool { for _, aLabelObject := range labels { log.Debug("Searching pods in deployment %q found in ns %q using label %s=%s", deployment.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue) @@ -66,6 +90,14 @@ func isDeploymentsPodsMatchingAtLeastOneLabel(labels []labelObject, namespace st return false } +// findDeploymentsByLabels collects deployments matching specified labels across namespaces +// +// The function iterates over each namespace, listing all deployment objects. +// For every deployment it checks whether any of the provided label key/value +// pairs match the pod template labels; if so or if no labels were supplied, the +// deployment is added to a result slice. Errors during listing are logged and +// skipped, and a warning is emitted when a namespace contains no deployments. +// //nolint:dupl func findDeploymentsByLabels( appClient appv1client.AppsV1Interface, @@ -103,6 +135,12 @@ func findDeploymentsByLabels( return allDeployments } +// isStatefulSetsMatchingAtLeastOneLabel checks if a StatefulSet contains at least one pod label that matches the given list +// +// The function iterates over each supplied label object, comparing its key and +// value against the labels defined in the StatefulSet's pod template. If any +// match is found, it logs the discovery and returns true; otherwise it returns +// false after examining all labels. 
func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace string, statefulSet *appsv1.StatefulSet) bool { for _, aLabelObject := range labels { log.Debug("Searching pods in statefulset %q found in ns %q using label %s=%s", statefulSet.Name, namespace, aLabelObject.LabelKey, aLabelObject.LabelValue) @@ -114,6 +152,15 @@ func isStatefulSetsMatchingAtLeastOneLabel(labels []labelObject, namespace strin return false } +// findStatefulSetsByLabels Retrieves statefulsets matching specified labels across namespaces +// +// The function iterates over each provided namespace, listing all StatefulSet +// objects via the client interface. It then filters those sets by checking if +// any of the supplied label key/value pairs match the pod template labels +// inside a StatefulSet; if no labels are given it includes every set found. +// Matching or included StatefulSets are collected into a slice that is +// returned, with warnings logged when none are found. +// //nolint:dupl func findStatefulSetsByLabels( appClient appv1client.AppsV1Interface, @@ -151,6 +198,13 @@ func findStatefulSetsByLabels( return allStatefulSets } +// findHpaControllers Collects all HorizontalPodAutoscaler objects across given namespaces +// +// The function iterates over each namespace provided, listing the +// HorizontalPodAutoscalers in that namespace using the Kubernetes client. Each +// discovered HPA is appended to a slice which is returned after all namespaces +// are processed. If no HPAs are found or an error occurs during listing, +// appropriate log messages are emitted and an empty slice may be returned. 
func findHpaControllers(cs kubernetes.Interface, namespaces []string) []*scalingv1.HorizontalPodAutoscaler { var m []*scalingv1.HorizontalPodAutoscaler for _, ns := range namespaces { diff --git a/pkg/autodiscover/autodiscover_pv.go b/pkg/autodiscover/autodiscover_pv.go index ac5029a39..fb30a62d4 100644 --- a/pkg/autodiscover/autodiscover_pv.go +++ b/pkg/autodiscover/autodiscover_pv.go @@ -27,6 +27,10 @@ import ( storagev1typed "k8s.io/client-go/kubernetes/typed/storage/v1" ) +// getPersistentVolumes Retrieves all persistent volumes in the cluster +// +// The function calls the core V1 client to list PersistentVolume resources, +// returning a slice of those objects or an error if the API call fails. func getPersistentVolumes(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolume, error) { pvs, err := oc.PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -35,6 +39,13 @@ func getPersistentVolumes(oc corev1client.CoreV1Interface) ([]corev1.PersistentV return pvs.Items, nil } +// getPersistentVolumeClaims Retrieves all PersistentVolumeClaim objects from the cluster +// +// This function queries the Kubernetes API for every PersistentVolumeClaim +// across all namespaces, returning a slice of claim objects or an error if the +// request fails. It performs a List operation with no namespace filter and uses +// a context placeholder. The resulting claims are extracted from the response +// items field. func getPersistentVolumeClaims(oc corev1client.CoreV1Interface) ([]corev1.PersistentVolumeClaim, error) { pvcs, err := oc.PersistentVolumeClaims("").List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -43,6 +54,12 @@ func getPersistentVolumeClaims(oc corev1client.CoreV1Interface) ([]corev1.Persis return pvcs.Items, nil } +// getAllStorageClasses Retrieves all storage classes from the cluster +// +// The function queries the Kubernetes API for a list of StorageClass objects +// using the provided client interface. 
It returns the slice of discovered +// storage classes or an error if the list operation fails, logging any errors +// encountered. func getAllStorageClasses(client storagev1typed.StorageV1Interface) ([]storagev1.StorageClass, error) { storageclasslist, err := client.StorageClasses().List(context.TODO(), metav1.ListOptions{}) if err != nil { diff --git a/pkg/autodiscover/autodiscover_rbac.go b/pkg/autodiscover/autodiscover_rbac.go index a049b9a37..8bcfa8db0 100644 --- a/pkg/autodiscover/autodiscover_rbac.go +++ b/pkg/autodiscover/autodiscover_rbac.go @@ -25,7 +25,12 @@ import ( rbacv1typed "k8s.io/client-go/kubernetes/typed/rbac/v1" ) -// getRoleBindings returns all of the rolebindings in the cluster +// getRoleBindings retrieves all rolebindings across every namespace +// +// This function queries the Kubernetes RBAC API for RoleBinding objects in +// every namespace by using an empty string selector. It returns a slice of +// RoleBinding instances or an error if the list operation fails, logging the +// failure before propagating it. func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, error) { // Get all of the rolebindings from all namespaces roleList, roleErr := client.RoleBindings("").List(context.TODO(), metav1.ListOptions{}) @@ -36,7 +41,12 @@ func getRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.RoleBinding, return roleList.Items, nil } -// getClusterRoleBindings returns all of the clusterrolebindings in the cluster +// getClusterRoleBindings retrieves all cluster‑level role bindings +// +// This function calls the Kubernetes RBAC API to list every ClusterRoleBinding +// in the cluster, ignoring namespaces because they are cluster scoped. It +// returns a slice of the bindings or an error if the request fails, logging any +// failure for debugging purposes. 
func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.ClusterRoleBinding, error) { // Get all of the clusterrolebindings from the cluster // These are not namespaced so we want all of them @@ -48,7 +58,12 @@ func getClusterRoleBindings(client rbacv1typed.RbacV1Interface) ([]rbacv1.Cluste return crbList.Items, nil } -// getRoles returns all of the roles in the cluster +// getRoles retrieves all roles from all namespaces +// +// The function queries the Kubernetes RBAC API to list every Role resource +// across all namespaces, returning a slice of role objects or an error if the +// request fails. It logs any errors encountered during the API call before +// propagating them to the caller. func getRoles(client rbacv1typed.RbacV1Interface) ([]rbacv1.Role, error) { // Get all of the roles from all namespaces roleList, roleErr := client.Roles("").List(context.TODO(), metav1.ListOptions{}) diff --git a/pkg/autodiscover/autodiscover_resources.go b/pkg/autodiscover/autodiscover_resources.go index 072032921..a7e57b60e 100644 --- a/pkg/autodiscover/autodiscover_resources.go +++ b/pkg/autodiscover/autodiscover_resources.go @@ -24,6 +24,14 @@ import ( corev1client "k8s.io/client-go/kubernetes/typed/core/v1" ) +// getResourceQuotas Retrieves all resource quotas from the cluster +// +// The function queries the Kubernetes API for every ResourceQuota object across +// all namespaces by calling List on the client’s ResourceQuotas interface +// with an empty namespace and default list options. It returns a slice +// containing each quota found and propagates any error that occurs during the +// request. The result is used to populate autodiscovery data about cluster +// limits. 
func getResourceQuotas(oc corev1client.CoreV1Interface) ([]corev1.ResourceQuota, error) { rql, err := oc.ResourceQuotas("").List(context.TODO(), metav1.ListOptions{}) if err != nil { diff --git a/pkg/autodiscover/autodiscover_scales.go b/pkg/autodiscover/autodiscover_scales.go index e9196f44e..5d7f1d46e 100644 --- a/pkg/autodiscover/autodiscover_scales.go +++ b/pkg/autodiscover/autodiscover_scales.go @@ -12,11 +12,23 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) +// ScaleObject represents a scalable custom resource +// +// This structure holds the scale subresource of a custom resource, along with +// its group‑resource identity. It is used to read or modify the replica count +// for that resource via the Kubernetes scaling API. type ScaleObject struct { Scale *scalingv1.Scale GroupResourceSchema schema.GroupResource } +// GetScaleCrUnderTest Retrieves scalable custom resources across specified namespaces +// +// It iterates over a list of CustomResourceDefinitions, filtering for +// namespace-scoped and having a scale subresource. For each qualifying CRD it +// lists the custom resources in the provided namespaces using a dynamic client, +// then gathers their scale objects. The result is a slice of ScaleObject +// containing scaling information for each found resource. func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDefinition) []ScaleObject { dynamicClient := clientsholder.GetClientsHolder().DynamicClient @@ -62,6 +74,13 @@ func GetScaleCrUnderTest(namespaces []string, crds []*apiextv1.CustomResourceDef return scaleObjects } +// getCrScaleObjects Retrieves scaling information for custom resources +// +// This function iterates over a list of unstructured custom resources, querying +// the Kubernetes scaling API to obtain each resource's scale subresource. It +// constructs a group-resource schema from the CRD metadata and appends each +// retrieved ScaleObject to a slice. 
Errors during retrieval are logged fatally, +// ensuring only successfully fetched scales are returned. func getCrScaleObjects(crs []unstructured.Unstructured, crd *apiextv1.CustomResourceDefinition) []ScaleObject { var scaleObjects []ScaleObject clients := clientsholder.GetClientsHolder() diff --git a/pkg/autodiscover/autodiscover_service_accounts.go b/pkg/autodiscover/autodiscover_service_accounts.go index 4567c2833..68d2a1f66 100644 --- a/pkg/autodiscover/autodiscover_service_accounts.go +++ b/pkg/autodiscover/autodiscover_service_accounts.go @@ -23,6 +23,13 @@ import ( corev1client "k8s.io/client-go/kubernetes/typed/core/v1" ) +// getServiceAccounts Collects all ServiceAccount objects from specified namespaces +// +// The function iterates over each namespace in the input list, querying the +// Kubernetes API to retrieve the ServiceAccounts present there. Each retrieved +// account is appended to a slice of pointers that is returned to the caller. If +// any API call fails, the error is propagated immediately and no further +// namespaces are processed. func getServiceAccounts(oc corev1client.CoreV1Interface, namespaces []string) (servicesAccounts []*corev1.ServiceAccount, err error) { for _, ns := range namespaces { s, err := oc.ServiceAccounts(ns).List(context.TODO(), metav1.ListOptions{}) diff --git a/pkg/autodiscover/autodiscover_services.go b/pkg/autodiscover/autodiscover_services.go index d7370e875..f920d1fd4 100644 --- a/pkg/autodiscover/autodiscover_services.go +++ b/pkg/autodiscover/autodiscover_services.go @@ -24,6 +24,13 @@ import ( corev1client "k8s.io/client-go/kubernetes/typed/core/v1" ) +// getServices Retrieves services from specified namespaces while excluding ignored names +// +// The function iterates over a list of namespace strings, querying the +// Kubernetes API for services in each one. It filters out any service whose +// name appears in an ignore list using a helper that checks string membership. 
+// Matching services are collected into a slice and returned; if any API call +// fails, the error is propagated immediately. func getServices(oc corev1client.CoreV1Interface, namespaces, ignoreList []string) (allServices []*corev1.Service, err error) { for _, ns := range namespaces { s, err := oc.Services(ns).List(context.TODO(), metav1.ListOptions{}) diff --git a/pkg/autodiscover/autodiscover_sriov.go b/pkg/autodiscover/autodiscover_sriov.go index cc8978149..863bfa1d2 100644 --- a/pkg/autodiscover/autodiscover_sriov.go +++ b/pkg/autodiscover/autodiscover_sriov.go @@ -24,6 +24,12 @@ var SriovNetworkNodePolicyGVR = schema.GroupVersionResource{ Resource: "sriovnetworknodepolicies", } +// getSriovNetworks Retrieves all SR‑IOV network resources from the specified namespaces +// +// The function iterates over each namespace, using a dynamic client to list +// objects of the SR‑IOV Network type. It skips namespaces where the resource +// is not found and aggregates the items into a single slice. If the client or +// its DynamicClient is nil, it safely returns an empty result without error. func getSriovNetworks(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworks []unstructured.Unstructured, err error) { // Check for nil client or DynamicClient to prevent panic if client == nil || client.DynamicClient == nil { @@ -46,6 +52,13 @@ func getSriovNetworks(client *clientsholder.ClientsHolder, namespaces []string) return sriovNetworkList, nil } +// getSriovNetworkNodePolicies Collects SR-IOV network node policies from specified namespaces +// +// The function iterates over each provided namespace, querying the dynamic +// client for SR‑IOV network node policy resources. It aggregates all found +// items into a single slice, handling missing clients or non‑existent +// resources gracefully by returning an empty list instead of panicking. Errors +// unrelated to a resource not being found are propagated back to the caller. 
func getSriovNetworkNodePolicies(client *clientsholder.ClientsHolder, namespaces []string) (sriovNetworkNodePolicies []unstructured.Unstructured, err error) { // Check for nil client or DynamicClient to prevent panic if client == nil || client.DynamicClient == nil { diff --git a/pkg/certsuite/certsuite.go b/pkg/certsuite/certsuite.go index 35ff91597..b2869d327 100644 --- a/pkg/certsuite/certsuite.go +++ b/pkg/certsuite/certsuite.go @@ -30,6 +30,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/tests/preflight" ) +// LoadInternalChecksDB Initializes all test suites for internal checks +// +// This function calls the LoadChecks functions of each test package, +// registering their individual tests with the shared checks database. It +// ensures that all internal test groups are available before any preflight or +// label filtering occurs. No return value is produced and it performs no error +// handling itself. func LoadInternalChecksDB() { accesscontrol.LoadChecks() certification.LoadChecks() @@ -42,6 +49,12 @@ func LoadInternalChecksDB() { operator.LoadChecks() } +// LoadChecksDB Initializes test checks based on a label expression +// +// The function loads internal check definitions, then evaluates whether +// preflight tests should run for the provided labels. If allowed, it triggers +// the loading of preflight-specific checks. It returns no value and +// relies on side effects to prepare the checks database. func LoadChecksDB(labelsExpr string) { LoadInternalChecksDB() @@ -58,6 +71,13 @@ const ( noLabelsFilterExpr = "none" ) +// getK8sClientsConfigFileNames Collects Kubernetes configuration file paths +// +// The function retrieves test parameters to determine if a custom kubeconfig +// path is specified, then adds it to the list of filenames. It also checks for +// a default config in the user's home directory under .kube/config, appending +// it only if the file exists. 
The resulting slice contains zero or more valid +// configuration paths used elsewhere to initialize client holders. func getK8sClientsConfigFileNames() []string { params := configuration.GetTestParameters() fileNames := []string{} @@ -81,6 +101,14 @@ func getK8sClientsConfigFileNames() []string { return fileNames } +// Startup Initializes the certification suite runtime +// +// The function retrieves global test parameters, prepares a label expression +// evaluator for filtering tests, creates or replaces the log file in the output +// directory, and warns if no labels are provided. It then loads Kubernetes +// client configurations, initializes the checks database according to the label +// filter, and outputs version and configuration information to both the console +// and log. Finally, it displays a banner before the suite begins execution. func Startup() { testParams := configuration.GetTestParameters() @@ -119,6 +147,12 @@ func Startup() { fmt.Printf("\n") } +// Shutdown Closes the global log file +// +// The function attempts to close the globally opened log file used throughout +// the test suite. If an error occurs during closure, it writes a message to +// standard error and terminates the program with a non‑zero exit code. No +// value is returned. func Shutdown() { err := log.CloseGlobalLogFile() if err != nil { @@ -127,6 +161,17 @@ func Shutdown() { } } +// Run Executes the certification test suite and produces results artifacts +// +// This function initiates discovery of CNF target resources, runs all +// configured checks with a timeout, and records pod states before and after +// execution. It builds a claim file containing check outcomes, optionally +// generates JUnit XML, sanitizes claims based on label filters, and may send +// the collected results to an external collector or Red Hat Connect API. 
+// Finally it creates HTML artifacts for viewing, compresses all outputs into a +// zip file if requested, and cleans up temporary files according to user +// preferences. +// //nolint:funlen,gocyclo func Run(labelsFilter, outputFolder string) error { testParams := configuration.GetTestParameters() diff --git a/pkg/checksdb/check.go b/pkg/checksdb/check.go index 9c33bfa5d..847692890 100644 --- a/pkg/checksdb/check.go +++ b/pkg/checksdb/check.go @@ -29,10 +29,24 @@ const ( type CheckResult string +// CheckResult.String Converts the result code to a readable string +// +// This method casts the underlying CheckResult type, which is an alias of +// string, into a standard Go string. It returns the textual representation of +// the check outcome, such as "Passed", "Failed" or "Skipped". The conversion +// allows callers to use the result in logs and comparisons without needing to +// know the internal type. func (cr CheckResult) String() string { return string(cr) } +// Check Represents an individual compliance check +// +// This type holds configuration, state, and results for a single test. It +// tracks identifiers, labels, timing, timeouts, and any error that occurs +// during execution. The struct also contains optional functions to run before, +// after, or as the main check logic, along with mechanisms for skipping, +// aborting, and logging output. type Check struct { mutex sync.Mutex ID string @@ -58,6 +72,13 @@ type Check struct { abortChan chan string } +// NewCheck Creates a new check instance +// +// This function constructs a Check object with the provided identifier and +// label set. It assigns an initial passed result status, creates a string +// builder for log storage, and attaches a multi‑logger that records events +// specific to this check. The fully initialized Check is then returned as a +// pointer. 
func NewCheck(id string, labels []string) *Check { check := &Check{ ID: id, @@ -71,6 +92,12 @@ func NewCheck(id string, labels []string) *Check { return check } +// Check.Abort Aborts a check immediately with an error message +// +// The method locks the check’s mutex, constructs a descriptive abort message +// using the check ID and the supplied reason, sends this message on the abort +// channel, then panics to terminate execution. It is used to halt a check that +// encounters a non‑graceful failure condition. func (check *Check) Abort(reason string) { check.mutex.Lock() defer check.mutex.Unlock() @@ -81,40 +108,94 @@ func (check *Check) Abort(reason string) { panic(AbortPanicMsg(abortMsg)) } +// Check.SetAbortChan Assigns a channel to signal check abortion +// +// This method records the supplied channel into the check instance so that the +// check can listen for abort signals during execution. It performs a simple +// field assignment and does not return any value. The stored channel is later +// used by other parts of the framework to terminate the check prematurely when +// needed. func (check *Check) SetAbortChan(abortChan chan string) { check.abortChan = abortChan } +// Check.LogDebug logs a debug message with optional formatting +// +// This method sends a formatted string to the check's logger at the debug +// level, allowing additional arguments for interpolation. It forwards the call +// to an internal logging helper that determines if the debug level is enabled +// before emitting the record. No value is returned. func (check *Check) LogDebug(msg string, args ...any) { log.Logf(check.logger, log.LevelDebug, msg, args...) } +// Check.LogInfo Logs an informational message for a check +// +// This method forwards the supplied format string and arguments to a logging +// helper, tagging the output with the Info level. It uses the check's internal +// logger if available or falls back to a default logger. 
The function does not +// return any value; it simply emits the formatted log entry. func (check *Check) LogInfo(msg string, args ...any) { log.Logf(check.logger, log.LevelInfo, msg, args...) } +// Check.LogWarn logs a warning message for the check +// +// The method formats a message with optional arguments and forwards it to the +// internal logger at the warning level. It does not alter any state of the +// Check instance, only records diagnostic information that can be inspected +// later. func (check *Check) LogWarn(msg string, args ...any) { log.Logf(check.logger, log.LevelWarn, msg, args...) } +// Check.LogError logs an error message for the check +// +// This method sends a formatted string and optional arguments to the logging +// system at the error level, associating the log with the specific check +// instance. It uses the check's logger field or falls back to a default if nil. +// The function does not return any value. func (check *Check) LogError(msg string, args ...any) { log.Logf(check.logger, log.LevelError, msg, args...) } +// Check.LogFatal Logs a fatal message and terminates the program +// +// The method records a fatal log entry using the provided logger, prints the +// message to standard error prefixed with "FATAL:", and then exits the process +// with status code 1. It accepts a format string and optional arguments, which +// are passed to both the logger and the formatted output. func (check *Check) LogFatal(msg string, args ...any) { log.Logf(check.logger, log.LevelFatal, msg, args...) fmt.Fprintf(os.Stderr, "\nFATAL: "+msg+"\n", args...) os.Exit(1) } +// Check.GetLogs Retrieves stored log output +// +// This method returns the accumulated log data for a check as a single string. +// The logs are gathered during the check's execution and stored in an internal +// buffer, which this function simply exposes to callers such as reporting or +// result recording functions. 
func (check *Check) GetLogs() string { return check.logArchive.String() } +// Check.GetLogger Provides access to the check's logger +// +// The method returns the logger stored in the Check instance, allowing callers +// to log messages related to that specific check. It does not modify the state +// and simply exposes the internal logger pointer. func (check *Check) GetLogger() *log.Logger { return check.logger } +// Check.WithCheckFn Assigns a new check function only when no previous error exists +// +// This method first checks whether the Check instance already contains an +// error; if so, it returns the instance unchanged. Otherwise, it assigns the +// provided function to the CheckFn field and then returns the modified instance +// for chaining. func (check *Check) WithCheckFn(checkFn func(check *Check) error) *Check { if check.Error != nil { return check @@ -124,6 +205,12 @@ func (check *Check) WithCheckFn(checkFn func(check *Check) error) *Check { return check } +// Check.WithBeforeCheckFn Assigns a custom function to run before the main check +// +// The method accepts a callback that receives the current Check instance and +// may return an error. If the Check already contains an error, it skips +// assignment and returns the Check unchanged; otherwise, it stores the callback +// in BeforeCheckFn and returns the same Check pointer for chaining. func (check *Check) WithBeforeCheckFn(beforeCheckFn func(check *Check) error) *Check { if check.Error != nil { return check @@ -133,6 +220,12 @@ func (check *Check) WithBeforeCheckFn(beforeCheckFn func(check *Check) error) *C return check } +// Check.WithAfterCheckFn Sets a callback to run after the check completes +// +// The method attaches a function that will be invoked once the check finishes, +// provided no error has already occurred. It stores the supplied function in +// the AfterCheckFn field of the Check instance and returns the same instance +// for chaining. 
func (check *Check) WithAfterCheckFn(afterCheckFn func(check *Check) error) *Check { if check.Error != nil { return check @@ -142,6 +235,13 @@ func (check *Check) WithAfterCheckFn(afterCheckFn func(check *Check) error) *Che return check } +// Check.WithSkipCheckFn Adds functions that decide whether a test should be skipped +// +// When called, this method appends one or more supplied functions to the +// receiver's list of skip-check callbacks, but only if no previous error has +// been recorded on the Check instance. Each added function returns a boolean +// indicating whether skipping is required and an optional reason string. The +// updated Check pointer is then returned for chaining. func (check *Check) WithSkipCheckFn(skipCheckFn ...func() (skip bool, reason string)) *Check { if check.Error != nil { return check @@ -152,8 +252,13 @@ func (check *Check) WithSkipCheckFn(skipCheckFn ...func() (skip bool, reason str return check } -// This modifier is provided for the sake of completeness, but it's not necessary to use it, -// as the SkipModeAny is the default skip mode. +// Check.WithSkipModeAny sets the check to always skip when appropriate +// +// This method changes the internal skip mode of a check to allow it to be +// skipped under any circumstance that matches the default logic. If an error is +// already present on the check, the call becomes a no‑op and simply returns +// the existing instance. Otherwise it assigns SkipModeAny to the check and +// returns the updated object for chaining. func (check *Check) WithSkipModeAny() *Check { if check.Error != nil { return check @@ -164,6 +269,14 @@ func (check *Check) WithSkipModeAny() *Check { return check } +// Check.WithSkipModeAll enables all-skip mode +// +// This method changes a check's configuration so that it will skip any +// remaining steps or validations, effectively marking the check as fully +// skipped. 
It first verifies that no error has already been recorded on the +// check; if an error exists, it returns immediately without modifying the +// state. When successful, it assigns the SkipModeAll constant to the check and +// returns the modified check for further chaining. func (check *Check) WithSkipModeAll() *Check { if check.Error != nil { return check @@ -174,6 +287,12 @@ func (check *Check) WithSkipModeAll() *Check { return check } +// Check.WithTimeout assigns a timeout value to the check +// +// If the check has not already encountered an error, this method updates its +// Timeout field with the supplied duration and returns the modified check for +// chaining. If an error is present, it simply returns the check unchanged so +// that subsequent operations are skipped. func (check *Check) WithTimeout(duration time.Duration) *Check { if check.Error != nil { return check @@ -184,6 +303,14 @@ func (check *Check) WithTimeout(duration time.Duration) *Check { return check } +// Check.SetResult stores compliance results for a check +// +// This method records the list of compliant and non‑compliant objects for a +// check, converting them into a JSON string that is kept in the details field. +// It locks the check’s mutex to ensure thread safety, skips any changes if +// the check has already been aborted or errored, and updates the result status +// based on whether there are failures or no objects at all. Errors during +// serialization are logged as error messages. func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { check.mutex.Lock() defer check.mutex.Unlock() @@ -215,6 +342,12 @@ func (check *Check) SetResult(compliantObjects, nonCompliantObjects []*testhelpe } } +// Check.SetResultSkipped Marks a check as skipped with an optional reason +// +// When invoked, this method acquires the check’s mutex to ensure thread +// safety, then sets the result status to skipped unless the check has already +// been aborted. 
It records the provided reason for skipping, which can be used +// for reporting or debugging purposes. func (check *Check) SetResultSkipped(reason string) { check.mutex.Lock() defer check.mutex.Unlock() @@ -227,6 +360,12 @@ func (check *Check) SetResultSkipped(reason string) { check.skipReason = reason } +// Check.SetResultError Marks a check as failed with an error reason +// +// This method locks the check’s mutex, verifies that it has not already been +// aborted or marked as an error, then sets its result to error and records the +// supplied reason. If the check is already in an error state, a warning log is +// emitted instead of changing the state. func (check *Check) SetResultError(reason string) { check.mutex.Lock() defer check.mutex.Unlock() @@ -243,6 +382,11 @@ func (check *Check) SetResultError(reason string) { check.skipReason = reason } +// Check.SetResultAborted Marks a check as aborted with a reason +// +// This method records that the check has been aborted, setting its result state +// accordingly. It stores the provided abort reason for later reference and +// protects the update with a mutex to ensure thread safety. func (check *Check) SetResultAborted(reason string) { check.mutex.Lock() defer check.mutex.Unlock() @@ -251,6 +395,14 @@ func (check *Check) SetResultAborted(reason string) { check.skipReason = reason } +// Check.Run Runs a check through its pre‑check, main, and post‑check stages +// +// The method first validates the receiver and any prior errors, then signals +// that the check is starting and records timestamps. It executes an optional +// before function, followed by the core check function, and finally an optional +// after function, each returning an error if they fail. If all stages succeed, +// it prints the final result based on the check's outcome and returns nil; +// otherwise it propagates the encountered error. 
func (check *Check) Run() error { if check == nil { return fmt.Errorf("check is a nil pointer") @@ -289,6 +441,12 @@ func (check *Check) Run() error { return nil } +// printCheckResult Displays the final status of a check +// +// The function examines the result field of a check object and calls an +// appropriate CLI helper to print a formatted message indicating pass, fail, +// skip, abort or error. It uses the check's ID and any skip reason when +// relevant, ensuring that the output line is cleared before printing. func printCheckResult(check *Check) { switch check.Result { case CheckResultPassed: diff --git a/pkg/checksdb/checksdb.go b/pkg/checksdb/checksdb.go index a0febdd7b..a89c530cc 100644 --- a/pkg/checksdb/checksdb.go +++ b/pkg/checksdb/checksdb.go @@ -29,6 +29,14 @@ var ( type AbortPanicMsg string +// RunChecks Executes all check groups with timeout and signal handling +// +// The function locks the database, starts a timeout timer, and listens for +// SIGINT or SIGTERM signals. It iterates over each check group, launching a +// goroutine to run its checks while monitoring for aborts or timeouts. After +// execution it records results, prints a summary table, logs failures, and +// returns the count of failed checks or an error if any occurred. +// //nolint:funlen func RunChecks(timeout time.Duration) (failedCtr int, err error) { dbLock.Lock() @@ -106,6 +114,14 @@ func RunChecks(timeout time.Duration) (failedCtr int, err error) { return failedCtr, nil } +// recordCheckResult Stores the check result in the results database +// +// The function looks up a claim ID for a given test, logs debugging information +// if none is found, and otherwise records various fields such as state, +// timestamps, duration, skip reason, captured output, details, category +// classification, and catalog metadata into the global resultsDB map. It +// formats strings to uppercase for logging and calculates duration in seconds +// from start and end times. 
func recordCheckResult(check *Check) { claimID, ok := identifiers.TestIDToClaimID[check.ID] if !ok { @@ -138,8 +154,13 @@ func recordCheckResult(check *Check) { } } -// GetReconciledResults is a function added to aggregate a Claim's results. Due to the limitations of -// certsuite-claim's Go Client, results are generalized to map[string]interface{}. +// GetReconciledResults Aggregates all stored check results into a map +// +// The function collects entries from an internal database of test outcomes, +// mapping each key to its corresponding claim result object. It ensures every +// key is represented in the returned map, initializing missing entries before +// assigning the actual data. The resulting map is used by other components to +// populate the final claim report. func GetReconciledResults() map[string]claim.Result { resultMap := make(map[string]claim.Result) for key := range resultsDB { @@ -159,6 +180,13 @@ const ( SKIPPED = 2 ) +// getResultsSummary generates a table of check results per group +// +// This function builds a map where each key is the name of a check group and +// the value is a slice of three integers counting passed, failed, and skipped +// checks. It iterates over all groups in the database, tallies results for each +// check according to its status, and stores the counts. The resulting map is +// returned for use by the CLI output. func getResultsSummary() map[string][]int { results := make(map[string][]int) for groupName, group := range dbByGroup { @@ -180,6 +208,14 @@ func getResultsSummary() map[string][]int { const nbColorSymbols = 9 +// printFailedChecksLog Displays logs for checks that failed +// +// This function iterates over all check groups and their individual checks, +// printing a formatted header and the log content only for those that did not +// succeed. 
For each failed check it calculates the appropriate number of dashes +// to align the header, prints separators, the colored header indicating the +// check ID, and then either the captured log or a message if no output was +// recorded. The function writes directly to standard output using fmt.Println. func printFailedChecksLog() { for _, group := range dbByGroup { for _, check := range group.checks { @@ -201,10 +237,23 @@ func printFailedChecksLog() { } } +// GetResults Retrieves the current mapping of check identifiers to their results +// +// The function returns a map where each key is a string identifier for a +// specific compliance check, and the corresponding value contains the result +// data for that check. It simply exposes an internal database that holds all +// recorded outcomes. No parameters are required or modified during its +// execution. func GetResults() map[string]claim.Result { return resultsDB } +// GetTestSuites Retrieves a list of unique test suite identifiers from the database +// +// This function iterates over all keys in an internal results map, collecting +// each distinct test suite name into a slice. It ensures no duplicates by +// checking membership before appending. The resulting slice of strings is +// returned for further processing. func GetTestSuites() []string { // Collect all of the unique test suites from the resultsDB var suites []string @@ -217,10 +266,22 @@ func GetTestSuites() []string { return suites } +// GetTotalTests Retrieves the number of tests stored in the database +// +// This function accesses an internal map that holds test results and returns +// its length as an integer. It provides a quick way to determine how many tests +// are currently recorded without exposing the underlying data structure. The +// result is returned immediately after calculating the count. 
func GetTotalTests() int { return len(resultsDB) } +// GetTestsCountByState Counts tests that match a given state +// +// The function iterates over the global results database, incrementing a +// counter each time an entry’s state equals the provided string. It then +// returns the total number of matching entries as an integer. This is useful +// for summarizing how many tests are in a particular status. func GetTestsCountByState(state string) int { count := 0 for r := range resultsDB { @@ -231,6 +292,12 @@ func GetTestsCountByState(state string) int { return count } +// FilterCheckIDs Retrieves test case identifiers that satisfy the current label filter +// +// The function iterates through all check groups in the database, evaluating +// each check's labels against a global expression evaluator. If a check passes +// the evaluation, its identifier is appended to a result slice. After +// processing all checks, the slice of matching IDs is returned with no error. func FilterCheckIDs() ([]string, error) { filteredCheckIDs := []string{} for _, group := range dbByGroup { @@ -244,6 +311,13 @@ func FilterCheckIDs() ([]string, error) { return filteredCheckIDs, nil } +// InitLabelsExprEvaluator Creates a label evaluator from a filter expression +// +// This function takes a string representing a label filter, expands the special +// keyword "all" into a comma‑separated list of known tags, then constructs a +// LabelsExprEvaluator using the helper in the labels package. If construction +// fails, it returns an error describing the problem; otherwise it stores the +// evaluator in a global variable for later use by other parts of the program. 
func InitLabelsExprEvaluator(labelsFilter string) error { // Expand the abstract "all" label into actual existing labels if labelsFilter == "all" { diff --git a/pkg/checksdb/checksgroup.go b/pkg/checksdb/checksgroup.go index 2dc76150a..b8a2b68c8 100644 --- a/pkg/checksdb/checksgroup.go +++ b/pkg/checksdb/checksgroup.go @@ -14,6 +14,13 @@ const ( checkIdxNone = -1 ) +// ChecksGroup Holds a collection of checks and orchestrates their execution +// +// This structure stores the group's name, the list of checks to run, and +// optional callback functions for before/after all and before/after each check. +// It tracks which check is currently executing to handle aborts or failures +// correctly. The group provides methods to add checks, run them with support +// for labeling, and record results. type ChecksGroup struct { name string checks []*Check @@ -25,6 +32,13 @@ type ChecksGroup struct { currentRunningCheckIdx int } +// NewChecksGroup creates or retrieves a checks group by name +// +// This function locks the global database, ensuring thread safety while +// accessing the map of groups. It initializes the map if necessary, then looks +// up an existing group with the given key. If found it returns that instance; +// otherwise it constructs a new ChecksGroup with default fields, stores it in +// the map, and returns it. func NewChecksGroup(groupName string) *ChecksGroup { dbLock.Lock() defer dbLock.Unlock() @@ -48,30 +62,60 @@ func NewChecksGroup(groupName string) *ChecksGroup { return group } +// ChecksGroup.WithBeforeAllFn Registers a function to run before all checks +// +// This method assigns the provided callback to the group, which will be +// executed with the slice of checks prior to any other operations. It returns +// the modified group for chaining purposes. 
func (group *ChecksGroup) WithBeforeAllFn(beforeAllFn func(checks []*Check) error) *ChecksGroup { group.beforeAllFn = beforeAllFn return group } +// ChecksGroup.WithBeforeEachFn Assigns a callback to execute prior to each check +// +// This method accepts a function that takes a check pointer and may return an +// error. It stores this function in the group's internal field so it will be +// invoked before each individual check runs. The group instance is returned, +// allowing further chained configuration calls. func (group *ChecksGroup) WithBeforeEachFn(beforeEachFn func(check *Check) error) *ChecksGroup { group.beforeEachFn = beforeEachFn return group } +// ChecksGroup.WithAfterEachFn Assigns a function that runs after every individual check +// +// This method stores the provided function as the group's post‑check hook, +// ensuring it is invoked with a reference to each Check object once the check +// completes. The stored callback can modify or inspect the check before the +// group continues processing. It returns the same ChecksGroup instance for +// chaining. func (group *ChecksGroup) WithAfterEachFn(afterEachFn func(check *Check) error) *ChecksGroup { group.afterEachFn = afterEachFn return group } +// ChecksGroup.WithAfterAllFn Assigns a callback to execute after all checks complete +// +// This method stores the supplied function in the ChecksGroup so it will be +// called with the list of executed checks once processing is finished. The +// stored function can perform cleanup or result aggregation. It returns the +// same group instance, allowing method chaining. func (group *ChecksGroup) WithAfterAllFn(afterAllFn func(checks []*Check) error) *ChecksGroup { group.afterAllFn = afterAllFn return group } +// ChecksGroup.Add Adds a check to the group +// +// This method acquires a global lock, appends the provided check to the group's +// internal slice, and then releases the lock. 
It ensures thread‑safe +// modification of the checks collection while keeping the operation simple and +// efficient. func (group *ChecksGroup) Add(check *Check) { dbLock.Lock() defer dbLock.Unlock() @@ -79,18 +123,38 @@ func (group *ChecksGroup) Add(check *Check) { group.checks = append(group.checks, check) } +// skipCheck Marks a check as skipped with a reason +// +// This function records an informational message indicating that the specified +// check will not be executed due to the supplied reason. It then updates the +// check’s status to skipped and displays the outcome using the standard +// output routine. func skipCheck(check *Check, reason string) { check.LogInfo("Skipping check %s, reason: %s", check.ID, reason) check.SetResultSkipped(reason) printCheckResult(check) } +// skipAll marks all remaining checks as skipped with a given reason +// +// This routine iterates over a slice of check objects, calling an internal +// helper for each one to log the skip action, set its result state to skipped, +// and output its status. The provided reason string is passed unchanged to +// every check so that downstream reporting can identify why the checks were not +// executed. No value is returned; the function simply updates each check's +// internal state. func skipAll(checks []*Check, reason string) { for _, check := range checks { skipCheck(check, reason) } } +// onFailure Handles a failure during group or check execution +// +// When a before/after or check function fails, this routine marks the current +// check as an error with a descriptive message. It then skips all remaining +// checks in the same group using a concise skip reason. Finally it returns a +// generic error that indicates which failure type occurred. func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck *Check, remainingChecks []*Check) error { // Set current Check's result as error. 
fmt.Printf("\r[ %s ] %-60s\n", cli.CheckResultTagError, currentCheck.ID) @@ -102,6 +166,12 @@ func onFailure(failureType, failureMsg string, group *ChecksGroup, currentCheck return errors.New(reason) } +// runBeforeAllFn Executes a group-wide setup routine before any checks run +// +// This function calls the optional beforeAllFn defined on a ChecksGroup, +// passing all checks to it. If the function panics or returns an error, the +// first check is marked as failed and all remaining checks are skipped with an +// explanatory reason. No other actions occur if beforeAllFn is nil. func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) { log.Debug("GROUP %s - Running beforeAll", group.name) if group.beforeAllFn == nil { @@ -127,6 +197,13 @@ func runBeforeAllFn(group *ChecksGroup, checks []*Check) (err error) { return nil } +// runAfterAllFn Executes the group's final cleanup routine +// +// When a checks group has finished running all its checks, this function +// invokes any registered afterAll hook with the entire list of checks. It logs +// the start and handles both panics and returned errors by marking the last +// executed check as failed and preventing further actions. The result is an +// error if the cleanup fails; otherwise nil. func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) { log.Debug("GROUP %s - Running afterAll", group.name) @@ -154,6 +231,13 @@ func runAfterAllFn(group *ChecksGroup, checks []*Check) (err error) { return nil } +// runBeforeEachFn Executes a group’s beforeEach hook for a specific check +// +// This function runs the optional beforeEachFn defined on a ChecksGroup, +// passing it the current Check. It captures panics or returned errors, logs +// diagnostic information, and records the failure by marking the check as +// errored and skipping subsequent checks. If no issues occur, the function +// simply returns nil. 
func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) { log.Debug("GROUP %s - Running beforeEach for check %s", group.name, check.ID) if group.beforeEachFn == nil { @@ -178,6 +262,13 @@ func runBeforeEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) return nil } +// runAfterEachFn Handles post‑check cleanup and error reporting +// +// This routine runs a group's afterEach function for each check, logging its +// start and capturing any panic or returned error. If the function panics, it +// logs the stack trace and marks the current check as failed without skipping +// subsequent checks. On a normal error, it reports the issue, sets the check +// result to an error state, and returns that error. func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) (err error) { log.Debug("GROUP %s - Running afterEach for check %s", group.name, check.ID) @@ -203,6 +294,14 @@ func runAfterEachFn(group *ChecksGroup, check *Check, remainingChecks []*Check) return nil } +// shouldSkipCheck decides whether a check should be skipped based on its skip functions +// +// The function evaluates each user-provided skip function, collecting any +// reasons for skipping. If any reason is found, it applies the check's SkipMode +// policy: SkipModeAny skips if at least one reason exists, while SkipModeAll +// requires all skip functions to indicate a skip. The function also recovers +// from panics in skip functions, logs an error, and treats that as a skip with +// a panic reason. func shouldSkipCheck(check *Check) (skip bool, reasons []string) { if len(check.SkipCheckFns) == 0 { return false, []string{} @@ -253,6 +352,13 @@ func shouldSkipCheck(check *Check) (skip bool, reasons []string) { return false, []string{} } +// runCheck Executes a check with error handling and panic recovery +// +// The function runs the provided check, capturing any panics or errors that +// occur during its execution. 
If a panic is detected, it distinguishes between +// an intentional abort and unexpected failures, logs detailed information, and +// marks subsequent checks as skipped. Successful completion returns nil, while +// any failure results in an error describing the issue. func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err error) { defer func() { if r := recover(); r != nil { @@ -278,19 +384,13 @@ func runCheck(check *Check, group *ChecksGroup, remainingChecks []*Check) (err e return nil } -// Runs all the checks in the group whose labels match the label expression filter. -// 1. Calls group.BeforeAll(). Then, for each Check in the group: -// 2. Calls group.BeforeEach() -> normally used to get/refresh the test environment variable. -// 3. Calls check.SkipCheckFn() -> if true, skip the check.Run() (step 4) -// 4. Calls check.Run() -> Will call the actual CNF Cert requirement check function. -// 5. Calls group.AfterEach() -// 6. Calls group.AfterAll() +// RunChecks executes a filtered set of checks with lifecycle hooks // -// Issues/errors/panics: -// - BeforeAll panic/error: Set first check as error. Run AfterAll() -// - BeforeEach panic/error: Set check as error and skip remaining. Skip check.Run(), run AfterEach + AfterAll. -// - Check.Run() panic/error: Set check as panicked. Run AfterEach + AfterAll -// - AfterEach panic: Set check as error. +// The method gathers checks whose labels match the group’s filter, then runs +// them in order while invoking BeforeAll, BeforeEach, AfterEach, and AfterAll +// callbacks. It handles skipping logic, abort signals, and panics by recording +// errors or marking checks as skipped/failed. The function returns any +// collected errors and a count of failed checks. 
// //nolint:funlen func (group *ChecksGroup) RunChecks(stopChan <-chan bool, abortChan chan string) (errs []error, failedChecks int) { @@ -377,6 +477,13 @@ func (group *ChecksGroup) RunChecks(stopChan <-chan bool, abortChan chan string) return errs, failedChecks } +// OnAbort handles a group’s abort by setting check results accordingly +// +// When an abort occurs, this method iterates over all checks in the group. +// Checks that do not match labels are marked as skipped with a label reason. If +// no check had started yet, every remaining check is skipped with the abort +// reason; otherwise the currently running check is marked aborted and +// subsequent checks are skipped. Each result is printed immediately. func (group *ChecksGroup) OnAbort(abortReason string) error { // If this wasn't the group with the aborted check. if group.currentRunningCheckIdx == checkIdxNone { @@ -408,6 +515,13 @@ func (group *ChecksGroup) OnAbort(abortReason string) error { return nil } +// RecordChecksResults logs each check result and stores it in the results database +// +// The method iterates over all checks in the group, invoking a helper that logs +// information about the test ID, state, and duration. For each check, it +// records the outcome in a shared map keyed by the test identifier, including +// metadata such as timestamps, skip reasons, and catalog references. This +// ensures that results are persisted for later reporting or further processing. 
func (group *ChecksGroup) RecordChecksResults() { log.Info("Recording checks results of group %s", group.name) for _, check := range group.checks { diff --git a/pkg/claimhelper/claimhelper.go b/pkg/claimhelper/claimhelper.go index c3c6fea50..fc70d5789 100644 --- a/pkg/claimhelper/claimhelper.go +++ b/pkg/claimhelper/claimhelper.go @@ -49,17 +49,36 @@ const ( TestStateSkipped = "skipped" ) +// SkippedMessage signals a skipped claim during processing +// +// This struct holds the text of a message that is omitted from normal output +// and any associated metadata. The Text field contains the raw XML character +// data while Messages stores an optional attribute providing additional +// context. It is used by the claim helper to record items that were +// intentionally left out during certificate claim generation. type SkippedMessage struct { Text string `xml:",chardata"` Messages string `xml:"message,attr,omitempty"` } +// FailureMessage Represents an error message returned by a claim helper operation +// +// The structure holds the error text as well as optional attributes for the +// message and its type. It is used to convey failure information in XML +// responses, with the Text field containing the main content, while Message and +// Type provide metadata that can be omitted if empty. type FailureMessage struct { Text string `xml:",chardata"` Message string `xml:"message,attr,omitempty"` Type string `xml:"type,attr,omitempty"` } +// TestCase Holds the results of an individual test run +// +// This structure stores metadata and outcome information for a single test +// case, including its name, class context, execution status, duration, and any +// error output. It also provides optional sub-structures to represent skipped +// or failed executions, enabling detailed reporting in XML format. 
type TestCase struct { Text string `xml:",chardata"` Name string `xml:"name,attr,omitempty"` @@ -71,6 +90,13 @@ type TestCase struct { Failure *FailureMessage `xml:"failure"` } +// Testsuite Represents the results of a test suite execution +// +// This struct holds metadata about a collection of tests, including counts for +// total tests, failures, errors, skipped and disabled cases. It also stores +// timing information, timestamps, and any properties that may be attached to +// the suite. Each individual test case is captured in a slice of TestCase +// structs, allowing detailed inspection of each test's outcome. type Testsuite struct { Text string `xml:",chardata"` Name string `xml:"name,attr,omitempty"` @@ -93,6 +119,13 @@ type Testsuite struct { Testcase []TestCase `xml:"testcase"` } +// TestSuitesXML Represents an XML report of test suite results +// +// This struct holds attributes such as the total number of tests, failures, +// disabled tests, errors, and elapsed time for a test run. It also contains a +// nested Testsuite element that provides more detailed information about each +// individual test case. The fields are marshaled into XML with corresponding +// attribute tags. type TestSuitesXML struct { XMLName xml.Name `xml:"testsuites"` Text string `xml:",chardata"` @@ -104,10 +137,23 @@ type TestSuitesXML struct { Testsuite Testsuite `xml:"testsuite"` } +// ClaimBuilder Creates and writes claim reports in various formats +// +// It gathers test results, populates the claim structure with metadata, +// configurations, and node information, then serializes the data to a file. The +// builder can also reset timestamps or output JUnit XML for CI integration. +// Errors during marshaling or file writing are logged as fatal. 
type ClaimBuilder struct { claimRoot *claim.Root } +// NewClaimBuilder creates a claim builder from test environment +// +// The function accepts a test environment, marshals its configuration into +// JSON, unmarshals it back into a map, and populates a new claim root with +// configurations, node information, and version data. It handles unit test mode +// by skipping marshalling steps. The resulting ClaimBuilder contains the fully +// prepared claim structure for later serialization. func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) { if os.Getenv("UNIT_TEST") == "true" { return &ClaimBuilder{ @@ -142,6 +188,13 @@ func NewClaimBuilder(env *provider.TestEnvironment) (*ClaimBuilder, error) { }, nil } +// Build generates a claim file with results and timestamps +// +// This method records the current time as the claim's end time, retrieves +// reconciled test results from the database, marshals the complete claim +// structure into JSON, writes that data to the specified output file, and logs +// the creation location. It relies on helper functions for marshalling and file +// writing and uses UTC formatting for consistency. func (c *ClaimBuilder) Build(outputFile string) { endTime := time.Now() @@ -155,6 +208,14 @@ func (c *ClaimBuilder) Build(outputFile string) { log.Info("Claim file created at %s", outputFile) } +// populateXMLFromClaim builds a JUnit XML representation of claim test results +// +// The function collects all test IDs from the claim, counts failures and skips, +// and constructs a TestSuitesXML structure with aggregated suite metrics. It +// iterates over sorted test IDs to create individual TestCase entries, +// calculating each case's duration and attaching skipped or failure messages as +// needed. The resulting XML object is returned for marshaling into a file. 
+// //nolint:funlen func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) TestSuitesXML { const ( @@ -253,6 +314,12 @@ func populateXMLFromClaim(c claim.Claim, startTime, endTime time.Time) TestSuite return xmlOutput } +// ToJUnitXML generates a JUnit XML file from claim data +// +// This method builds a structured JUnit XML representation of the current claim +// results, marshals it into indented XML, and writes it to the specified file +// path with appropriate permissions. It logs progress and aborts execution if +// marshalling or file writing fails. func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Time) { // Create the JUnit XML file from the claim output. xmlOutput := populateXMLFromClaim(*c.claimRoot.Claim, startTime, endTime) @@ -270,12 +337,22 @@ func (c *ClaimBuilder) ToJUnitXML(outputFile string, startTime, endTime time.Tim } } +// Reset updates the claim's start timestamp +// +// The method assigns the current UTC time, formatted with the predefined +// directive, to the Claim.Metadata.StartTime field of the builder. It performs +// this operation in place and does not return a value. func (c *ClaimBuilder) Reset() { c.claimRoot.Claim.Metadata.StartTime = time.Now().UTC().Format(DateTimeFormatDirective) } -// MarshalConfigurations creates a byte stream representation of the test configurations. In the event of an error, -// this method fatally fails. +// MarshalConfigurations converts test environment data into JSON bytes +// +// This routine accepts a pointer to the test configuration structure, falls +// back to a default instance if nil, and marshals it into a JSON byte slice. +// Errors during marshalling are logged as errors and returned for callers to +// handle. The function returns the resulting byte slice along with any error +// encountered. 
func MarshalConfigurations(env *provider.TestEnvironment) (configurations []byte, err error) { config := env if config == nil { @@ -289,8 +366,12 @@ func MarshalConfigurations(env *provider.TestEnvironment) (configurations []byte return configurations, nil } -// UnmarshalConfigurations creates a map from configurations byte stream. In the event of an error, this method fatally -// fails. +// UnmarshalConfigurations converts a JSON byte stream into a map of configurations +// +// The function takes raw configuration data as a byte slice and decodes it into +// a provided map using the standard JSON unmarshaler. If decoding fails, it +// logs a fatal error and terminates the program. The resulting map is populated +// with key/value pairs representing configuration settings. func UnmarshalConfigurations(configurations []byte, claimConfigurations map[string]interface{}) { err := j.Unmarshal(configurations, &claimConfigurations) if err != nil { @@ -298,7 +379,13 @@ func UnmarshalConfigurations(configurations []byte, claimConfigurations map[stri } } -// UnmarshalClaim unmarshals the claim file +// UnmarshalClaim parses a claim file into a structured root object +// +// This function takes raw bytes of a claim file and a pointer to a Root +// structure, attempting to unmarshal the data using JSON decoding. If +// unmarshalling fails, it logs a fatal error and terminates the program. On +// success, the provided Root instance is populated with the decoded +// information. func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) { err := j.Unmarshal(claimFile, &claimRoot) if err != nil { @@ -306,7 +393,12 @@ func UnmarshalClaim(claimFile []byte, claimRoot *claim.Root) { } } -// ReadClaimFile writes the output payload to the claim file. In the event of an error, this method fatally fails. +// ReadClaimFile Reads the contents of a claim file +// +// The function attempts to read a file at the provided path using standard I/O +// operations. 
It logs any errors encountered during reading but always returns +// the data slice, even if an error occurs, leaving error handling to the +// caller. A log entry records the file path that was accessed. func ReadClaimFile(claimFileName string) (data []byte, err error) { data, err = os.ReadFile(claimFileName) if err != nil { @@ -316,7 +408,14 @@ func ReadClaimFile(claimFileName string) (data []byte, err error) { return data, nil } -// GetConfigurationFromClaimFile retrieves configuration details from claim file +// GetConfigurationFromClaimFile extracts test environment configuration from a claim file +// +// The function reads the specified claim file, unmarshals its JSON contents +// into an intermediate structure, then marshals the embedded configuration +// section back to JSON before decoding it into a TestEnvironment object. It +// returns that object and any error encountered during reading or parsing. The +// process uses logging for read failures and ensures errors propagate to the +// caller. func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvironment, err error) { data, err := ReadClaimFile(claimFileName) if err != nil { @@ -334,8 +433,12 @@ func GetConfigurationFromClaimFile(claimFileName string) (env *provider.TestEnvi return env, err } -// MarshalClaimOutput is a helper function to serialize a claim as JSON for output. In the event of an error, this -// method fatally fails. +// MarshalClaimOutput Serializes a claim structure into formatted JSON +// +// The function receives a pointer to the root of a claim object and attempts to +// marshal it into indented JSON. If marshalling fails, it logs a fatal error +// and terminates the program. On success, it returns the resulting byte slice +// for further use. 
func MarshalClaimOutput(claimRoot *claim.Root) []byte { payload, err := j.MarshalIndent(claimRoot, "", " ") if err != nil { @@ -344,7 +447,11 @@ func MarshalClaimOutput(claimRoot *claim.Root) []byte { return payload } -// WriteClaimOutput writes the output payload to the claim file. In the event of an error, this method fatally fails. +// WriteClaimOutput Saves claim payload to a file +// +// This routine writes a byte slice containing claim data to the specified path +// using standard file permissions. If the write fails, it logs a fatal error +// and terminates the program. The function provides no return value. func WriteClaimOutput(claimOutputFile string, payload []byte) { log.Info("Writing claim data to %s", claimOutputFile) err := os.WriteFile(claimOutputFile, payload, claimFilePermissions) @@ -353,6 +460,13 @@ func WriteClaimOutput(claimOutputFile string, payload []byte) { } } +// GenerateNodes Collects node information for claim files +// +// This function aggregates several pieces of data about the cluster nodes, +// including a JSON representation of each node, CNI plugin details, hardware +// characteristics, and CSI driver status. It retrieves this information by +// calling diagnostic helpers that query the test environment or Kubernetes API. +// The resulting map is returned for inclusion in claim documents. func GenerateNodes() map[string]interface{} { const ( nodeSummaryField = "nodeSummary" @@ -368,8 +482,12 @@ func GenerateNodes() map[string]interface{} { return nodes } -// CreateClaimRoot creates the claim based on the model created in -// https://github.com/redhat-best-practices-for-k8s/certsuite-claim. +// CreateClaimRoot Initializes a claim root with current UTC timestamp +// +// The function obtains the present moment, formats it as an ISO‑8601 string +// in UTC, and embeds that value into a new claim structure. It returns a +// pointer to this freshly constructed root object for use by higher‑level +// builders. 
func CreateClaimRoot() *claim.Root { // Initialize the claim with the start time. startTime := time.Now() @@ -382,6 +500,14 @@ func CreateClaimRoot() *claim.Root { } } +// SanitizeClaimFile Removes results that do not match a labels filter +// +// The function reads the claim file, unmarshals it into a structured claim +// object, and then iterates over each test result. For every result it +// evaluates the provided label expression against the test’s labels; if the +// evaluation fails, that result is deleted from the claim. After filtering, the +// modified claim is written back to the original file path, which is returned +// along with any error encountered during processing. func SanitizeClaimFile(claimFileName, labelsFilter string) (string, error) { log.Info("Sanitizing claim file %s", claimFileName) data, err := ReadClaimFile(claimFileName) diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index cf5f2ea67..c0fb558da 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -14,6 +14,13 @@ const ( collectorUploadTimeout = 30 * time.Second ) +// addClaimFileToPostRequest Adds a claim file as multipart form data +// +// The function opens the specified file, creates a new part in the multipart +// writer using that file's name, copies the file contents into the part, and +// then returns any error encountered during these steps. It closes the file +// automatically with defer to avoid resource leaks. The result is ready for +// inclusion in an HTTP POST request. 
func addClaimFileToPostRequest(w *multipart.Writer, claimFilePath string) error { claimFile, err := os.Open(claimFilePath) if err != nil { @@ -30,6 +37,13 @@ func addClaimFileToPostRequest(w *multipart.Writer, claimFilePath string) error return nil } +// addVarFieldsToPostRequest Adds form fields for execution details +// +// This function writes three key-value pairs into a multipart request: the user +// who executed the operation, the partner name, and the decoded password. It +// creates each field using the writer's CreateFormField method and then writes +// the corresponding string value. If any step fails it returns an error; +// otherwise it completes silently. func addVarFieldsToPostRequest(w *multipart.Writer, executedBy, partnerName, password string) error { fw, err := w.CreateFormField("executed_by") if err != nil { @@ -57,6 +71,13 @@ func addVarFieldsToPostRequest(w *multipart.Writer, executedBy, partnerName, pas return nil } +// createSendToCollectorPostRequest Creates a multipart POST request to upload a claim file +// +// This function builds an HTTP POST request with form-data that includes the +// specified claim file and several text fields: executed_by, partner_name, and +// decoded_password. It writes these parts into a buffer using a multipart +// writer, sets the appropriate content type header, and returns the constructed +// request or an error if any step fails. func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password string) (*http.Request, error) { // Create a new buffer to hold the form-data var buffer bytes.Buffer @@ -86,6 +107,12 @@ func createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partn return req, nil } +// SendClaimFileToCollector Sends a claim file to a collector endpoint +// +// The function builds an HTTP POST request that includes the claim file and +// authentication fields, then executes it with a timeout. 
It returns any error +// encountered during request creation or execution; successful completion +// results in nil. func SendClaimFileToCollector(endPoint, claimFilePath, executedBy, partnerName, password string) error { // Temporary end point postReq, err := createSendToCollectorPostRequest(endPoint, claimFilePath, executedBy, partnerName, password) diff --git a/pkg/compatibility/compatibility.go b/pkg/compatibility/compatibility.go index e101a4a72..054ed9b57 100644 --- a/pkg/compatibility/compatibility.go +++ b/pkg/compatibility/compatibility.go @@ -40,6 +40,14 @@ const ( OCPStatusPreGA = "pre-general-availability" ) +// VersionInfo Holds release cycle dates and supported OS versions +// +// This structure stores the General Availability, Full Support Ends, and +// Maintenance Support Ends dates along with minimum supported RHCOS version and +// a list of accepted RHEL versions. The date fields are time.Time values that +// indicate key lifecycle milestones for an OpenShift product. The string slice +// records which Red Hat Enterprise Linux releases are compatible, allowing +// callers to validate platform compatibility. type VersionInfo struct { GADate time.Time // General Availability Date FSEDate time.Time // Full Support Ends Date @@ -235,10 +243,24 @@ var ( } ) +// GetLifeCycleDates Retrieves a map of OpenShift version lifecycle information +// +// This function returns a predefined mapping that associates each major.minor +// OpenShift release with its lifecycle dates, minimum supported RHEL versions, +// and accepted RHEL releases. The returned data structure is used by other +// functions to determine compatibility status for clusters, machines, and +// operating systems. No parameters are required, and the map is returned +// directly from an internal variable. 
func GetLifeCycleDates() map[string]VersionInfo { return ocpLifeCycleDates } +// BetaRHCOSVersionsFoundToMatch Determines if both machine and OCP versions are beta releases that match +// +// The function reduces each input to its major.minor form and checks whether +// these truncated versions appear in a predefined list of beta releases. If +// either version is not listed, it returns false. When both are present, it +// confirms they are identical and returns true. func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool { ocpVersion = FindMajorMinor(ocpVersion) machineVersion = FindMajorMinor(machineVersion) @@ -252,6 +274,14 @@ func BetaRHCOSVersionsFoundToMatch(machineVersion, ocpVersion string) bool { return ocpVersion == machineVersion } +// IsRHELCompatible Determines if a machine’s RHEL version is supported for a given OpenShift release +// +// The function takes the short RHEL version of a node and an OpenShift cluster +// version, then checks against a lifecycle database to see if that RHEL release +// is accepted. If multiple RHEL versions are listed for the OpenShift release +// it requires an exact match; otherwise it compares major.minor numbers to +// ensure the machine version is not older. It returns true only when the +// version criteria are satisfied, otherwise false. func IsRHELCompatible(machineVersion, ocpVersion string) bool { if machineVersion == "" || ocpVersion == "" { return false @@ -279,11 +309,25 @@ func IsRHELCompatible(machineVersion, ocpVersion string) bool { return false } +// FindMajorMinor Extracts the major and minor components of a version string +// +// The function splits an input string on periods, then concatenates the first +// two segments separated by a dot to form a "major.minor" representation. It is +// used to normalize full version strings before comparison or lookup. The +// returned value is a plain string containing only the major and minor parts. 
func FindMajorMinor(version string) string { splitVersion := strings.Split(version, ".") return splitVersion[0] + "." + splitVersion[1] } +// IsRHCOSCompatible Determines if a machine’s RHCOS version is supported for a given OpenShift release +// +// The function checks whether the supplied machine version meets the minimum +// required RHCOS version for the specified OpenShift version. It first handles +// beta releases by comparing major.minor versions, then looks up lifecycle data +// to retrieve the minimum acceptable RHCOS version and verifies compatibility +// using semantic version comparison. If any validation fails, it logs an error +// and returns false. func IsRHCOSCompatible(machineVersion, ocpVersion string) bool { if machineVersion == "" || ocpVersion == "" { return false @@ -318,6 +362,14 @@ func IsRHCOSCompatible(machineVersion, ocpVersion string) bool { return false } +// DetermineOCPStatus Determine the support status of an OpenShift version based on lifecycle dates +// +// The function accepts a version string and a date, normalizes the version to +// major.minor form, looks up lifecycle information from a local map, then +// compares the provided date against GA, FSE, and MSE milestones. It returns +// one of several status strings indicating whether the version is pre‑GA, +// generally available, in maintenance support, or end‑of‑life. If the input +// is empty or not found in the map, an unknown status is returned. 
func DetermineOCPStatus(version string, date time.Time) string { // Safeguard against empty values being passed in if version == "" || date.IsZero() { diff --git a/pkg/configuration/configuration.go b/pkg/configuration/configuration.go index 86c41fe0b..7156f7603 100644 --- a/pkg/configuration/configuration.go +++ b/pkg/configuration/configuration.go @@ -22,19 +22,37 @@ const ( defaultProbeDaemonSetNamespace = "cnf-suite" ) +// SkipHelmChartList Specifies a Helm chart to exclude from catalog checks +// +// This structure holds the identifier for an operator bundle package or image +// version that should be omitted when verifying existence against the RedHat +// catalog. The Name field contains the exact name used in the catalog lookup. +// When populated, the system will skip any validation or processing related to +// this chart. type SkipHelmChartList struct { // Name is the name of the `operator bundle package name` or `image-version` that you want to check if exists in the RedHat catalog Name string `yaml:"name" json:"name"` } -// AcceptedKernelTaintsInfo contains all certified operator request info +// AcceptedKernelTaintsInfo stores information about kernel module taints used in tests +// +// This structure holds the name of a kernel module that, when loaded, causes +// specific taints on nodes. The module field is used by the test suite to +// identify which taints should be accepted during certification testing. It +// facilitates configuration of test environments that require certain kernel +// behavior. 
type AcceptedKernelTaintsInfo struct { // Accepted modules that cause taints that we want to supply to the test suite Module string `yaml:"module" json:"module"` } -// SkipScalingTestDeploymentsInfo contains a list of names of deployments that should be skipped by the scaling tests to prevent issues +// SkipScalingTestDeploymentsInfo Lists deployments excluded from scaling tests +// +// This structure stores a deployment's name and namespace that should be +// ignored during scaling test runs. By including these entries in the +// configuration, the testing framework bypasses any checks or actions that +// could interfere with or corrupt the selected deployments. type SkipScalingTestDeploymentsInfo struct { // Deployment name and namespace that can be skipped by the scaling tests @@ -42,7 +60,12 @@ type SkipScalingTestDeploymentsInfo struct { Namespace string `yaml:"namespace" json:"namespace"` } -// SkipScalingTestStatefulSetsInfo contains a list of names of statefulsets that should be skipped by the scaling tests to prevent issues +// SkipScalingTestStatefulSetsInfo Specifies statefulsets excluded from scaling tests +// +// This structure holds the name and namespace of a StatefulSet that should be +// ignored during scaling test runs to avoid potential failures or conflicts. By +// listing such StatefulSets, the testing framework can bypass them while still +// evaluating other components. type SkipScalingTestStatefulSetsInfo struct { // StatefulSet name and namespace that can be skipped by the scaling tests @@ -50,21 +73,43 @@ type SkipScalingTestStatefulSetsInfo struct { Namespace string `yaml:"namespace" json:"namespace"` } -// Namespace struct defines namespace properties +// Namespace Represents a Kubernetes namespace configuration +// +// This structure holds information about a single namespace, primarily its name +// used for identification in the cluster. The name is serialized to YAML or +// JSON under the key "name". 
It serves as a basic unit for configuring +// namespace-specific settings within the application. type Namespace struct { Name string `yaml:"name" json:"name"` } -// CrdFilter defines a CustomResourceDefinition config filter. +// CrdFilter filters CustomResourceDefinitions by name suffix and scaling capability +// +// This structure holds criteria for selecting CRDs from a configuration. The +// NameSuffix field specifies a string that must appear at the end of a CRD’s +// name to be considered a match. The Scalable boolean indicates whether only +// scalable CRDs should be included in the filtered set. type CrdFilter struct { NameSuffix string `yaml:"nameSuffix" json:"nameSuffix"` Scalable bool `yaml:"scalable" json:"scalable"` } + +// ManagedDeploymentsStatefulsets Represents the identifier of a StatefulSet in a managed deployment +// +// This structure stores the name of a Kubernetes StatefulSet that should be +// tracked or controlled by the system. It is used as part of configuration +// data, typically loaded from YAML or JSON files, to specify which stateful +// sets are relevant for monitoring or management tasks. type ManagedDeploymentsStatefulsets struct { Name string `yaml:"name" json:"name"` } -// ConnectAPIConfig contains the configuration for the Red Hat Connect API +// ConnectAPIConfig configuration holder for accessing the Red Hat Connect API +// +// It stores the credentials, project identifier, endpoint address, and optional +// proxy settings required to communicate with the Red Hat Connect service. Each +// field is mapped to YAML and JSON keys so it can be loaded from configuration +// files or environment variables. 
type ConnectAPIConfig struct { // APIKey is the API key for the Red Hat Connect APIKey string `yaml:"apiKey" json:"apiKey"` @@ -78,7 +123,13 @@ type ConnectAPIConfig struct { ProxyPort string `yaml:"proxyPort" json:"proxyPort"` } -// TestConfiguration provides test related configuration +// TestConfiguration holds configuration values used during test execution +// +// This struct groups settings that control which namespaces, pods, operators, +// and CRDs are considered in a test run. It also contains parameters for the +// collector application and connection to an external API. The fields support +// filtering, skipping certain resources, and specifying accepted kernel taints +// or protocol names. type TestConfiguration struct { // targetNameSpaces to be used in TargetNameSpaces []Namespace `yaml:"targetNameSpaces,omitempty" json:"targetNameSpaces,omitempty"` @@ -110,6 +161,13 @@ type TestConfiguration struct { ConnectAPIConfig ConnectAPIConfig `yaml:"connectAPIConfig,omitempty" json:"connectAPIConfig,omitempty"` } +// TestParameters holds configuration settings for test execution +// +// This structure contains a collection of fields that control how tests are +// run, including resource limits, image repositories, API connection details, +// and output options. It also flags whether to include non-running pods, enable +// data collection or XML creation, and sets timeouts and log levels for the +// test environment. 
type TestParameters struct { Kubeconfig string ConfigFile string diff --git a/pkg/configuration/utils.go b/pkg/configuration/utils.go index 7c9ef2bb8..24fcc8160 100644 --- a/pkg/configuration/utils.go +++ b/pkg/configuration/utils.go @@ -29,8 +29,13 @@ var ( parameters = TestParameters{} ) -// LoadConfiguration return a function that loads -// the configuration from a file once +// LoadConfiguration Loads and parses a configuration file once +// +// The function reads the specified YAML file, unmarshals its contents into a +// TestConfiguration structure, and caches the result for subsequent calls. It +// logs progress and warns if the probe daemonset namespace is missing, +// defaulting it to a predefined value. Errors during reading or unmarshalling +// are returned alongside the configuration. func LoadConfiguration(filePath string) (TestConfiguration, error) { if confLoaded { log.Debug("config file already loaded, return previous element") @@ -60,6 +65,14 @@ func LoadConfiguration(filePath string) (TestConfiguration, error) { return configuration, nil } +// GetTestParameters Retrieves the current global test configuration +// +// This function returns a pointer to the singleton TestParameters instance that +// holds all runtime settings for the certification suite. The parameters are +// initialized once at program start and can be modified through command‑line +// flags or environment variables before use. Subsequent calls return the same +// instance, allowing different parts of the application to read shared +// configuration values. 
func GetTestParameters() *TestParameters { return ¶meters } diff --git a/pkg/diagnostics/diagnostics.go b/pkg/diagnostics/diagnostics.go index 2741a3fde..7dd142607 100644 --- a/pkg/diagnostics/diagnostics.go +++ b/pkg/diagnostics/diagnostics.go @@ -42,10 +42,13 @@ const ( cniPluginsCommand = `cat /host/etc/cni/net.d/[0-999]* | jq -s` ) -// CniPlugin holds info about a CNI plugin -// The JSON fields come from the jq output - -// NodeHwInfo node HW info +// NodeHwInfo Container for node hardware details +// +// This structure stores parsed output from various system utilities, including +// CPU information, IP configuration, block device layout, and PCI devices. Each +// field holds the raw or processed data returned by the diagnostics functions. +// The struct is populated per-node and used to aggregate hardware profiles +// across a cluster. type NodeHwInfo struct { Lscpu interface{} IPconfig interface{} @@ -53,7 +56,13 @@ type NodeHwInfo struct { Lspci []string } -// GetCniPlugins gets a json representation of the CNI plugins installed in each nodes +// GetCniPlugins Retrieves CNI plugin information from probe pods +// +// This function gathers the JSON output of a command run inside each probe pod +// to collect installed CNI plugins for every node. It executes the command, +// parses the returned JSON into generic interface slices, and maps them by node +// name. Errors during execution or decoding are logged and that node is +// skipped. 
func GetCniPlugins() (out map[string][]interface{}) { env := provider.GetTestEnvironment() o := clientsholder.GetClientsHolder() @@ -76,7 +85,14 @@ func GetCniPlugins() (out map[string][]interface{}) { return out } -// GetHwInfoAllNodes gets the Hardware information for each nodes +// GetHwInfoAllNodes Collects hardware details from all probe pods +// +// This function iterates over each probe pod defined in the test environment, +// executing a series of commands to gather CPU, memory, network, block device, +// and PCI information. The results are parsed into a structured map keyed by +// node name, with errors logged but not stopping the collection for other +// nodes. It returns a map where each entry contains a NodeHwInfo struct holding +// the gathered data. func GetHwInfoAllNodes() (out map[string]NodeHwInfo) { env := provider.GetTestEnvironment() o := clientsholder.GetClientsHolder() @@ -112,7 +128,12 @@ func GetHwInfoAllNodes() (out map[string]NodeHwInfo) { return out } -// getHWJsonOutput performs a query via probe pod and returns the JSON blob +// getHWJsonOutput Executes a command in a pod and decodes its JSON output +// +// This function runs the supplied shell command inside a specified container of +// a pod, captures the standard output, and unmarshals it into an interface. If +// the command fails or returns non‑empty stderr, an error is returned. +// Successful execution yields the parsed JSON data. 
func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out interface{}, err error) { ctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name) outStr, errStr, err := o.ExecCommandContainer(ctx, cmd) @@ -126,7 +147,13 @@ func getHWJsonOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) return out, nil } -// getHWTextOutput performs a query via debug and returns plaintext lines +// getHWTextOutput Runs a command in a pod container and returns its output lines +// +// The function constructs a context for the specified pod and container, then +// executes the given command using the client holder. If the command fails or +// produces error output, it returns an error describing the failure. On +// success, it splits the standard output by newline characters and returns the +// resulting slice of strings. func getHWTextOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) (out []string, err error) { ctx := clientsholder.NewContext(probePod.Namespace, probePod.Name, probePod.Spec.Containers[0].Name) outStr, errStr, err := o.ExecCommandContainer(ctx, cmd) @@ -137,7 +164,12 @@ func getHWTextOutput(probePod *corev1.Pod, o clientsholder.Command, cmd string) return strings.Split(outStr, "\n"), nil } -// GetNodeJSON gets the nodes summary in JSON (similar to: oc get nodes -json) +// GetNodeJSON Retrieves a JSON representation of node information +// +// The function obtains the test environment, marshals its Nodes field into +// JSON, then unmarshals that data back into a generic map structure for use +// elsewhere. It logs errors if either marshaling or unmarshaling fails and +// returns the resulting map. 
func GetNodeJSON() (out map[string]interface{}) { env := provider.GetTestEnvironment() @@ -154,7 +186,13 @@ func GetNodeJSON() (out map[string]interface{}) { return out } -// GetCsiDriver Gets the CSI driver list +// GetCsiDriver Retrieves a list of CSI drivers from the Kubernetes cluster +// +// This function accesses the Kubernetes client holder to query the StorageV1 +// API for all CSI drivers, encodes the result into JSON, and then unmarshals it +// into a map. Errors during listing, scheme setup, encoding, or decoding are +// logged and cause an empty map to be returned. The resulting map contains +// driver details suitable for inclusion in diagnostic reports. func GetCsiDriver() (out map[string]interface{}) { o := clientsholder.GetClientsHolder() csiDriver, err := o.K8sClient.StorageV1().CSIDrivers().List(context.TODO(), apimachineryv1.ListOptions{}) @@ -183,11 +221,23 @@ func GetCsiDriver() (out map[string]interface{}) { return out } +// GetVersionK8s Returns the Kubernetes version used in the test environment +// +// This function obtains the current test environment configuration and extracts +// the Kubernetes version string. It accesses the global environment state via +// provider.GetTestEnvironment() and returns the K8sVersion field. The result is +// a plain string representing the cluster's Kubernetes release. func GetVersionK8s() (out string) { env := provider.GetTestEnvironment() return env.K8sVersion } +// GetVersionOcp Retrieves the OpenShift version of the current environment +// +// This function first obtains test environment data, then checks whether the +// cluster is an OpenShift instance. If it is not, a placeholder string +// indicating a non‑OpenShift cluster is returned; otherwise the stored +// OpenshiftVersion value is provided as output. 
func GetVersionOcp() (out string) { env := provider.GetTestEnvironment() if !provider.IsOCPCluster() { @@ -196,6 +246,10 @@ func GetVersionOcp() (out string) { return env.OpenshiftVersion } +// GetVersionOcClient Returns a placeholder indicating oc client is not used +// +// The function simply provides the text "n/a, (not using oc or kubectl +// client)" to signal that no OpenShift client version information is available +// in this context. func GetVersionOcClient() (out string) { return "n/a, (not using oc or kubectl client)" } diff --git a/pkg/labels/labels.go b/pkg/labels/labels.go index 48b626f26..de633f99d 100644 --- a/pkg/labels/labels.go +++ b/pkg/labels/labels.go @@ -10,14 +10,33 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" ) +// LabelsExprEvaluator Evaluates label sets for compliance +// +// The evaluator takes an array of strings representing labels and returns true +// if they satisfy the underlying expression rules, otherwise false. It +// encapsulates the logic needed to determine whether a given set of labels +// matches the expected pattern or condition defined by the system. type LabelsExprEvaluator interface { Eval(labels []string) bool } +// labelsExprParser Parses and evaluates label expressions against a list of labels +// +// It walks the abstract syntax tree of an expression, checking identifiers +// against provided labels, handling parentheses, logical NOT, AND, OR +// operators, and reporting unexpected nodes. The result is true if the +// expression matches the label set, otherwise false. type labelsExprParser struct { astRootNode ast.Expr } +// NewLabelsExprEvaluator Creates an evaluator that checks label expressions +// +// The function transforms a comma-separated string of labels into a +// Go-compatible boolean expression, parses it into an abstract syntax tree, and +// returns an evaluator object. It replaces hyphens with underscores and commas +// with logical OR operators before parsing. 
If the input cannot be parsed, an +// error is returned. func NewLabelsExprEvaluator(labelsExpr string) (LabelsExprEvaluator, error) { goLikeExpr := strings.ReplaceAll(labelsExpr, "-", "_") goLikeExpr = strings.ReplaceAll(goLikeExpr, ",", "||") @@ -32,7 +51,13 @@ func NewLabelsExprEvaluator(labelsExpr string) (LabelsExprEvaluator, error) { }, nil } -// Evaluates the labels expression against the labels slice. +// labelsExprParser.Eval Evaluates a logical expression against a set of labels +// +// This method builds a lookup map from the supplied label strings, normalizing +// dashes to underscores for matching. It then recursively traverses an abstract +// syntax tree representing the expression, evaluating identifiers, parentheses, +// unary NOT, and binary AND/OR operators using the lookup map. The result is a +// boolean indicating whether the labels satisfy the expression. func (exprParser labelsExprParser) Eval(labels []string) bool { // Define a map for fast name/ident checking when visiting nodes. labelsMap := make(map[string]bool) diff --git a/pkg/podhelper/podhelper.go b/pkg/podhelper/podhelper.go index 0ba3d3659..474ad776c 100644 --- a/pkg/podhelper/podhelper.go +++ b/pkg/podhelper/podhelper.go @@ -11,7 +11,12 @@ import ( "k8s.io/client-go/dynamic" ) -// Structure to describe a top owner of a pod +// TopOwner represents the highest-level resource owning a pod +// +// The structure holds identifying information about a pod's ultimate owner, +// including its API version, kind, name, and namespace. It is used by helper +// functions to map pods back to the root resource that created them. The fields +// are all strings and can be populated from Kubernetes object metadata. 
type TopOwner struct { APIVersion string Kind string @@ -19,7 +24,14 @@ type TopOwner struct { Namespace string } -// Get the list of top owners of pods +// GetPodTopOwner Finds the highest-level owners of a pod +// +// This function starts with the namespace and owner references of a pod, then +// walks through each reference to resolve the actual resource objects via +// dynamic client calls. It recursively follows owner chains until it reaches +// resources without further owners, recording those as top owners in a map +// keyed by name. The result is returned along with any errors encountered +// during resolution. func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerReference) (topOwners map[string]TopOwner, err error) { topOwners = make(map[string]TopOwner) err = followOwnerReferences( @@ -34,7 +46,14 @@ func GetPodTopOwner(podNamespace string, podOwnerReferences []metav1.OwnerRefere return topOwners, nil } -// Recursively follow the ownership tree to find the top owners +// followOwnerReferences traverses owner references to discover top‑level resources +// +// The routine walks the chain of OwnerReference objects for a given Kubernetes +// resource, querying each referenced object until it reaches those without +// further owners. It records these highest-level owners in a map keyed by name, +// storing API version, kind, and namespace information. Errors during lookup or +// parsing are returned to allow callers to handle missing or malformed +// references. 
func followOwnerReferences(resourceList []*metav1.APIResourceList, dynamicClient dynamic.Interface, topOwners map[string]TopOwner, namespace string, ownerRefs []metav1.OwnerReference) (err error) { for _, ownerRef := range ownerRefs { apiResource, err := searchAPIResource(ownerRef.Kind, ownerRef.APIVersion, resourceList) @@ -82,8 +101,13 @@ func followOwnerReferences(resourceList []*metav1.APIResourceList, dynamicClient return nil } -// searchAPIResource is a helper func that returns the metav1.APIResource pointer of the resource by kind and apiVersion. -// from a metav1.APIResourceList. +// searchAPIResource Finds an API resource by kind and version +// +// The function iterates through a list of APIResourceList objects, matching the +// supplied group-version string to each list's GroupVersion field. Within each +// matching list it scans the contained resources for one whose Kind equals the +// provided kind value. If found, it returns a pointer to that resource; +// otherwise it reports an error indicating no match was located. func searchAPIResource(kind, apiVersion string, apis []*metav1.APIResourceList) (*metav1.APIResource, error) { for _, api := range apis { if api.GroupVersion != apiVersion { diff --git a/pkg/postmortem/postmortem.go b/pkg/postmortem/postmortem.go index c957ee488..e4a0a3eb1 100644 --- a/pkg/postmortem/postmortem.go +++ b/pkg/postmortem/postmortem.go @@ -23,6 +23,15 @@ import ( corev1 "k8s.io/api/core/v1" ) +// Log Provides a diagnostic snapshot of the test environment +// +// The function retrieves the current test environment, refreshes its state, and +// then builds a multiline string summarizing node taints, pending pods that are +// not running or succeeded, and any abnormal events. It loops over nodes to +// list their names and taint configurations, iterates through all pods +// filtering by status, and appends each relevant pod's string representation. 
+// Finally, it gathers abnormal events from the environment and returns the +// combined output. func Log() (out string) { // Get current environment env := provider.GetTestEnvironment() diff --git a/pkg/provider/catalogsources.go b/pkg/provider/catalogsources.go index 14a631d15..11e75b9c4 100644 --- a/pkg/provider/catalogsources.go +++ b/pkg/provider/catalogsources.go @@ -10,6 +10,15 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" ) +// GetCatalogSourceBundleCount Returns the number of bundles for a catalog source +// +// The function determines how many bundles are associated with a given catalog +// source by examining either probe container data or package manifests, +// depending on the OpenShift version. It first checks if the cluster is running +// an OCP version less than or equal to 4.12; if so, it retrieves the count via +// a probe container. Otherwise, it falls back to counting bundles listed in the +// package manifests. The result is returned as an integer, with -1 indicating +// failure to determine the count. func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int { // Now that we know the catalog source, we are going to count up all of the relatedImages // that are associated with the catalog source. This will give us the number of bundles that @@ -42,6 +51,13 @@ func GetCatalogSourceBundleCount(env *TestEnvironment, cs *olmv1Alpha.CatalogSou return getCatalogSourceBundleCountFromPackageManifests(env, cs) } +// getCatalogSourceBundleCountFromProbeContainer retrieves the number of bundles for a catalog source via probe container +// +// The function locates the service linked to the given catalog source, then +// runs a grpcurl command inside each available probe pod to list registry +// bundles. It parses the output into an integer and returns that count. If no +// matching service or probe pod yields a valid result, it logs a warning and +// returns -1. 
func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int { // We need to use the probe container to get the bundle count // This is because the package manifests are not available in the cluster @@ -88,6 +104,13 @@ func getCatalogSourceBundleCountFromProbeContainer(env *TestEnvironment, cs *olm return -1 } +// getCatalogSourceBundleCountFromPackageManifests Counts bundles from package manifests linked to a catalog source +// +// It iterates over all known package manifests in the test environment, filters +// those that belong to the specified catalog source by name and namespace, then +// sums the number of entries across every channel for each matching manifest. +// The total count is returned as an integer representing how many bundles are +// available via the manifests. func getCatalogSourceBundleCountFromPackageManifests(env *TestEnvironment, cs *olmv1Alpha.CatalogSource) int { totalRelatedBundles := 0 for _, pm := range env.AllPackageManifests { diff --git a/pkg/provider/containers.go b/pkg/provider/containers.go index 9bdb5d185..6bec294c0 100644 --- a/pkg/provider/containers.go +++ b/pkg/provider/containers.go @@ -40,7 +40,13 @@ var ( ignoredContainerNames = []string{"istio-proxy"} ) -// Tag and Digest should not be populated at the same time. Digest takes precedence if both are populated +// ContainerImageIdentifier Represents a container image reference with optional tag or digest +// +// This structure holds the components of a container image: registry, +// repository name, an optional tag, and an optional digest. When both tag and +// digest are provided, the digest is used to uniquely identify the image, +// overriding the tag. The fields map directly to YAML and JSON keys for easy +// serialization. 
type ContainerImageIdentifier struct { // Repository is the name of the image that you want to check if exists in the RedHat catalog Repository string `yaml:"repository" json:"repository"` @@ -56,6 +62,14 @@ type ContainerImageIdentifier struct { Digest string `yaml:"digest" json:"digest"` } +// Container Represents a Kubernetes container with its status and metadata +// +// This structure holds information about a container running in a pod, +// including the core container spec, runtime details, node assignment, and +// namespace. It tracks the container’s current state through the status field +// and stores a unique identifier for the container instance. The struct also +// keeps an image identifier and any preflight test results that have been run +// against the container. type Container struct { *corev1.Container Status corev1.ContainerStatus @@ -68,12 +82,24 @@ type Container struct { PreflightResults PreflightResultsDB } +// NewContainer Creates an empty Container instance +// +// The function returns a pointer to a new Container struct with its embedded +// corev1.Container field initialized to an empty object. No parameters are +// required, and the returned value can be used as a starting point for building +// a container configuration. func NewContainer() *Container { return &Container{ Container: &corev1.Container{}, // initialize the corev1.Container object } } +// Container.GetUID Retrieves the unique identifier of a container +// +// The method splits the container’s ID string on "://" and uses the last +// segment as the UID, handling empty results with an error. It logs debug +// messages indicating success or failure and returns the UID along with any +// error encountered. 
func (c *Container) GetUID() (string, error) { split := strings.Split(c.Status.ContainerID, "://") uid := "" @@ -88,6 +114,14 @@ func (c *Container) GetUID() (string, error) { return uid, nil } +// Container.SetPreflightResults Stores preflight test results for a container image +// +// This method runs the OpenShift Preflight container checks on the image +// associated with the receiver, capturing logs and test outcomes. If the image +// has been processed before, it reuses cached results; otherwise it configures +// Docker credentials and optional insecure connections, executes the check, +// converts raw results into a structured database format, and caches them for +// future use. The function returns an error if any part of the execution fails. func (c *Container) SetPreflightResults(preflightImageCache map[string]PreflightResultsDB, env *TestEnvironment) error { log.Info("Running Preflight container test for container %q with image %q", c, c.Image) @@ -146,6 +180,13 @@ func (c *Container) SetPreflightResults(preflightImageCache map[string]Preflight return nil } +// Container.StringLong Formats container details into a readable string +// +// This method assembles key fields from the container such as node name, +// namespace, pod name, container name, UID, and runtime into a single formatted +// line. It uses standard string formatting to produce a concise representation +// of the container’s identity. The resulting text is returned for logging or +// display purposes. func (c *Container) StringLong() string { return fmt.Sprintf("node: %s ns: %s podName: %s containerName: %s containerUID: %s containerRuntime: %s", c.NodeName, @@ -156,6 +197,13 @@ func (c *Container) StringLong() string { c.Runtime, ) } + +// Container.String Formats container details into a readable string +// +// This method returns a string that describes the container by combining its +// name, pod name, and namespace in a single line. 
It uses standard formatting +// to create a clear human-readable representation of the container's identity +// within the cluster. func (c *Container) String() string { return fmt.Sprintf("container: %s pod: %s ns: %s", c.Name, @@ -164,6 +212,12 @@ func (c *Container) String() string { ) } +// Container.HasIgnoredContainerName Determines if the container should be excluded from processing +// +// This method checks each name in a predefined ignore list against the +// container’s name, also treating any Istio proxy container as ignored. If a +// match is found it returns true; otherwise false. The result guides callers to +// skip containers that are not relevant for certain operations. func (c *Container) HasIgnoredContainerName() bool { for _, ign := range ignoredContainerNames { if c.IsIstioProxy() || strings.Contains(c.Name, ign) { @@ -173,20 +227,43 @@ func (c *Container) HasIgnoredContainerName() bool { return false } +// Container.IsIstioProxy Determines if the container is an Istio proxy +// +// It checks whether the container’s name matches the predefined Istio proxy +// name. If it does, the function returns true; otherwise, it returns false. +// This simple check is used to identify and potentially ignore Istio-related +// containers in other logic. func (c *Container) IsIstioProxy() bool { return c.Name == IstioProxyContainerName } +// Container.HasExecProbes Checks if any probe uses an exec command +// +// The method inspects the container's liveness, readiness, and startup probes +// for non-nil Exec fields. It returns true if at least one of these probes has +// an Exec configuration defined; otherwise it returns false. 
func (c *Container) HasExecProbes() bool { return c.LivenessProbe != nil && c.LivenessProbe.Exec != nil || c.ReadinessProbe != nil && c.ReadinessProbe.Exec != nil || c.StartupProbe != nil && c.StartupProbe.Exec != nil } +// Container.IsTagEmpty Checks whether the container image tag is unset +// +// This method inspects the container's image identifier and compares its Tag +// field to an empty string. It returns true when no tag has been specified, +// indicating a default or unspecified tag. The result helps callers determine +// if they need to supply a tag value. func (c *Container) IsTagEmpty() bool { return c.ContainerImageIdentifier.Tag == "" } +// Container.IsReadOnlyRootFilesystem Determines if the container’s root filesystem is read‑only +// +// It logs a message indicating the container being tested, then checks whether +// the security context and its ReadOnlyRootFilesystem field are defined. If +// either is missing it returns false; otherwise it returns the value of that +// field. func (c *Container) IsReadOnlyRootFilesystem(logger *log.Logger) bool { logger.Info("Testing Container %q", c) if c.SecurityContext == nil || c.SecurityContext.ReadOnlyRootFilesystem == nil { @@ -195,6 +272,14 @@ func (c *Container) IsReadOnlyRootFilesystem(logger *log.Logger) bool { return *c.SecurityContext.ReadOnlyRootFilesystem } +// Container.IsContainerRunAsNonRoot Determines if a container should run as non-root +// +// The method checks the container’s security context for a RunAsNonRoot +// setting, falling back to an optional pod-level value if the container does +// not specify one. It returns a boolean indicating whether the container will +// run as non‑root and a descriptive reason explaining which level provided +// the decision. If neither level supplies a value, it reports that both are +// unset. 
func (c *Container) IsContainerRunAsNonRoot(podRunAsNonRoot *bool) (isContainerRunAsNonRoot bool, reason string) { if c.SecurityContext != nil && c.SecurityContext.RunAsNonRoot != nil { return *c.SecurityContext.RunAsNonRoot, fmt.Sprintf("RunAsNonRoot is set to %t at the container level, overriding a %v value defined at pod level", @@ -208,6 +293,13 @@ func (c *Container) IsContainerRunAsNonRoot(podRunAsNonRoot *bool) (isContainerR return false, "RunAsNonRoot is set to nil at pod and container level" } +// Container.IsContainerRunAsNonRootUserID checks whether the container is running as a non-root user +// +// The function evaluates the container’s security context to determine if it +// has a RunAsUser value different from zero, indicating a non‑root user ID. +// It also considers any pod-level RunAsUser setting that may be inherited when +// the container does not specify its own. The result is a boolean flag and a +// descriptive reason explaining which level provided the decision. func (c *Container) IsContainerRunAsNonRootUserID(podRunAsNonRootUserID *int64) (isContainerRunAsNonRootUserID bool, reason string) { if c.SecurityContext != nil && c.SecurityContext.RunAsUser != nil { return *c.SecurityContext.RunAsUser != 0, fmt.Sprintf("RunAsUser is set to %v at the container level, overriding a %s value defined at pod level", diff --git a/pkg/provider/deployments.go b/pkg/provider/deployments.go index 68377ac34..7fad9e9ea 100644 --- a/pkg/provider/deployments.go +++ b/pkg/provider/deployments.go @@ -24,10 +24,22 @@ import ( appv1client "k8s.io/client-go/kubernetes/typed/apps/v1" ) +// Deployment Represents a Kubernetes deployment with helper methods +// +// This type wraps the standard appsv1.Deployment object to provide convenient +// operations such as checking readiness and generating a string representation. 
+// It exposes the embedded Deployment fields directly while adding methods that +// evaluate status conditions and replica counts for quick health checks. type Deployment struct { *appsv1.Deployment } +// Deployment.IsDeploymentReady Determines whether a deployment has reached the desired state +// +// It inspects the deployment’s status conditions to see if an available +// condition is present, then compares replica counts from the spec with various +// status fields such as unavailable, ready, available, and updated replicas. If +// any of these checks fail, it returns false; otherwise true. func (d *Deployment) IsDeploymentReady() bool { notReady := true @@ -58,6 +70,11 @@ func (d *Deployment) IsDeploymentReady() bool { return true } +// Deployment.ToString Formats deployment details into a human‑readable string +// +// This method creates a concise representation of a Deployment by combining its +// name and namespace. It uses standard formatting to return the result as a +// single string, which can be printed or logged for debugging purposes. func (d *Deployment) ToString() string { return fmt.Sprintf("deployment: %s ns: %s", d.Name, @@ -65,6 +82,12 @@ func (d *Deployment) ToString() string { ) } +// GetUpdatedDeployment Retrieves the latest state of a Kubernetes deployment +// +// The function queries the cluster for a specific deployment in a given +// namespace, then wraps the result in a custom Deployment type that exposes +// helper methods. It returns a pointer to this wrapper and an error if the +// lookup fails or the API call encounters an issue. 
func GetUpdatedDeployment(ac appv1client.AppsV1Interface, namespace, name string) (*Deployment, error) { result, err := autodiscover.FindDeploymentByNameByNamespace(ac, namespace, name) return &Deployment{ diff --git a/pkg/provider/events.go b/pkg/provider/events.go index 26a1837da..809d5c78a 100644 --- a/pkg/provider/events.go +++ b/pkg/provider/events.go @@ -22,14 +22,36 @@ import ( corev1 "k8s.io/api/core/v1" ) +// Event Represents a Kubernetes event with access to all core event data +// +// The type embeds the standard Kubernetes Event structure, giving it direct +// access to fields such as CreationTimestamp, InvolvedObject, Reason, and +// Message. It provides a convenient String method that formats these key +// properties into a single readable string for logging or debugging purposes. +// This struct is used throughout the provider package to encapsulate event +// information while keeping the original corev1.Event behavior intact. type Event struct { *corev1.Event } +// NewEvent Wraps a Kubernetes event object +// +// The function receives a pointer to a corev1.Event and returns an Event +// instance that encapsulates the original event. It assigns the passed event to +// the internal field of the returned struct, enabling further processing within +// the provider package. No additional transformation or validation is +// performed. func NewEvent(aEvent *corev1.Event) (out Event) { out.Event = aEvent return out } + +// Event.String Formats event data into a readable string +// +// This method constructs a formatted text representation of an event, including +// its timestamp, involved object, reason, and message. It uses standard +// formatting utilities to combine these fields into a single line. The +// resulting string is returned for display or logging purposes. 
func (e *Event) String() string { return fmt.Sprintf("timestamp=%s involved object=%s reason=%s message=%s", e.CreationTimestamp.Time, e.InvolvedObject, e.Reason, e.Message) } diff --git a/pkg/provider/filters.go b/pkg/provider/filters.go index 4d53999d5..dbf37a519 100644 --- a/pkg/provider/filters.go +++ b/pkg/provider/filters.go @@ -24,9 +24,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" ) -// GetGuaranteedPodsWithExclusiveCPUs returns a slice of Pod objects that are guaranteed to have exclusive CPUs. -// It iterates over the Pods in the TestEnvironment and filters out the Pods that do not have exclusive CPUs. -// The filtered Pods are then returned as a slice. +// TestEnvironment.GetGuaranteedPodsWithExclusiveCPUs Retrieves pods that have guaranteed exclusive CPU allocation +// +// The method examines each pod in the test environment, applying a check to +// determine if the pod is guaranteed with exclusive CPUs. Pods passing this +// check are collected into a slice and returned. This list can be used by other +// functions to identify containers or pods suitable for CPU‑pinning +// scenarios. func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -37,8 +41,14 @@ func (env *TestEnvironment) GetGuaranteedPodsWithExclusiveCPUs() []*Pod { return filteredPods } -// GetGuaranteedPodsWithIsolatedCPUs returns a list of pods from the TestEnvironment -// that are guaranteed to have isolated CPUs and are CPU isolation compliant. +// TestEnvironment.GetGuaranteedPodsWithIsolatedCPUs Retrieves pods that are guaranteed to have isolated CPUs +// +// This method scans all pods in the test environment, selecting only those +// whose CPU requests match whole units and whose resources are identical across +// containers. 
It further checks that each pod meets CPU isolation compliance +// criteria, such as having appropriate annotations and a specified runtime +// class name. The resulting slice of pods is returned for use by other +// filtering functions. func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -49,10 +59,12 @@ func (env *TestEnvironment) GetGuaranteedPodsWithIsolatedCPUs() []*Pod { return filteredPods } -// GetGuaranteedPods returns a slice of guaranteed pods in the test environment. -// A guaranteed pod is a pod that meets certain criteria specified by the IsPodGuaranteed method. -// The method iterates over all pods in the environment and filters out the guaranteed ones. -// It returns the filtered pods as a slice. +// TestEnvironment.GetGuaranteedPods Retrieves all pods that satisfy the guaranteed condition +// +// This method scans every pod in the test environment, checks each one with its +// own guarantee logic, and collects those that pass into a slice. The resulting +// slice contains only the pods deemed guaranteed, which are then returned to +// the caller. func (env *TestEnvironment) GetGuaranteedPods() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -63,7 +75,12 @@ func (env *TestEnvironment) GetGuaranteedPods() []*Pod { return filteredPods } -// GetNonGuaranteedPods returns a slice of non-guaranteed pods in the test environment. +// TestEnvironment.GetNonGuaranteedPods retrieves all pods that are not guaranteed in the test environment +// +// The function iterates over every pod in the TestEnvironment, checks if each +// pod is not guaranteed by calling IsPodGuaranteed, and collects those pods +// into a slice. It returns this slice of non‑guaranteed pods for further +// processing or analysis. 
func (env *TestEnvironment) GetNonGuaranteedPods() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -74,9 +91,12 @@ func (env *TestEnvironment) GetNonGuaranteedPods() []*Pod { return filteredPods } -// GetPodsWithoutAffinityRequiredLabel returns a slice of Pod objects that do not have the affinity required label. -// It iterates over the Pods in the TestEnvironment and filters out the ones that do not have the affinity required label. -// The filtered Pods are returned as a slice. +// TestEnvironment.GetPodsWithoutAffinityRequiredLabel Retrieves pods missing the required affinity label +// +// The method scans all pods in the test environment, checks each pod for the +// presence of an affinity-required label using the Pod.AffinityRequired helper, +// and collects those that lack it. It returns a slice containing only these +// pods, allowing callers to identify which resources need proper labeling. func (env *TestEnvironment) GetPodsWithoutAffinityRequiredLabel() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -87,9 +107,12 @@ func (env *TestEnvironment) GetPodsWithoutAffinityRequiredLabel() []*Pod { return filteredPods } -// GetAffinityRequiredPods returns a slice of Pod objects that have affinity required. -// It iterates over the Pods in the TestEnvironment and filters out the Pods that have affinity required. -// The filtered Pods are returned as a slice. +// TestEnvironment.GetAffinityRequiredPods Retrieves pods that require affinity +// +// This method scans the test environment's collection of pod objects and +// selects those that have an affinity requirement flag set in their labels. It +// returns a slice containing only the matching pods, enabling callers to focus +// on affinity-dependent resources. 
func (env *TestEnvironment) GetAffinityRequiredPods() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -100,9 +123,12 @@ func (env *TestEnvironment) GetAffinityRequiredPods() []*Pod { return filteredPods } -// GetHugepagesPods returns a slice of Pod objects that have hugepages enabled. -// It iterates over the Pods in the TestEnvironment and filters out the ones that do not have hugepages. -// The filtered Pods are returned as a []*Pod. +// TestEnvironment.GetHugepagesPods returns all pods that request or limit hugepages +// +// The method scans the environment’s pod collection, checks each pod for any +// container using a hugepage resource via HasHugepages, and collects those that +// do. The resulting slice of pointers to Pod objects is returned; if none have +// hugepages, an empty slice is produced. func (env *TestEnvironment) GetHugepagesPods() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -113,11 +139,25 @@ func (env *TestEnvironment) GetHugepagesPods() []*Pod { return filteredPods } -// GetCPUPinningPodsWithDpdk returns a slice of Pods that have CPU pinning enabled with DPDK. +// TestEnvironment.GetCPUPinningPodsWithDpdk Lists guaranteed pods that pin CPUs with DPDK +// +// This method retrieves all pods in the test environment that are guaranteed to +// have exclusive CPU resources and then filters them to include only those +// running DPDK drivers. It calls a helper function that checks each pod’s +// container for DPDK device presence via a system command. The resulting slice +// contains pointers to pods meeting both criteria, suitable for further +// validation or manipulation. 
func (env *TestEnvironment) GetCPUPinningPodsWithDpdk() []*Pod { return filterDPDKRunningPods(env.GetGuaranteedPodsWithExclusiveCPUs()) } +// filterPodsWithoutHostPID filters out pods that enable HostPID +// +// The function receives a slice of pod objects and iterates through each one, +// checking whether the HostPID flag is set in the pod specification. Pods with +// this flag enabled are skipped; all others are collected into a new slice. The +// resulting slice contains only those pods that do not use the host's PID +// namespace. func filterPodsWithoutHostPID(pods []*Pod) []*Pod { var withoutHostPIDPods []*Pod @@ -130,6 +170,13 @@ func filterPodsWithoutHostPID(pods []*Pod) []*Pod { return withoutHostPIDPods } +// filterDPDKRunningPods Filters pods that are running DPDK-enabled devices +// +// This function examines a slice of pod objects, executing a command inside +// each container to locate the device file path specified by the pod’s Multus +// PCI annotation. If the output contains the string "vfio-pci", indicating the +// presence of a DPDK driver, the pod is added to a new list. The resulting +// slice contains only pods that have confirmed DPDK support. func filterDPDKRunningPods(pods []*Pod) []*Pod { var filteredPods []*Pod const ( @@ -155,9 +202,12 @@ func filterDPDKRunningPods(pods []*Pod) []*Pod { return filteredPods } -// GetShareProcessNamespacePods returns a slice of Pod objects that have the ShareProcessNamespace flag set to true. -// It iterates over the Pods in the TestEnvironment and filters out the ones that do not have the ShareProcessNamespace flag set. -// The filtered Pods are then returned as a slice. +// TestEnvironment.GetShareProcessNamespacePods Retrieves pods that enable shared process namespaces +// +// The function scans the TestEnvironment's collection of Pod objects, selecting +// those whose ShareProcessNamespace flag is true. It accumulates these matching +// pods into a new slice and returns it. 
The returned slice contains only pods +// configured for shared process namespace operation. func (env *TestEnvironment) GetShareProcessNamespacePods() []*Pod { var filteredPods []*Pod for _, p := range env.Pods { @@ -168,10 +218,12 @@ func (env *TestEnvironment) GetShareProcessNamespacePods() []*Pod { return filteredPods } -// GetPodsUsingSRIOV returns a list of pods that are using SR-IOV. -// It iterates through the pods in the TestEnvironment and checks if each pod is using SR-IOV. -// If an error occurs while checking the SR-IOV usage for a pod, it returns an error. -// The filtered pods that are using SR-IOV are returned along with a nil error. +// TestEnvironment.GetPodsUsingSRIOV Collects all pods that are using SR-IOV +// +// The method scans every pod in the test environment, checking each one for +// SR‑IOV usage by calling its helper function. If a pod reports SR‑IOV +// support, it is added to a slice of matching pods. The function returns this +// list and an error if any pod check fails. func (env *TestEnvironment) GetPodsUsingSRIOV() ([]*Pod, error) { var filteredPods []*Pod for _, p := range env.Pods { @@ -187,6 +239,12 @@ func (env *TestEnvironment) GetPodsUsingSRIOV() ([]*Pod, error) { return filteredPods, nil } +// getContainers collects all containers from a list of pods +// +// The function iterates over each pod in the provided slice, appending every +// container within those pods to a new slice. It returns this aggregated slice, +// allowing callers to work with a flat list of containers regardless of their +// originating pod. func getContainers(pods []*Pod) []*Container { var containers []*Container @@ -196,26 +254,43 @@ func getContainers(pods []*Pod) []*Container { return containers } -// GetGuaranteedPodContainersWithExclusiveCPUs returns a slice of Container objects representing the containers -// that have exclusive CPUs in the TestEnvironment. 
+// TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUs Retrieves containers with guaranteed exclusive CPUs +// +// This method returns a slice of container objects that belong to pods which +// have been marked as guaranteed to use exclusive CPUs. It gathers the relevant +// pods via GetGuaranteedPodsWithExclusiveCPUs and then collects their +// containers into a single list for further processing or inspection. func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUs() []*Container { return getContainers(env.GetGuaranteedPodsWithExclusiveCPUs()) } -// GetNonGuaranteedPodContainersWithoutHostPID returns a slice of containers from the test environment -// that belong to non-guaranteed pods without the HostPID setting enabled. +// TestEnvironment.GetNonGuaranteedPodContainersWithoutHostPID Lists containers in non-guaranteed pods that do not use HostPID +// +// This method retrieves all non-guaranteed pods from the test environment, +// filters out any pods with the HostPID setting enabled, then collects every +// container within those remaining pods. The result is a slice of container +// objects representing workloads that are both non‑guaranteed and run without +// shared PID namespaces. func (env *TestEnvironment) GetNonGuaranteedPodContainersWithoutHostPID() []*Container { return getContainers(filterPodsWithoutHostPID(env.GetNonGuaranteedPods())) } -// GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID returns a slice of containers from the test environment -// that belong to pods with exclusive CPUs and do not have the host PID enabled. +// TestEnvironment.GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID Retrieves containers from guaranteed pods that use exclusive CPUs but do not enable host PID +// +// It first selects all pods in the test environment marked as guaranteed with +// exclusive CPUs, then filters out any pod where HostPID is enabled. 
Finally it +// collects and returns every container belonging to the remaining pods. func (env *TestEnvironment) GetGuaranteedPodContainersWithExclusiveCPUsWithoutHostPID() []*Container { return getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithExclusiveCPUs())) } -// GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID returns a slice of containers from the TestEnvironment -// that have guaranteed pods with isolated CPUs and without the HostPID flag set. +// TestEnvironment.GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID returns containers from guaranteed pods with isolated CPUs that do not use HostPID +// +// It first collects all pods in the environment that are guaranteed to have +// exclusive CPU allocation and comply with CPU isolation rules. Then it filters +// out any pod where the HostPID flag is enabled, ensuring only non-HostPID pods +// remain. Finally, it aggregates and returns a slice of containers from those +// remaining pods. func (env *TestEnvironment) GetGuaranteedPodContainersWithIsolatedCPUsWithoutHostPID() []*Container { return getContainers(filterPodsWithoutHostPID(env.GetGuaranteedPodsWithIsolatedCPUs())) } diff --git a/pkg/provider/isolation.go b/pkg/provider/isolation.go index a35ec5036..a3afb9f47 100644 --- a/pkg/provider/isolation.go +++ b/pkg/provider/isolation.go @@ -20,6 +20,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" ) +// AreResourcesIdentical Verifies that CPU and memory requests match limits for every container in a pod +// +// The function iterates over all containers in the supplied pod, ensuring each +// has defined resource limits. It compares the request values to their +// corresponding limits for both CPU and memory; if any mismatch is found, it +// logs a debug message and returns false. When all containers satisfy these +// conditions, the function returns true. func AreResourcesIdentical(p *Pod) bool { // Pods may contain more than one container. 
All containers must conform to the CPU isolation requirements. for _, cut := range p.Containers { @@ -50,6 +57,13 @@ func AreResourcesIdentical(p *Pod) bool { return true } +// AreCPUResourcesWholeUnits Verifies that all CPU requests and limits are whole units +// +// The function iterates over each container in a pod, ensuring both CPU +// requests and limits are defined and expressed as whole CPU units, that is, +// multiples of 1000 milliCPU. If any +// container lacks these specifications or has non‑whole‑unit values, it +// logs the issue and returns false. When all containers meet the criteria, it +// returns true. func AreCPUResourcesWholeUnits(p *Pod) bool { isInteger := func(val int64) bool { return val%1000 == 0 } @@ -79,6 +93,13 @@ func AreCPUResourcesWholeUnits(p *Pod) bool { return true } +// LoadBalancingDisabled Determines if both CPU and IRQ load balancing are disabled via annotations +// +// The function checks a pod’s annotations for "cpu-load-balancing.crio.io" +// and "irq-load-balancing.crio.io", verifying each is set to the value +// "disable". If either annotation is missing or has an invalid value, it logs a +// debug message. It returns true only when both annotations are present with +// the correct value; otherwise it returns false. func LoadBalancingDisabled(p *Pod) bool { const ( disableVar = "disable" 
The struct’s methods offer helpers for OS detection, role +// identification, workload presence, and JSON serialization. type Node struct { Data *corev1.Node Mc MachineConfig `json:"-"` } +// Node.MarshalJSON Serializes the node's internal data to JSON +// +// The method calls the standard library’s Marshal function with a pointer to +// the node’s Data field. It produces a byte slice containing the JSON +// representation of that data and returns any error encountered during +// marshaling. func (node Node) MarshalJSON() ([]byte, error) { return json.Marshal(&node.Data) } +// Node.IsWorkerNode Determines if a node is considered a worker by inspecting its labels +// +// This method iterates over all labels attached to the node and checks each one +// against a predefined list of worker-identifying label patterns. It uses a +// helper that performs a substring match, allowing flexible recognition of +// common worker label conventions. The function returns true if any matching +// label is found; otherwise it returns false. func (node *Node) IsWorkerNode() bool { for nodeLabel := range node.Data.Labels { if stringhelper.StringInSlice(WorkerLabels, nodeLabel, true) { @@ -47,6 +68,11 @@ func (node *Node) IsWorkerNode() bool { return false } +// Node.IsControlPlaneNode Determines whether the node is a control‑plane instance +// +// The method inspects each label on the node’s data and checks if any match +// known master labels using a string containment helper. If a matching label is +// found, it returns true; otherwise it returns false. 
func (node *Node) IsControlPlaneNode() bool { for nodeLabel := range node.Data.Labels { if stringhelper.StringInSlice(MasterLabels, nodeLabel, true) { @@ -56,23 +82,57 @@ func (node *Node) IsControlPlaneNode() bool { return false } +// Node.IsRHCOS Determines whether a node runs Red Hat Enterprise Linux CoreOS +// +// The method examines the operating system image field of the node's status +// information, removing any surrounding whitespace before searching for the +// predefined CoreOS identifier string. If that identifier is present, it +// returns true; otherwise, it returns false. This check is used by other +// functions to confirm OS compatibility before proceeding with further +// operations. func (node *Node) IsRHCOS() bool { return strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), rhcosName) } +// Node.IsCSCOS Determines whether the node runs CentOS Stream CoreOS +// +// This method inspects the operating system image string from the node’s +// status information, trims surrounding whitespace, and checks if it contains +// the CentOS Stream CoreOS identifier. It returns true when the identifier is +// present, indicating a CentOS Stream CoreOS environment; otherwise it returns +// false. func (node *Node) IsCSCOS() bool { return strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), cscosName) } +// Node.IsRHEL checks whether the node’s OS image is a Red Hat Enterprise Linux release +// +// The method trims any surrounding whitespace from the node’s OS image string +// and then looks for the RHEL identifier within it. If the identifier is +// present, it returns true; otherwise it returns false. This boolean result is +// used by other functions to decide whether RHEL‑specific logic should be +// applied. 
func (node *Node) IsRHEL() bool { return strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.OSImage), rhelName) } +// Node.IsRTKernel Indicates if the node uses a real‑time kernel +// +// This method examines the node's kernel version string, trims whitespace, and +// checks for the presence of "rt" to determine whether a real‑time kernel is +// installed. It returns true when the substring is found, otherwise false. func (node *Node) IsRTKernel() bool { // More information: https://www.redhat.com/sysadmin/real-time-kernel return strings.Contains(strings.TrimSpace(node.Data.Status.NodeInfo.KernelVersion), "rt") } +// Node.GetRHCOSVersion Retrieves the short RHCOS version string from a node's OS image +// +// The function first verifies that the node is running Red Hat Enterprise Linux +// CoreOS, returning an error if not. It then parses the OSImage field to +// extract the long version identifier and converts it into the corresponding +// short version using a helper routine. The resulting short version string is +// returned alongside any potential errors. func (node *Node) GetRHCOSVersion() (string, error) { // Check if the node is running CoreOS or not if !node.IsRHCOS() { @@ -92,6 +152,12 @@ func (node *Node) GetRHCOSVersion() (string, error) { return shortVersion, nil } +// Node.GetCSCOSVersion Retrieves the CoreOS version string from a node's OS image +// +// The function first verifies that the node is running CoreOS by checking its +// status. If not, it returns an error indicating an unsupported OS type. When +// valid, it parses the OSImage field to extract and return the CoreOS release +// identifier as a string. 
func (node *Node) GetCSCOSVersion() (string, error) { // Check if the node is running CoreOS or not if !node.IsCSCOS() { @@ -105,6 +171,14 @@ func (node *Node) GetCSCOSVersion() (string, error) { return longVersionSplit[0], nil } +// Node.GetRHELVersion Retrieves the major and minor RHEL version from a node +// +// The method first verifies that the node reports an OS image containing +// "RHEL"; if not, it returns an error indicating the OS type is invalid. It +// then splits the OS image string on the RHEL identifier, trims any surrounding +// whitespace, and extracts the leading numeric part of the remaining string as +// the version. The extracted version string is returned along with a nil error +// when successful. func (node *Node) GetRHELVersion() (string, error) { // Check if the node is running RHEL or not if !node.IsRHEL() { @@ -123,6 +197,13 @@ const ( isHyperThreadCommand = "chroot /host lscpu" ) +// Node.IsHyperThreadNode Determines if the node supports hyper‑threading +// +// The method runs a predefined command inside a probe pod on the node to query +// CPU core information. It parses the output for the number of threads per core +// and returns true when more than one thread is reported, indicating +// hyper‑threading support. Errors from execution or parsing are returned +// alongside the boolean result. func (node *Node) IsHyperThreadNode(env *TestEnvironment) (bool, error) { o := clientsholder.GetClientsHolder() nodeName := node.Data.Name @@ -140,6 +221,12 @@ func (node *Node) IsHyperThreadNode(env *TestEnvironment) (bool, error) { return num > 1, nil } +// Node.HasWorkloadDeployed Determines whether any of a set of pods are running on this node +// +// The method walks through each pod in the provided slice and inspects its spec +// to see if the node name matches the current node’s name. If it finds a +// match, it immediately returns true; otherwise, after checking all pods it +// returns false. 
func (node *Node) HasWorkloadDeployed(podsUnderTest []*Pod) bool { for _, pod := range podsUnderTest { if pod.Spec.NodeName == node.Data.Name { diff --git a/pkg/provider/operators.go b/pkg/provider/operators.go index 1a0d4ebc6..c80f67e8f 100644 --- a/pkg/provider/operators.go +++ b/pkg/provider/operators.go @@ -41,6 +41,14 @@ import ( "k8s.io/apimachinery/pkg/types" ) +// Operator represents an installed operator within a cluster +// +// This data structure holds metadata about an operator, including its name, +// namespace, deployment phase, subscription details, package information, and +// any associated install plans. It also tracks whether the operator is +// cluster‑wide or scoped to specific namespaces and stores preflight test +// results for validation. The fields provide a comprehensive view of an +// operator’s state and configuration used by the certification framework. type Operator struct { Name string `yaml:"name" json:"name"` Namespace string `yaml:"namespace" json:"namespace"` @@ -60,6 +68,13 @@ type Operator struct { OperandPods map[string]*Pod } +// CsvInstallPlan Describes an operator's install plan details +// +// This structure holds the name of the install plan along with URLs for both +// the bundle image and the index image used in the installation process. It is +// primarily utilized to convey necessary information when creating or managing +// operator deployments, ensuring that the correct images are referenced during +// installation. type CsvInstallPlan struct { // Operator's installPlan name Name string `yaml:"name" json:"name"` @@ -69,10 +84,23 @@ type CsvInstallPlan struct { IndexImage string `yaml:"indexImage" json:"indexImage"` } +// Operator.String Provides a human-readable representation of the operator +// +// This method formats key fields such as the operator name, namespace, +// subscription name, and target namespaces into a single string. 
It uses a +// standard formatting function to create the output and returns it for display +// or logging purposes. func (op *Operator) String() string { return fmt.Sprintf("csv: %s ns:%s subscription:%s targetNamespaces=%v", op.Name, op.Namespace, op.SubscriptionName, op.TargetNamespaces) } +// Operator.SetPreflightResults Collects and stores Preflight test outcomes for an operator +// +// The function runs the OpenShift Preflight checks against the operator's +// bundle image, capturing passed, failed, and error results. It writes all +// check logs to a buffer and attaches them to the global log output. After +// processing, it removes temporary artifacts and assigns the collected results +// to the operator’s PreflightResults field. func (op *Operator) SetPreflightResults(env *TestEnvironment) error { if len(op.InstallPlans) == 0 { log.Warn("Operator %q has no InstallPlans. Skipping setting Preflight results", op) @@ -131,8 +159,13 @@ func (op *Operator) SetPreflightResults(env *TestEnvironment) error { return nil } -// getUniqueCsvListByName returns a CSV list with unique names from a list which may contain -// more than one CSV with the same name. The output CSV list is sorted by CSV name. +// getUniqueCsvListByName filters a list to include only one instance per CSV name +// +// The function receives a slice of ClusterServiceVersion objects, removes any +// duplicates by keeping the last occurrence for each unique name, logs how many +// unique entries were found, and then returns the deduplicated slice sorted +// alphabetically by CSV name. It uses an internal map to track seen names and +// sort.Slice for deterministic ordering. 
func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Alpha.ClusterServiceVersion { uniqueCsvsMap := map[string]*olmv1Alpha.ClusterServiceVersion{} for _, csv := range csvs { @@ -151,6 +184,14 @@ func getUniqueCsvListByName(csvs []*olmv1Alpha.ClusterServiceVersion) []*olmv1Al return uniqueCsvsList } +// createOperators Creates a list of operator objects from CSV data +// +// The function iterates over unique cluster service versions, filters out +// failed ones if required, and builds an Operator struct for each. It extracts +// package and version information from the CSV name, associates at least one +// subscription to determine target namespaces, and gathers install plans linked +// to the CSV. The resulting slice contains operators enriched with phase, +// namespace, and optional CSV details. func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion, allSubscriptions []olmv1Alpha.Subscription, allPackageManifests []*olmpkgv1.PackageManifest, @@ -205,6 +246,13 @@ func createOperators(csvs []*olmv1Alpha.ClusterServiceVersion, return operators } +// getAtLeastOneSubscription Finds a subscription linked to the given CSV and updates the operator record +// +// The function scans through all subscriptions, matching one whose installed +// CSV name equals that of the provided CSV. When found, it populates the +// operator with subscription details such as name, namespace, package, catalog +// source, and channel. If the channel is missing, it retrieves the default +// channel from the related package manifest; otherwise it logs an error. 
func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, subscriptions []olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest) (atLeastOneSubscription bool) { atLeastOneSubscription = false for s := range subscriptions { @@ -234,6 +282,14 @@ func getAtLeastOneSubscription(op *Operator, csv *olmv1Alpha.ClusterServiceVersi return atLeastOneSubscription } +// getPackageManifestWithSubscription Finds a matching package manifest for a subscription +// +// The function iterates over the provided package manifests, checking whether +// each one matches the subscription’s package name, catalog source namespace, +// and catalog source. If a match is found, that package manifest is returned; +// otherwise the function returns nil. This lookup assists in determining +// default channel information when it is not explicitly set in the +// subscription. func getPackageManifestWithSubscription(subscription *olmv1Alpha.Subscription, packageManifests []*olmpkgv1.PackageManifest) *olmpkgv1.PackageManifest { for index := range packageManifests { if packageManifests[index].Status.PackageName == subscription.Spec.Package && @@ -245,6 +301,13 @@ func getPackageManifestWithSubscription(subscription *olmv1Alpha.Subscription, p return nil } +// getAtLeastOneCsv Determines if an install plan includes a specific CSV +// +// The function iterates through the names listed in the install plan’s +// specification to see if it matches the provided CSV. If a match is found, it +// verifies that the install plan contains bundle lookup information; otherwise +// it logs a warning and skips that plan. It returns true when a matching CSV +// with valid bundle lookups exists, false otherwise. 
func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1Alpha.InstallPlan) (atLeastOneCsv bool) { atLeastOneCsv = false for _, csvName := range installPlan.Spec.ClusterServiceVersionNames { @@ -262,6 +325,14 @@ func getAtLeastOneCsv(csv *olmv1Alpha.ClusterServiceVersion, installPlan *olmv1A return atLeastOneCsv } +// getAtLeastOneInstallPlan retrieves at least one install plan for an operator +// +// This function iterates through all available install plans, filtering by +// namespace and ensuring the plan includes the specified CSV. For each +// qualifying plan it extracts bundle and index image information from catalog +// sources. The install plan details are appended to the operator’s +// InstallPlans slice and a true flag is returned when at least one plan has +// been added. func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersion, allInstallPlans []*olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (atLeastOneInstallPlan bool) { atLeastOneInstallPlan = false for _, installPlan := range allInstallPlans { @@ -291,6 +362,12 @@ func getAtLeastOneInstallPlan(op *Operator, csv *olmv1Alpha.ClusterServiceVersio return atLeastOneInstallPlan } +// CsvToString Formats a CSV name and namespace into a readable string +// +// The function receives a pointer to a ClusterServiceVersion object and returns +// a string that includes the object's name followed by its namespace. It uses +// formatting to produce a concise representation suitable for logging or +// debugging purposes. 
func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string { return fmt.Sprintf("operator csv: %s ns: %s", csv.Name, @@ -298,6 +375,12 @@ func CsvToString(csv *olmv1Alpha.ClusterServiceVersion) string { ) } +// getSummaryAllOperators Creates a sorted list of unique operator status strings +// +// This function iterates over a slice of operators, building a key that +// includes the phase, package name, version and namespace information. It +// stores each distinct key in a map to avoid duplicates, then collects the keys +// into a slice, sorts them alphabetically, and returns the result. func getSummaryAllOperators(operators []*Operator) (summary []string) { operatorMap := map[string]bool{} for _, o := range operators { @@ -317,6 +400,12 @@ func getSummaryAllOperators(operators []*Operator) (summary []string) { return summary } +// getCatalogSourceImageIndexFromInstallPlan retrieves the image index of a catalog source referenced by an install plan +// +// The function takes an install plan and a list of catalog sources, finds the +// catalog source referenced in the first bundle lookup, and returns its image +// field. If no matching catalog source is found it reports an error. The +// returned string is used elsewhere to identify the index image for a CSV. func getCatalogSourceImageIndexFromInstallPlan(installPlan *olmv1Alpha.InstallPlan, allCatalogSources []*olmv1Alpha.CatalogSource) (string, error) { // ToDo/Technical debt: what to do if installPlan has more than one BundleLookups entries. 
catalogSourceName := installPlan.Status.BundleLookups[0].CatalogSourceRef.Name @@ -331,6 +420,12 @@ func getCatalogSourceImageIndexFromInstallPlan(installPlan *olmv1Alpha.InstallPl return "", fmt.Errorf("failed to get catalogsource: not found") } +// getOperatorTargetNamespaces Retrieves the list of namespaces an operator targets +// +// The function queries the Operator Group resource within a specified namespace +// to determine which namespaces the operator is allowed to operate in. It +// returns a slice of target namespace names and an error if no OperatorGroup +// exists or if the API call fails. func getOperatorTargetNamespaces(namespace string) ([]string, error) { client := clientsholder.GetClientsHolder() @@ -347,6 +442,13 @@ func getOperatorTargetNamespaces(namespace string) ([]string, error) { return list.Items[0].Spec.TargetNamespaces, nil } +// GetAllOperatorGroups Retrieves all OperatorGroup resources from the cluster +// +// This function queries the OpenShift Operator Lifecycle Manager for +// OperatorGroup objects across all namespaces. It returns a slice of pointers +// to each group found, or nil if none exist, while logging warnings when the +// API resource is missing or empty. Errors unrelated to a missing resource are +// propagated back to the caller. func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error) { client := clientsholder.GetClientsHolder() @@ -374,6 +476,11 @@ func GetAllOperatorGroups() ([]*olmv1.OperatorGroup, error) { return operatorGroups, nil } +// searchPodInSlice Finds a pod in a list by name and namespace +// +// The function receives a pod name, its namespace, and a slice of pod objects. +// It builds an index map keyed on the namespaced name and looks up the +// requested key, returning the matching pod if found or nil otherwise. 
func searchPodInSlice(name, namespace string, pods []*Pod) *Pod { // Helper map to filter pods that have been already added podsMap := map[types.NamespacedName]*Pod{} @@ -390,6 +497,13 @@ func searchPodInSlice(name, namespace string, pods []*Pod) *Pod { return nil } +// addOperatorPodsToTestPods Adds operator pods to the test pod list +// +// This function iterates over a slice of operator pods, checking each one +// against the current environment's pod collection. If an operator pod is +// already present, it marks that existing pod as an operator; otherwise, it +// appends the new pod to the test list. Logging statements provide visibility +// into whether pods were added or already discovered. func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment) { for _, operatorPod := range operatorPods { // Check whether the pod was already discovered @@ -406,6 +520,14 @@ func addOperatorPodsToTestPods(operatorPods []*Pod, env *TestEnvironment) { } } +// addOperandPodsToTestPods Adds discovered operand pods to the test environment +// +// This routine iterates over a list of operand pods, checking each against the +// current set of test pods in the environment. If a pod is already present, it +// logs that fact and marks the existing entry as an operand; otherwise it +// appends the new pod to the environment's pod list. The function ensures no +// duplicate entries while guaranteeing all operand pods are available for +// subsequent tests. 
func addOperandPodsToTestPods(operandPods []*Pod, env *TestEnvironment) { for _, operandPod := range operandPods { // Check whether the pod was already discovered diff --git a/pkg/provider/pods.go b/pkg/provider/pods.go index 1c28ae63a..7f0f8c5e0 100644 --- a/pkg/provider/pods.go +++ b/pkg/provider/pods.go @@ -42,6 +42,15 @@ const ( IstioProxyContainerName = "istio-proxy" ) +// Pod Represents a Kubernetes pod with extended metadata and helper methods +// +// This structure embeds the corev1.Pod type and adds fields that track +// additional information such as service account mappings, container lists, +// network interface data, PCI device references, and flags indicating whether +// the pod is an operator or operand. It also provides boolean indicators for +// skipping certain tests. The struct’s methods offer utilities for examining +// resource guarantees, CPU isolation compliance, affinity requirements, +// SR‑IOV usage, and other security and configuration checks. type Pod struct { *corev1.Pod AllServiceAccountsMap *map[string]*corev1.ServiceAccount @@ -54,6 +63,15 @@ type Pod struct { IsOperand bool } +// NewPod Creates a Pod wrapper with network and container details +// +// The function takes a Kubernetes pod object, extracts its annotations to +// determine Multus network interfaces and PCI addresses, logs missing or empty +// annotations, and handles errors gracefully. It also inspects labels to decide +// whether to skip connectivity tests and populates the list of containers from +// the pod specification. The resulting Pod structure includes the original pod +// pointer, network interface maps, PCI information, container slice, and flags +// controlling test behavior. 
func NewPod(aPod *corev1.Pod) (out Pod) { var err error out.Pod = aPod @@ -89,6 +107,14 @@ func NewPod(aPod *corev1.Pod) (out Pod) { return out } +// ConvertArrayPods Transforms a slice of core Kubernetes pods into provider-specific pod wrappers +// +// The function iterates over each input pod, creates a new wrapper object with +// the helper constructor, and collects pointers to these wrappers in a result +// slice. Each wrapper contains additional fields such as network interfaces, +// PCI devices, and test skip flags based on pod annotations and labels. The +// returned slice provides an enriched representation suitable for downstream +// connectivity testing. func ConvertArrayPods(pods []*corev1.Pod) (out []*Pod) { for i := range pods { aPodWrapper := NewPod(pods[i]) @@ -97,14 +123,30 @@ func ConvertArrayPods(pods []*corev1.Pod) (out []*Pod) { return out } +// Pod.IsPodGuaranteed Determines if the pod meets guaranteed resource conditions +// +// The method checks whether every container in the pod has defined CPU and +// memory limits that match their requests, indicating a guaranteed QoS class. +// It delegates this logic to AreResourcesIdentical, which verifies consistency +// across all containers. The result is returned as a boolean. func (p *Pod) IsPodGuaranteed() bool { return AreResourcesIdentical(p) } +// Pod.IsPodGuaranteedWithExclusiveCPUs Determines if a pod’s CPU requests and limits are whole units and match exactly +// +// It checks that each container in the pod specifies CPU resources as whole units and +// that the request equals the limit for both CPU and memory. If all containers +// satisfy these conditions, it returns true; otherwise false. 
func (p *Pod) IsPodGuaranteedWithExclusiveCPUs() bool { return AreCPUResourcesWholeUnits(p) && AreResourcesIdentical(p) } +// Pod.IsCPUIsolationCompliant Determines whether a pod meets CPU isolation requirements +// +// The method checks that the pod has annotations disabling both CPU and IRQ +// load balancing, and verifies a runtime class name is set. If either condition +// fails it logs a debug message and returns false; otherwise true. func (p *Pod) IsCPUIsolationCompliant() bool { isCPUIsolated := true @@ -121,6 +163,12 @@ func (p *Pod) IsCPUIsolationCompliant() bool { return isCPUIsolated } +// Pod.String Formats pod name and namespace into a readable string +// +// This method constructs a human‑readable representation of a Pod by +// combining its name and namespace. It uses formatting to produce the pattern +// "pod: <name> ns: <namespace>", which is helpful for logging or debugging +// output throughout the provider package. func (p *Pod) String() string { return fmt.Sprintf("pod: %s ns: %s", p.Name, @@ -128,6 +176,12 @@ func (p *Pod) String() string { ) } +// Pod.AffinityRequired Determines if a pod requires affinity based on its labels +// +// The method looks for the key that indicates whether affinity is required in +// the pod's label set. If present, it attempts to interpret the value as a +// boolean string; on parsing failure it logs a warning and returns false. When +// the key is absent or parsing succeeds, it returns the parsed boolean result. 
func (p *Pod) AffinityRequired() bool { if val, ok := p.Labels[AffinityRequiredKey]; ok { result, err := strconv.ParseBool(val) @@ -140,7 +194,12 @@ func (p *Pod) AffinityRequired() bool { return false } -// returns true if at least one container in the pod has a resource name containing "hugepage", return false otherwise +// Pod.HasHugepages determines if any container requests or limits hugepage resources +// +// The method scans each container’s resource requests and limits for a name +// containing the substring "hugepage". If such a resource is found, it +// immediately returns true; otherwise, after all containers are checked, it +// returns false. func (p *Pod) HasHugepages() bool { for _, cut := range p.Containers { for name := range cut.Resources.Requests { @@ -157,6 +216,12 @@ func (p *Pod) HasHugepages() bool { return false } +// Pod.CheckResourceHugePagesSize Verifies that all huge page resources match the specified size +// +// The method iterates over each container in a pod, checking both requested and +// limited resources for any huge page entries. If a huge page resource is found +// but its name differs from the supplied size, the function returns false +// immediately. When no mismatches are detected, it returns true. func (p *Pod) CheckResourceHugePagesSize(size string) bool { for _, cut := range p.Containers { // Resources must be specified @@ -177,6 +242,13 @@ func (p *Pod) CheckResourceHugePagesSize(size string) bool { return true } +// Pod.IsAffinityCompliant checks whether a pod has required affinity rules +// +// The method examines the pod's specification to determine if it contains any +// affinity configuration. If no affinity is present, or if anti‑affinity +// rules exist, or if neither pod nor node affinity are defined, it returns +// false along with an explanatory error. Otherwise it reports success by +// returning true and a nil error. 
func (p *Pod) IsAffinityCompliant() (bool, error) { if p.Spec.Affinity == nil { return false, fmt.Errorf("%s has been found with an AffinityRequired flag but is missing corresponding affinity rules", p.String()) @@ -190,10 +262,21 @@ func (p *Pod) IsAffinityCompliant() (bool, error) { return true, nil } +// Pod.IsShareProcessNamespace determines if a pod shares its process namespace +// +// The method checks the pod specification for the ShareProcessNamespace field. +// If the field exists and is set to true, it returns true; otherwise it returns +// false. func (p *Pod) IsShareProcessNamespace() bool { return p.Spec.ShareProcessNamespace != nil && *p.Spec.ShareProcessNamespace } +// Pod.ContainsIstioProxy Detects the presence of an Istio side‑car container in a pod +// +// The method scans each container defined in the pod, comparing its name +// against the predefined Istio proxy container identifier. If it finds a match, +// it immediately returns true; otherwise, after examining all containers, it +// returns false. func (p *Pod) ContainsIstioProxy() bool { for _, container := range p.Containers { if container.Name == IstioProxyContainerName { @@ -203,6 +286,14 @@ func (p *Pod) ContainsIstioProxy() bool { return false } +// Pod.CreatedByDeploymentConfig Determines if a pod originates from an OpenShift DeploymentConfig +// +// This method examines each owner reference of the pod, looking for a +// ReplicationController that itself references a DeploymentConfig. It retrieves +// replication controller objects via the Kubernetes client and checks their +// owners to find a matching deployment config name. The function returns true +// if such a relationship exists, otherwise false, along with any error +// encountered during API calls. 
func (p *Pod) CreatedByDeploymentConfig() (bool, error) { oc := clientsholder.GetClientsHolder() for _, podOwner := range p.GetOwnerReferences() { @@ -221,31 +312,36 @@ func (p *Pod) CreatedByDeploymentConfig() (bool, error) { return false, nil } +// Pod.HasNodeSelector Indicates if the pod specifies a node selector +// +// The method examines the pod's specification for a non‑empty nodeSelector +// map. It returns true when at least one key/value pair is present, meaning the +// pod has constraints on which nodes it can run. If the map is empty or nil, +// the function returns false. func (p *Pod) HasNodeSelector() bool { // Checks whether or not the pod has a nodeSelector or a NodeName supplied return len(p.Spec.NodeSelector) != 0 } +// Pod.IsRuntimeClassNameSpecified checks whether a pod has a runtime class specified +// +// The method returns true when the pod’s specification includes a +// runtimeClassName field, indicating that a runtime class has been assigned. If +// the field is nil, it returns false, implying no runtime class is set for the +// pod. func (p *Pod) IsRuntimeClassNameSpecified() bool { return p.Spec.RuntimeClassName != nil } -// Helper function to parse CNCF's networks annotation, retrieving -// the names only. It's a custom and simplified version of: -// https://github.com/k8snetworkplumbingwg/multus-cni/blob/e692127d19623c8bdfc4d391224ea542658b584c/pkg/k8sclient/k8sclient.go#L185 -// -// The cncf netwoks annotation has two different formats: +// getCNCFNetworksNamesFromPodAnnotation Extracts network names from a pod's CNCF annotation // -// a) list of network names: k8s.v1.cni.cncf.io/networks: [,,...] -// b) json array of network objects: -// k8s.v1.cni.cncf.io/networks: |- -// [ -// { -// "name": "", -// "namespace": "", -// "default-route": [""] -// } -// ] +// The function receives the raw value of the k8s.v1.cni.cncf.io/networks +// annotation, which can be either a comma‑separated list or a JSON array of +// objects. 
It attempts to unmarshal the JSON; if that succeeds it collects the +// "name" field from each object. If unmarshalling fails, it falls back to +// splitting the string on commas and trimming spaces, returning all non‑empty +// names. The result is a slice of strings containing only the network +// identifiers. func getCNCFNetworksNamesFromPodAnnotation(networksAnnotation string) []string { // Each CNCF network has many more fields, but here we only need to unmarshal the name. // See https://github.com/k8snetworkplumbingwg/multus-cni/blob/e692127d19623c8bdfc4d391224ea542658b584c/pkg/types/types.go#L127 @@ -299,6 +395,14 @@ func getCNCFNetworksNamesFromPodAnnotation(networksAnnotation string) []string { ] } */ + +// isNetworkAttachmentDefinitionSRIOVConfigMTUSet determines whether a SR-IOV plugin specifies an MTU +// +// The function parses the JSON network attachment definition string into a CNI +// configuration structure, verifies that it contains multiple plugins, and then +// iterates over those plugins to find one of type "sriov" with a positive MTU +// value. If such a plugin is found, it returns true; otherwise false. Errors +// are returned for malformed JSON or missing plugin list. func isNetworkAttachmentDefinitionSRIOVConfigMTUSet(nadConfig string) (bool, error) { const ( typeSriov = "sriov" @@ -335,34 +439,13 @@ func isNetworkAttachmentDefinitionSRIOVConfigMTUSet(nadConfig string) (bool, err return false, nil } -// isNetworkAttachmentDefinitionConfigTypeSRIOV is a helper function to check whether a CNI -// config string has any config for sriov plugin. -// CNI config has two modes: single CNI plugin, or multi-plugins: -// Single CNI plugin config sample: +// isNetworkAttachmentDefinitionConfigTypeSRIOV checks if a CNI configuration string contains an SR-IOV plugin // -// { -// "cniVersion": "0.4.0", -// "name": "sriov-network", -// "type": "sriov", -// ... 
-// } -// -// Multi-plugin CNI config sample: -// -// { -// "cniVersion": "0.4.0", -// "name": "sriov-network", -// "plugins": [ -// { -// "type": "sriov", -// "device": "eth1", -// ... -// }, -// { -// "type": "firewall" -// ... -// } -// ] +// The function parses the JSON-formatted CNI config, handling both +// single-plugin and multi-plugin layouts. It looks for a "type" field or +// iterates through the plugins array to find an entry with type "sriov", +// returning true if found. Errors are produced for malformed JSON or unexpected +// structures. func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error) { const ( typeSriov = "sriov" @@ -404,9 +487,13 @@ func isNetworkAttachmentDefinitionConfigTypeSRIOV(nadConfig string) (bool, error return false, nil } -// IsUsingSRIOV returns true if any of the pod's interfaces is a sriov one. -// First, it retrieves the list of networks names from the CNFC annotation and then -// checks the config of the corresponding network-attachment definition (NAD). +// Pod.IsUsingSRIOV determines whether a pod has any SR‑IOV network interfaces +// +// The method inspects the pod’s annotations for CNCF network names, retrieves +// each corresponding NetworkAttachmentDefinition, and checks if its CNI +// configuration type is "sriov". If at least one definition matches, it returns +// true; otherwise false. Errors from annotation parsing or API calls are +// propagated to the caller. func (p *Pod) IsUsingSRIOV() (bool, error) { const ( cncfNetworksAnnotation = "k8s.v1.cni.cncf.io/networks" @@ -445,7 +532,13 @@ func (p *Pod) IsUsingSRIOV() (bool, error) { return false, nil } -// IsUsingSRIOVWithMTU returns true if any of the pod's interfaces is a sriov one with MTU set. +// Pod.IsUsingSRIOVWithMTU determines if the pod has any SR-IOV interface configured with an MTU +// +// The method inspects the pod's annotations to find declared CNCF networks, +// then retrieves each corresponding NetworkAttachmentDefinition. 
For every +// network it checks whether a SriovNetwork and matching SriovNetworkNodePolicy +// exist that specify an MTU value; if so it returns true. If no such +// configuration is found, it returns false without error. func (p *Pod) IsUsingSRIOVWithMTU() (bool, error) { const ( cncfNetworksAnnotation = "k8s.v1.cni.cncf.io/networks" @@ -483,6 +576,13 @@ func (p *Pod) IsUsingSRIOVWithMTU() (bool, error) { return false, nil } +// sriovNetworkUsesMTU Checks whether a SriovNetwork has an MTU configured +// +// The function iterates through all provided SriovNetworks and matches one by +// name to the given NetworkAttachmentDefinition. For each match it looks for a +// SriovNetworkNodePolicy in the same namespace that shares the same +// resourceName, then examines its spec for an MTU value greater than zero. If +// such a policy is found, true is returned; otherwise false. func sriovNetworkUsesMTU(sriovNetworks, sriovNetworkNodePolicies []unstructured.Unstructured, nadName string) bool { for _, sriovNetwork := range sriovNetworks { networkName := sriovNetwork.GetName() @@ -535,6 +635,14 @@ func sriovNetworkUsesMTU(sriovNetworks, sriovNetworkNodePolicies []unstructured. return false } +// Pod.IsUsingClusterRoleBinding Checks if a pod’s service account is linked to any cluster role binding +// +// The function receives a list of cluster role bindings and logs the pod being +// examined. It iterates through each binding, comparing the pod’s service +// account name and namespace with the subjects in the binding. If a match is +// found, it reports true along with the role reference name; otherwise it +// returns false. 
+// //nolint:gocritic func (p *Pod) IsUsingClusterRoleBinding(clusterRoleBindings []rbacv1.ClusterRoleBinding, logger *log.Logger) (bool, string, error) { @@ -558,6 +666,12 @@ func (p *Pod) IsUsingClusterRoleBinding(clusterRoleBindings []rbacv1.ClusterRole return false, "", nil } +// Pod.IsRunAsUserID Checks if the pod runs as a specific user ID +// +// The method inspects the pod's security context, returning false if it is nil +// or if no RunAsUser value is set. If a run-as-user value exists, it compares +// that value to the supplied uid and returns true when they match. This allows +// callers to verify whether the pod will execute with the given user identity. func (p *Pod) IsRunAsUserID(uid int64) bool { if p.Spec.SecurityContext == nil || p.Spec.SecurityContext.RunAsUser == nil { return false @@ -565,10 +679,14 @@ func (p *Pod) IsRunAsUserID(uid int64) bool { return *p.Spec.SecurityContext.RunAsUser == uid } -// Returns the list of containers that have the securityContext.runAsNonRoot set to false and securityContext.runAsUser set to zero. -// Both parameteters are checked first at the pod level and acts as a default value -// for the container configuration, if it is not present. -// See: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +// Pod.GetRunAsNonRootFalseContainers identifies containers violating non-root security policies +// +// This method examines each container in a pod to determine if it inherits or +// sets runAsNonRoot to false or runs as user ID zero, indicating a root +// context. It skips any containers listed in the provided map and aggregates +// those that fail the checks along with explanatory reasons. The function +// returns two slices: one of non-compliant containers and another containing +// the corresponding justification strings. 
func (p *Pod) GetRunAsNonRootFalseContainers(knownContainersToSkip map[string]bool) (nonCompliantContainers []*Container, nonComplianceReasons []string) { // Check pod-level security context this will be set by default for containers // If not already configured at the container level @@ -603,15 +721,23 @@ func (p *Pod) GetRunAsNonRootFalseContainers(knownContainersToSkip map[string]bo return nonCompliantContainers, nonComplianceReasons } -// Get the list of top owners of pods +// Pod.GetTopOwner Retrieves the top-level owners of a pod +// +// The method returns a map keyed by owner kind, containing information about +// each top-level resource that owns the pod. It calls an internal helper to +// resolve all owner references, following chains up to the root. The result is +// returned along with any error encountered during resolution. func (p *Pod) GetTopOwner() (topOwners map[string]podhelper.TopOwner, err error) { return podhelper.GetPodTopOwner(p.Namespace, p.OwnerReferences) } -// AutomountServiceAccountSetOnSA checks if the AutomountServiceAccountToken field is set on the pod's ServiceAccount. -// Returns: -// - A boolean pointer indicating whether the AutomountServiceAccountToken field is set. -// - An error if any occurred during the operation. +// Pod.IsAutomountServiceAccountSetOnSA Determines if a pod’s service account has automount enabled +// +// The method inspects the pod’s associated service account to see whether its +// AutomountServiceAccountToken field is set. It first validates that the +// service account map exists and contains an entry for the pod’s namespace +// and name, returning errors otherwise. If found, it returns a pointer to the +// boolean value indicating automount status along with nil error. 
func (p *Pod) IsAutomountServiceAccountSetOnSA() (isSet *bool, err error) { if p.AllServiceAccountsMap == nil { return isSet, fmt.Errorf("AllServiceAccountsMap is not initialized for pod with ns: %s and name %s", p.Namespace, p.Name) diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index 6195577cc..5407a4661 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -72,6 +72,14 @@ var ( MasterLabels = []string{"node-role.kubernetes.io/master", "node-role.kubernetes.io/control-plane"} ) +// TestEnvironment Provides runtime information for test execution +// +// This struct holds configuration, cluster state, and collected resources +// needed during tests. It tracks pods, nodes, operators, catalogs, and various +// Kubernetes objects while exposing helper methods to filter them by +// characteristics such as CPU isolation or affinity requirements. The data is +// populated from the test harness and can be refreshed when the underlying +// environment changes. type TestEnvironment struct { // rename this with testTarget Namespaces []string `json:"testNamespaces"` AbnormalEvents []*Event @@ -147,6 +155,13 @@ type TestEnvironment struct { // rename this with testTarget SkipPreflight bool } +// MachineConfig Encapsulates a machine configuration including systemd unit definitions +// +// The structure embeds the core machine configuration type from the Kubernetes +// API, adding a Config field that contains systemd unit information. It holds +// an array of unit descriptors, each specifying a name and contents for a +// systemd service file. This representation is used to unmarshal the raw JSON +// of a MachineConfig resource into usable Go objects. 
type MachineConfig struct { *mcv1.MachineConfig Config struct { @@ -159,6 +174,12 @@ type MachineConfig struct { } `json:"config"` } +// CniNetworkInterface Represents a network interface configured by CNI +// +// This struct holds details about a pod’s network attachment, including the +// interface name, assigned IP addresses, whether it is the default route, DNS +// settings, and additional device metadata. The fields are populated from the +// Kubernetes annotation that lists all attached networks for a pod. type CniNetworkInterface struct { Name string `json:"name"` Interface string `json:"interface"` @@ -168,20 +189,51 @@ type CniNetworkInterface struct { DeviceInfo deviceInfo `json:"device-info"` } +// ScaleObject Represents a Kubernetes custom resource scaling configuration +// +// This struct holds the desired scale for a custom resource along with its +// group and resource identifiers. The Scale field contains the target number of +// replicas, while GroupResourceSchema specifies which API group and kind it +// applies to. It is used by provider functions to adjust or query resource +// scaling settings. type ScaleObject struct { Scale CrScale GroupResourceSchema schema.GroupResource } +// deviceInfo Holds low-level device details +// +// This struct stores information about a device, including its type and version +// strings as well as a PCI configuration structure. The PCI field contains the +// specific bus, device, and function identifiers that enable precise hardware +// identification. Together, these fields provide a compact representation of +// the device’s identity for use in diagnostics or policy enforcement. type deviceInfo struct { Type string `json:"type"` Version string `json:"version"` PCI pci `json:"pci"` } +// pci Represents a PCI device address +// +// This type holds the string representation of a PCI bus, device, and function +// identifier used by the provider to locate hardware resources. 
The single +// field contains the address formatted as "domain:bus:device.function" or a +// simplified form compatible with the system's PCI enumeration. It is utilized +// internally when mapping certificates or configurations to specific hardware +// components. type pci struct { PciAddress string `json:"pci-address"` } + +// PreflightTest Represents the outcome of a pre‑flight check +// +// This structure holds information about a single test performed before +// deployment, including its name, a description of what it verifies, an +// optional error if the test failed, and suggested remediation steps. When the +// Error field is nil, the test succeeded; otherwise the value explains why it +// did not pass. The struct can be used to report results in logs or user +// interfaces. type PreflightTest struct { Name string Description string @@ -189,6 +241,12 @@ type PreflightTest struct { Error error } +// PreflightResultsDB Stores the outcomes of preflight checks for a container image +// +// This structure holds lists of tests that passed, failed, or encountered +// errors during a preflight run. Each entry contains the test name, +// description, remediation guidance, and any error message if applicable. The +// data is used to report results back to callers and can be cached for reuse. type PreflightResultsDB struct { Passed []PreflightTest Failed []PreflightTest @@ -200,6 +258,14 @@ var ( loaded = false ) +// deployDaemonSet Deploys the privileged probe daemonset +// +// This function first configures a Kubernetes client for privileged daemonset +// operations and checks whether the target daemonset is already running with +// the correct image. If it is not ready, it creates the daemonset using the +// specified image and resource limits from configuration parameters. After +// creation, it waits until all pods of the daemonset are ready or times out, +// returning an error if any step fails. 
func deployDaemonSet(namespace string) error { k8sPrivilegedDs.SetDaemonSetClient(clientsholder.GetClientsHolder().K8sClient) @@ -229,6 +295,15 @@ func deployDaemonSet(namespace string) error { return nil } +// buildTestEnvironment initializes the test environment state +// +// The function starts by resetting the global environment structure and loading +// configuration parameters from a file. It then attempts to deploy a probe +// daemonset; if that fails it records the failure but continues with limited +// tests. Next, it performs autodiscovery of cluster resources such as +// operators, pods, services, CRDs, and more, populating many fields in the +// environment struct. Throughout the process, it logs progress, handles errors +// by terminating on critical failures, and measures the total time taken. func buildTestEnvironment() { //nolint:funlen,gocyclo start := time.Now() env = TestEnvironment{} @@ -411,6 +486,13 @@ func buildTestEnvironment() { //nolint:funlen,gocyclo log.Info("Completed the test environment build process in %.2f seconds", time.Since(start).Seconds()) } +// updateCrUnderTest Transforms raw scale objects into internal representation +// +// The function receives a slice of autodiscover.ScaleObject items, converts +// each entry into the provider's ScaleObject type by copying its scaling +// information and resource schema, and accumulates them in a new slice. It +// returns this populated slice for use elsewhere in the test environment +// construction. 
func updateCrUnderTest(scaleCrUnderTest []autodiscover.ScaleObject) []ScaleObject { var scaleCrUndeTestTemp []ScaleObject for i := range scaleCrUnderTest { @@ -421,6 +503,14 @@ func updateCrUnderTest(scaleCrUnderTest []autodiscover.ScaleObject) []ScaleObjec return scaleCrUndeTestTemp } +// getPodContainers Collects relevant container information from a pod while optionally filtering ignored containers +// +// The function iterates over the pod’s declared containers, matching each +// with its status to extract runtime details and image identifiers. It logs +// warnings for containers that are not ready or not running, providing reasons +// and restart counts. If the caller enables ignore mode, containers whose names +// match predefined patterns are skipped; otherwise they are added to the +// returned slice. func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Container) { for j := 0; j < len(aPod.Spec.Containers); j++ { cut := &(aPod.Spec.Containers[j]) @@ -469,6 +559,13 @@ func getPodContainers(aPod *corev1.Pod, useIgnoreList bool) (containerList []*Co return containerList } +// isSkipHelmChart determines whether a Helm chart should be excluded from processing +// +// The function receives the name of a Helm release and a list of names to skip. +// It checks if the list is empty, returning false immediately. Otherwise it +// iterates through each entry; if a match is found it logs that the chart was +// skipped and returns true. If no match is found after the loop, it returns +// false. func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelmChartList) bool { if len(skipHelmChartList) == 0 { return false @@ -482,6 +579,12 @@ func isSkipHelmChart(helmName string, skipHelmChartList []configuration.SkipHelm return false } +// GetTestEnvironment Retrieves the test environment configuration +// +// This function returns a TestEnvironment instance used throughout the suite. 
+// It lazily builds the environment on first call by invoking +// buildTestEnvironment and caches it for future invocations. Subsequent calls +// simply return the cached environment without re‑initialising resources. func GetTestEnvironment() TestEnvironment { if !loaded { buildTestEnvironment() @@ -490,10 +593,23 @@ func GetTestEnvironment() TestEnvironment { return env } +// IsOCPCluster Determines if the current cluster is an OpenShift installation +// +// The function checks whether the test environment’s OpenshiftVersion field +// differs from a predefined constant that represents non‑OpenShift clusters. +// It returns true when the cluster is recognized as OpenShift, and false +// otherwise. func IsOCPCluster() bool { return env.OpenshiftVersion != autodiscover.NonOpenshiftClusterVersion } +// buildContainerImageSource Extracts registry, repository, tag, and digest information from image strings +// +// The function parses a container image URL to obtain the registry, repository, +// and optional tag using a regular expression. It then extracts the image +// digest from an image ID string with another regex. The parsed values are +// assembled into a ContainerImageIdentifier structure and returned for use +// elsewhere in the program. func buildContainerImageSource(urlImage, urlImageID string) (source ContainerImageIdentifier) { const regexImageWithTag = `^([^/]*)/*([^@]*):(.*)` const regexImageDigest = `^([^/]*)/(.*)@(.*:.*)` @@ -530,6 +646,12 @@ func buildContainerImageSource(urlImage, urlImageID string) (source ContainerIma return source } +// GetRuntimeUID Extracts runtime type and unique identifier from a container status +// +// The function splits the ContainerID string at "://" to separate the runtime +// prefix from the unique ID. If a split occurs, it assigns the first part as +// the runtime name and the last part as the UID. It returns these two values +// for use in higher‑level logic. 
func GetRuntimeUID(cs *corev1.ContainerStatus) (runtime, uid string) { split := strings.Split(cs.ContainerID, "://") if len(split) > 0 { @@ -539,9 +661,15 @@ func GetRuntimeUID(cs *corev1.ContainerStatus) (runtime, uid string) { return runtime, uid } -// GetPodIPsPerNet gets the IPs of a pod. -// CNI annotation "k8s.v1.cni.cncf.io/networks-status". -// Returns (ips, error). +// GetPodIPsPerNet Retrieves pod IP addresses from a CNI annotation +// +// This function takes the JSON string stored in the +// "k8s.v1.cni.cncf.io/networks-status" annotation and parses it into a slice of +// network interface structures. It then builds a map keyed by each +// non‑default network name, associating each key with its corresponding +// interface information that includes IP addresses. If the annotation is empty +// or missing, an empty map is returned without error; if parsing fails, an +// error is reported. func GetPodIPsPerNet(annotation string) (ips map[string]CniNetworkInterface, err error) { // This is a map indexed with the network name (network attachment) and // listing all the IPs created in this subnet and belonging to the pod namespace @@ -568,6 +696,13 @@ func GetPodIPsPerNet(annotation string) (ips map[string]CniNetworkInterface, err return ips, nil } +// GetPciPerPod Retrieves PCI addresses associated with a pod's network interfaces +// +// The function accepts the CNI networks status annotation string, checks for +// emptiness, and parses it as JSON into a slice of network interface objects. +// It iterates over each interface, extracting any non-empty PCI address from +// the device information and appends it to the result slice. If parsing fails, +// an error is returned; otherwise the collected PCI addresses are returned. 
func GetPciPerPod(annotation string) (pciAddr []string, err error) { // Sanity check: if the annotation is missing or empty, return empty result without error if strings.TrimSpace(annotation) == "" { @@ -587,26 +722,63 @@ func GetPciPerPod(annotation string) (pciAddr []string, err error) { return pciAddr, nil } +// TestEnvironment.SetNeedsRefresh Marks the test environment as needing a reload +// +// When invoked, this method clears the internal flag that tracks whether the +// environment has been initialized or loaded. It ensures subsequent operations +// will reinitialize necessary resources before use. The function does not +// return any value and performs no additional side effects. func (env *TestEnvironment) SetNeedsRefresh() { loaded = false } +// TestEnvironment.IsIntrusive Indicates if the test environment is running in intrusive mode +// +// The method checks a configuration flag stored in the environment's parameters +// and returns true when intrusive testing is enabled, otherwise false. It +// performs no other side effects or computations. func (env *TestEnvironment) IsIntrusive() bool { return env.params.Intrusive } +// TestEnvironment.IsPreflightInsecureAllowed Indicates whether insecure Preflight connections are permitted +// +// This method returns the value of the AllowPreflightInsecure flag stored in +// the TestEnvironment parameters. It is used to decide if insecure network +// connections should be allowed when executing Preflight checks for containers +// or operators. func (env *TestEnvironment) IsPreflightInsecureAllowed() bool { return env.params.AllowPreflightInsecure } +// TestEnvironment.GetDockerConfigFile Retrieves the path to the Docker configuration file +// +// This method accesses the TestEnvironment's parameters to return the location +// of the Docker config used by Preflight checks. It returns a string +// representing the file path, which is then supplied to container and operator +// preflight options for authentication. 
The function performs no additional +// logic beyond fetching the stored value. func (env *TestEnvironment) GetDockerConfigFile() string { return env.params.PfltDockerconfig } +// TestEnvironment.GetOfflineDBPath Retrieves the configured file system path for an offline database +// +// This method accesses the TestEnvironment's internal parameters to obtain the +// location of the offline database. It returns a string representing that +// filesystem path, which can be used by other components to locate or access +// the database file. No arguments are required and the value is read directly +// from the environment configuration. func (env *TestEnvironment) GetOfflineDBPath() string { return env.params.OfflineDB } +// TestEnvironment.GetWorkerCount Returns the number of worker nodes in the environment +// +// This method iterates over all nodes stored in the TestEnvironment, checking +// each one to determine if it is marked as a worker node. It counts how many +// nodes satisfy this condition and returns that integer count. The result +// reflects the current composition of worker nodes within the test setup. func (env *TestEnvironment) GetWorkerCount() int { workerCount := 0 for _, e := range env.Nodes { @@ -617,6 +789,11 @@ func (env *TestEnvironment) GetWorkerCount() int { return workerCount } +// TestEnvironment.GetMasterCount Counts control plane nodes in the test environment +// +// This method iterates over all nodes stored in the TestEnvironment, checks +// each node to see if it is a control‑node by examining its labels, and +// tallies them. It returns the total number of master nodes as an integer. 
func (env *TestEnvironment) GetMasterCount() int { masterCount := 0 for _, e := range env.Nodes { @@ -627,10 +804,22 @@ func (env *TestEnvironment) GetMasterCount() int { return masterCount } +// TestEnvironment.IsSNO Checks whether the environment contains a single node +// +// The method inspects the collection of nodes in the test environment and +// determines if exactly one node is present. It returns true when the count +// equals one, indicating a single-node setup; otherwise it returns false. func (env *TestEnvironment) IsSNO() bool { return len(env.Nodes) == 1 } +// getMachineConfig Retrieves a machine configuration by name, using caching +// +// The function first checks an in-memory map for the requested configuration; +// if present it returns it immediately. Otherwise it queries the Kubernetes API +// for the MachineConfig resource, decodes its raw YAML into a Go struct, and +// stores the result for future calls. Errors from fetching or unmarshalling are +// propagated to the caller. func getMachineConfig(mcName string, machineConfigs map[string]MachineConfig) (MachineConfig, error) { client := clientsholder.GetClientsHolder() @@ -656,6 +845,14 @@ func getMachineConfig(mcName string, machineConfigs map[string]MachineConfig) (M return mc, nil } +// createNodes Builds a mapping of node names to enriched node structures +// +// The function iterates over supplied node objects, skipping machine +// configuration retrieval for non‑OpenShift clusters and logging warnings in +// that case. For OpenShift nodes it extracts the current MachineConfig +// annotation, fetches or reuses the corresponding config, and attaches it to +// the resulting Node wrapper. The returned map keys each node name to its +// enriched data structure. 
func createNodes(nodes []corev1.Node) map[string]Node { wrapperNodes := map[string]Node{} @@ -692,6 +889,13 @@ func createNodes(nodes []corev1.Node) map[string]Node { return wrapperNodes } + +// TestEnvironment.GetBaremetalNodes Retrieves nodes that use a bare‑metal provider +// +// It iterates over the environment’s node list, selecting those whose +// ProviderID begins with "baremetalhost://". Matching nodes are collected into +// a slice which is returned. The function returns only the filtered set of +// bare‑metal nodes. func (env *TestEnvironment) GetBaremetalNodes() []Node { var baremetalNodes []Node for _, node := range env.Nodes { @@ -702,6 +906,14 @@ func (env *TestEnvironment) GetBaremetalNodes() []Node { return baremetalNodes } +// GetPreflightResultsDB Transforms runtime preflight test outcomes into a structured result set +// +// The function receives a pointer to the runtime results of preflight checks. +// It iterates over each passed, failed, and errored check, extracting the name, +// description, remediation suggestion, and error message when applicable. For +// every check it constructs a PreflightTest entry and appends it to the +// corresponding slice in a PreflightResultsDB structure. Finally, it returns +// this populated database for use by the container or operator result handling. func GetPreflightResultsDB(results *plibRuntime.Results) PreflightResultsDB { resultsDB := PreflightResultsDB{} for _, res := range results.Passed { diff --git a/pkg/provider/scale_object.go b/pkg/provider/scale_object.go index c27befb07..e49031036 100644 --- a/pkg/provider/scale_object.go +++ b/pkg/provider/scale_object.go @@ -12,22 +12,48 @@ import ( scalingv1 "k8s.io/api/autoscaling/v1" ) +// CrScale Wraps a scale object with status tracking +// +// This type extends the base scaling API object by embedding its fields and +// providing helper methods to inspect readiness and generate a concise string +// representation. 
The embedded struct contains both specification and current +// status, allowing direct access to replica counts and other properties. type CrScale struct { *scalingv1.Scale } +// CrScale.IsScaleObjectReady Checks whether the scale object has reached the desired replica count +// +// The function compares the desired number of replicas defined in the +// specification with the current replica count reported in the status. It logs +// both values for debugging purposes. The result is a boolean indicating if the +// actual count matches the requested count. func (crScale CrScale) IsScaleObjectReady() bool { replicas := (crScale.Spec.Replicas) log.Info("replicas is %d status replica is %d", replicas, crScale.Status.Replicas) return crScale.Status.Replicas == replicas } +// CrScale.ToString Formats the CrScale object into a readable string +// +// This method returns a single string that contains both the name and namespace +// of the CrScale instance. It uses formatting to combine the two fields with +// clear labels, producing output like "cr: <name> ns: <namespace>". The +// function requires no arguments and yields a straightforward textual +// representation for logging or display purposes. func (crScale CrScale) ToString() string { return fmt.Sprintf("cr: %s ns: %s", crScale.Name, crScale.Namespace, ) } + +// GetUpdatedCrObject Retrieves a scaled custom resource and wraps it for further use +// +// This function calls the discovery helper to fetch a custom resource by name +// within a namespace, using the provided scale getter and group-resource +// schema. It then packages the returned scaling object into a CrScale +// structure, returning that along with any error encountered during retrieval. 
func GetUpdatedCrObject(sg scale.ScalesGetter, namespace, name string, groupResourceSchema schema.GroupResource) (*CrScale, error) { result, err := autodiscover.FindCrObjectByNameByNamespace(sg, namespace, name, groupResourceSchema) return &CrScale{ diff --git a/pkg/provider/statefulsets.go b/pkg/provider/statefulsets.go index 74da595e8..22691793d 100644 --- a/pkg/provider/statefulsets.go +++ b/pkg/provider/statefulsets.go @@ -24,10 +24,22 @@ import ( appv1client "k8s.io/client-go/kubernetes/typed/apps/v1" ) +// StatefulSet Encapsulates a Kubernetes StatefulSet for simplified management +// +// The structure embeds the official StatefulSet type, allowing direct access to +// its fields while providing helper methods. It offers functionality to +// determine if the set is fully ready and to produce a concise string +// representation of its identity. type StatefulSet struct { *appsv1.StatefulSet } +// StatefulSet.IsStatefulSetReady Checks if all replicas of a StatefulSet are fully operational +// +// The method compares the desired number of replicas, which defaults to one if +// unspecified, against the current status fields: ready, current, and updated +// replicas. If any of these counts differ from the target, it returns false; +// otherwise, true indicates the StatefulSet is considered ready. func (ss *StatefulSet) IsStatefulSetReady() bool { var replicas int32 if ss.Spec.Replicas != nil { @@ -43,6 +55,11 @@ func (ss *StatefulSet) IsStatefulSetReady() bool { return true } +// StatefulSet.ToString Formats a StatefulSet name and namespace into a string +// +// The method builds a concise representation of the StatefulSet by combining +// its name and namespace. It uses formatting utilities to return a single +// string that identifies the resource in a human‑readable form. 
func (ss *StatefulSet) ToString() string { return fmt.Sprintf("statefulset: %s ns: %s", ss.Name, @@ -50,6 +67,13 @@ func (ss *StatefulSet) ToString() string { ) } +// GetUpdatedStatefulset Retrieves the current StatefulSet object for a given namespace and name +// +// This function calls an internal discovery helper to fetch the latest +// statefulset from the Kubernetes API. It wraps the result in a custom +// StatefulSet type that provides additional methods, such as readiness checks. +// The returned pointer is nil if an error occurs, with the error propagated to +// the caller. func GetUpdatedStatefulset(ac appv1client.AppsV1Interface, namespace, name string) (*StatefulSet, error) { result, err := autodiscover.FindStatefulsetByNameByNamespace(ac, namespace, name) return &StatefulSet{ diff --git a/pkg/scheduling/scheduling.go b/pkg/scheduling/scheduling.go index 5748b131c..8737b5148 100644 --- a/pkg/scheduling/scheduling.go +++ b/pkg/scheduling/scheduling.go @@ -48,6 +48,14 @@ var ( GetProcessCPUSchedulingFn = GetProcessCPUScheduling ) +// parseSchedulingPolicyAndPriority Extracts CPU scheduling policy and priority from chrt command output +// +// The function parses the string produced by the "chrt -p" command, looking for +// lines that indicate the current scheduling policy or priority. It splits the +// output into lines, tokenizes each line, and captures the last word as either +// the policy name or a numeric priority value. If parsing fails or an +// unexpected line appears, it returns an error; otherwise it provides the +// extracted policy string and integer priority. 
func parseSchedulingPolicyAndPriority(chrtCommandOutput string) (schedPolicy string, schedPriority int, err error) { /* Sample output: pid 476's current scheduling policy: SCHED_OTHER @@ -83,6 +91,14 @@ var schedulingRequirements = map[string]string{SharedCPUScheduling: "SHARED_CPU_ ExclusiveCPUScheduling: "EXCLUSIVE_CPU_SCHEDULING: scheduling priority < 10 and scheduling policy == SCHED_RR or SCHED_FIFO", IsolatedCPUScheduling: "ISOLATED_CPU_SCHEDULING: scheduling policy == SCHED_RR or SCHED_FIFO"} +// ProcessPidsCPUScheduling Evaluates CPU scheduling compliance for container processes +// +// The function iterates over a list of process objects, retrieves each +// process's CPU scheduling policy and priority, and checks them against the +// specified scheduling check. For each process it records whether it meets the +// requirements, creating a report object that includes scheduling details and +// arguments. The result is two slices: one for compliant processes and another +// for non‑compliant ones. func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *provider.Container, check string, logger *log.Logger) (compliantContainerPids, nonCompliantContainerPids []*testhelper.ReportObject) { hasCPUSchedulingConditionSuccess := false for _, process := range processes { @@ -117,6 +133,13 @@ func ProcessPidsCPUScheduling(processes []*crclient.Process, testContainer *prov return compliantContainerPids, nonCompliantContainerPids } +// GetProcessCPUScheduling retrieves a process's CPU scheduling policy and priority +// +// The function runs the "chrt -p" command inside a node probe pod to gather +// scheduling information for a given PID within a container. It parses the +// command output to extract the scheduling policy string and numeric priority, +// handling errors when the probe context or command fails. The results are +// returned along with any error encountered during execution. 
func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedulePolicy string, schedulePriority int, err error) { log.Info("Checking the scheduling policy/priority in %v for pid=%d", testContainer, pid) @@ -144,6 +167,12 @@ func GetProcessCPUScheduling(pid int, testContainer *provider.Container) (schedu return schedulePolicy, schedulePriority, err } +// PolicyIsRT Determines whether a scheduling policy represents a real‑time policy +// +// The function receives the name of a Linux CPU scheduling policy and returns +// true if it matches either the First‑In‑First‑Out or Round‑Robin +// policies, which are considered real‑time in this context. Any other policy +// string results in false, indicating non‑real‑time behavior. func PolicyIsRT(schedPolicy string) bool { return schedPolicy == SchedulingFirstInFirstOut || schedPolicy == SchedulingRoundRobin } diff --git a/pkg/stringhelper/stringhelper.go b/pkg/stringhelper/stringhelper.go index 22f33bf23..5d0af490a 100644 --- a/pkg/stringhelper/stringhelper.go +++ b/pkg/stringhelper/stringhelper.go @@ -21,7 +21,12 @@ import ( "strings" ) -// StringInSlice checks a slice for a given string. +// StringInSlice Checks if a value exists in a string slice +// +// The function iterates over the provided slice, trimming whitespace from each +// element before comparison. If containsCheck is false it tests for exact +// equality; otherwise it checks whether the element contains the target +// substring. It returns true as soon as a match is found, otherwise false. func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool { for _, v := range s { if !containsCheck { @@ -37,7 +42,12 @@ func StringInSlice[T ~string](s []T, str T, containsCheck bool) bool { return false } -// SubSlice checks if a slice's elements all exist within a slice +// SubSlice verifies all elements of one slice exist in another +// +// The function receives two string slices: the main slice and a candidate +// sub-slice. 
It iterates over each element of the candidate, checking for an +// exact match within the main slice using StringInSlice. If any element is +// missing, it returns false; otherwise it returns true after all checks pass. func SubSlice(s, sub []string) bool { for _, v := range sub { if !StringInSlice(s, v, false) { @@ -47,7 +57,12 @@ func SubSlice(s, sub []string) bool { return true } -// checks that at least one element is common to both slices +// HasAtLeastOneCommonElement verifies whether two string collections contain a shared value +// +// The routine iterates over the second slice and checks each element against +// the first using a helper that compares trimmed strings for equality. If any +// match is found, it immediately returns true; otherwise it completes the loop +// and returns false. func HasAtLeastOneCommonElement(s1, s2 []string) bool { for _, v := range s2 { if StringInSlice(s1, v, false) { @@ -57,6 +72,11 @@ func HasAtLeastOneCommonElement(s1, s2 []string) bool { return false } +// RemoveEmptyStrings Filters out empty entries from a slice +// +// This function iterates over an input list of strings, selecting only those +// that are not empty. It builds a new slice containing the non-empty values and +// returns it. The original slice is left unchanged. func RemoveEmptyStrings(s []string) []string { var r []string for _, str := range s { @@ -67,31 +87,12 @@ func RemoveEmptyStrings(s []string) []string { return r } -// PointerToString returns the default string representation of the value pointer by p, mainly -// used in log traces to print k8s resources' pointer fields. -// If p is a nil pointer, no matter the type, it will return the string "nil". 
-// -// # Example 1 -// -// var b* bool -// PointerToString(b) -> returns "nil" -// -// # Example 2 -// -// b := true -// bTrue := &b -// PointerToString(bTrue) -> returns "true" -// -// # Example 3 -// -// var num *int -// PointerToString(num) -> returns "nil" -// -// # Example 4 +// PointerToString converts a pointer to its string representation // -// num := 1984 -// num1984 := &num -// PointerToString(num1984) -> returns "1984" +// When the argument is nil, it returns "nil"; otherwise it dereferences the +// pointer and formats the value using standard printing rules. The function +// works for any type thanks to generics, making it useful in log traces or +// debugging output. func PointerToString[T any](p *T) string { if p == nil { return "nil" diff --git a/pkg/testhelper/testhelper.go b/pkg/testhelper/testhelper.go index 4c20858d7..443db7ffc 100644 --- a/pkg/testhelper/testhelper.go +++ b/pkg/testhelper/testhelper.go @@ -30,17 +30,39 @@ const ( ERROR ) +// ReportObject Represents a structured report entry with type and key/value attributes +// +// This structure holds the kind of object being reported, along with parallel +// slices that store field names and corresponding values. The fields are +// populated via methods such as AddField, SetContainerProcessValues, or +// SetType, allowing callers to build descriptive reports for compliance checks. +// It serves as a lightweight container used throughout the test helper package +// to aggregate and serialize results. type ReportObject struct { ObjectType string ObjectFieldsKeys []string ObjectFieldsValues []string } +// FailureReasonOut Represents collections of compliant and non-compliant report objects +// +// This structure stores two separate lists of report objects, one for items +// that meet the compliance criteria and another for those that do not. Each +// list holds pointers to ReportObject instances, allowing callers to access +// detailed information about each item. 
The struct provides an Equal method to +// compare two instances by checking both slices for identical contents. type FailureReasonOut struct { CompliantObjectsOut []*ReportObject NonCompliantObjectsOut []*ReportObject } +// Equal Compares two slices of ReportObject pointers for deep equality +// +// The function first verifies that both slices have the same length. It then +// iterates through each index, treating nil entries as equal only when both are +// nil; a mismatch in nil status causes an immediate false result. For non-nil +// elements, it uses reflect.DeepEqual on the dereferenced values to determine +// equality, returning true only if all corresponding pairs match. func Equal(p, other []*ReportObject) bool { if len(p) != len(other) { return false @@ -59,7 +81,12 @@ func Equal(p, other []*ReportObject) bool { return true } -// FailureReasonOutTestString returns a string representation of the FailureReasonOut struct. +// FailureReasonOutTestString Formats a FailureReasonOut as a readable string +// +// This function takes a FailureReasonOut value and builds a formatted string +// that includes the compliant and non‑compliant object lists. It uses helper +// formatting to produce a concise representation of each list, then +// concatenates them into a single string for debugging or test output. func FailureReasonOutTestString(p FailureReasonOut) (out string) { out = "testhelper.FailureReasonOut{" out += fmt.Sprintf("CompliantObjectsOut: %s,", ReportObjectTestStringPointer(p.CompliantObjectsOut)) @@ -68,8 +95,13 @@ func FailureReasonOutTestString(p FailureReasonOut) (out string) { return out } -// ReportObjectTestStringPointer takes a slice of pointers to ReportObject and returns a string representation of the objects. -// The returned string is in the format "[]*testhelper.ReportObject{&{...}, &{...}, ...}". 
+// ReportObjectTestStringPointer Formats a slice of ReportObject pointers into a readable string +// +// It receives a list of pointers to ReportObject, iterates over each element, +// and appends a formatted representation of the dereferenced object to an +// output string. The resulting string starts with "[]*testhelper.ReportObject{" +// and ends with "}", enclosing all items separated by commas. This string is +// used primarily for debugging or test failure messages. func ReportObjectTestStringPointer(p []*ReportObject) (out string) { out = "[]*testhelper.ReportObject{" for _, p := range p { @@ -79,9 +111,13 @@ func ReportObjectTestStringPointer(p []*ReportObject) (out string) { return out } -// ReportObjectTestString returns a string representation of the given slice of ReportObject. -// Each ReportObject is formatted using the %#v format specifier and appended to the output string. -// The resulting string is enclosed in square brackets and prefixed with "[]testhelper.ReportObject{". +// ReportObjectTestString Creates a formatted string of ReportObject values +// +// The function takes a slice of pointers to ReportObject and builds a single +// string that lists each element in the same order as the input. Each object is +// rendered with the %#v format specifier, appended with a comma, and the entire +// list is wrapped in brackets prefixed by "[]testhelper.ReportObject". The +// resulting string is returned for use in test output or debugging. func ReportObjectTestString(p []*ReportObject) (out string) { out = "[]testhelper.ReportObject{" for _, p := range p { @@ -91,9 +127,12 @@ func ReportObjectTestString(p []*ReportObject) (out string) { return out } -// Equal checks if the current FailureReasonOut is equal to the other FailureReasonOut. -// It compares the CompliantObjectsOut and NonCompliantObjectsOut fields of both structs. -// Returns true if they are equal, false otherwise. 
+// FailureReasonOut.Equal determines equality of two FailureReasonOut instances +// +// It compares the CompliantObjectsOut and NonCompliantObjectsOut fields of both +// structs, returning true only if all corresponding values match. The +// comparison is performed using the generic Equal function for each field. If +// any field differs, it returns false. func (p FailureReasonOut) Equal(other FailureReasonOut) bool { return Equal(p.CompliantObjectsOut, other.CompliantObjectsOut) && Equal(p.NonCompliantObjectsOut, other.NonCompliantObjectsOut) @@ -221,10 +260,12 @@ const ( PodRoleBinding = "Pods with RoleBindings details" ) -// SetContainerProcessValues sets the values for a container process in the report object. -// It takes the scheduling policy, scheduling priority, and command line as input parameters. -// It adds the process command line, scheduling policy, and scheduling priority fields to the report object. -// Finally, it sets the object type to ContainerProcessType. +// ReportObject.SetContainerProcessValues Stores container process details in the report object +// +// It records the command line, scheduling policy, and priority of a container +// process by adding these fields to the report. The function also tags the +// report with a type indicating it represents a container process. The updated +// report object is returned for further chaining. func (obj *ReportObject) SetContainerProcessValues(aPolicy, aPriority, aCommandLine string) *ReportObject { obj.AddField(ProcessCommandLine, aCommandLine) obj.AddField(SchedulingPolicy, aPolicy) @@ -233,9 +274,12 @@ func (obj *ReportObject) SetContainerProcessValues(aPolicy, aPriority, aCommandL return obj } -// NewContainerReportObject creates a new ReportObject for a container. -// It takes the namespace, pod name, container name, reason, and compliance status as parameters. -// It returns a pointer to the created ReportObject. 
+// NewContainerReportObject Creates a report object for a container +// +// It builds a ReportObject with type ContainerType, attaching the provided +// namespace, pod name, container name, and compliance reason as fields. The +// function uses NewReportObject to set the compliance status and then adds +// additional identifying fields before returning the pointer. func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, ContainerType, isCompliant) out.AddField(Namespace, aNamespace) @@ -244,9 +288,14 @@ func NewContainerReportObject(aNamespace, aPodName, aContainerName, aReason stri return out } -// NewCertifiedContainerReportObject creates a new ReportObject for a certified container. -// It takes a ContainerImageIdentifier, aReason string, and a boolean indicating whether the container is compliant. -// It returns a pointer to the created ReportObject. +// NewCertifiedContainerReportObject Creates a report object for a container image +// +// This function receives an image identifier, a compliance reason string, and a +// flag indicating whether the image meets compliance requirements. It +// constructs a new report object of type ContainerImageType, annotating it with +// the provided reason as either compliant or non‑compliant. The resulting +// object includes fields for digest, repository, tag, and registry derived from +// the identifier. func NewCertifiedContainerReportObject(cii provider.ContainerImageIdentifier, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, ContainerImageType, isCompliant) out.AddField(ImageDigest, cii.Digest) @@ -256,24 +305,37 @@ func NewCertifiedContainerReportObject(cii provider.ContainerImageIdentifier, aR return out } -// NewNodeReportObject creates a new ReportObject for a node with the given name, reason, and compliance status. -// It returns the created ReportObject. 
+// NewNodeReportObject Creates a node-specific report object +// +// The function builds a ReportObject for a node by calling the generic +// constructor with the provided reason, type identifier, and compliance flag. +// It then attaches the node name as an additional field before returning the +// fully populated object. func NewNodeReportObject(aNodeName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, NodeType, isCompliant) out.AddField(Name, aNodeName) return out } -// NewClusterVersionReportObject creates a new ReportObject for a cluster version. -// It takes the version, aReason, and isCompliant as input parameters and returns the created ReportObject. +// NewClusterVersionReportObject Creates a report object containing cluster version information +// +// The function takes a version string, a reason for compliance or +// non‑compliance, and a boolean indicating compliance status. It constructs a +// new ReportObject with the provided reason and type, then adds the version as +// an additional field before returning the object. func NewClusterVersionReportObject(version, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, OCPClusterType, isCompliant) out.AddField(OCPClusterVersionType, version) return out } -// NewTaintReportObject creates a new ReportObject with taint-related information. -// It takes in the taintBit, nodeName, aReason, and isCompliant parameters and returns a pointer to the created ReportObject. +// NewTaintReportObject Creates a taint report object with node details +// +// This function builds a ReportObject that records a specific taint bit on a +// given node. It initializes the object with the reason for compliance or +// non‑compliance, sets its type to a predefined taint category, and then adds +// fields for the node name and the taint bit value. The resulting pointer is +// returned for further use in testing or reporting. 
func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, TaintType, isCompliant) out.AddField(NodeType, nodeName) @@ -281,9 +343,12 @@ func NewTaintReportObject(taintBit, nodeName, aReason string, isCompliant bool) return out } -// NewPodReportObject creates a new ReportObject for a pod. -// It takes the namespace, pod name, reason, and compliance status as input parameters. -// It returns a pointer to the created ReportObject. +// NewPodReportObject Creates a report object for a pod +// +// The function builds a ReportObject by calling NewReportObject with the given +// reason, type set to PodType, and compliance flag. It then attaches the +// namespace and pod name as fields on the resulting object. Finally, it returns +// a pointer to this populated ReportObject. func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, PodType, isCompliant) out.AddField(Namespace, aNamespace) @@ -291,9 +356,13 @@ func NewPodReportObject(aNamespace, aPodName, aReason string, isCompliant bool) return out } -// NewHelmChartReportObject creates a new ReportObject for a Helm chart. -// It takes the namespace, Helm chart name, reason, and compliance status as input parameters. -// It returns the created ReportObject. +// NewHelmChartReportObject Creates a report object for a Helm chart +// +// It constructs a new report object with the provided namespace, chart name, +// reason, and compliance status. The function first creates a base report +// object using the supplied reason and compliance flag, then adds fields for +// the namespace and chart name to that object. The completed report object is +// returned for use in testing or reporting. 
func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, HelmType, isCompliant) out.AddField(Namespace, aNamespace) @@ -301,9 +370,12 @@ func NewHelmChartReportObject(aNamespace, aHelmChartName, aReason string, isComp return out } -// NewOperatorReportObject creates a new ReportObject for an operator. -// It takes the namespace, operator name, reason, and compliance status as input parameters. -// It returns the created ReportObject. +// NewOperatorReportObject Creates a report object for an operator +// +// The function builds a new ReportObject using the provided namespace, operator +// name, reason, and compliance flag. It initializes the base object with type +// information, then adds fields for namespace and operator name before +// returning it. func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, OperatorType, isCompliant) out.AddField(Namespace, aNamespace) @@ -311,15 +383,25 @@ func NewOperatorReportObject(aNamespace, aOperatorName, aReason string, isCompli return out } +// NewClusterOperatorReportObject Creates a report object for a cluster operator +// +// This function builds a ReportObject by calling the generic constructor with a +// reason, type label, and compliance flag. It then adds the operator name as an +// additional field before returning the populated object. The returned pointer +// represents a structured report entry that can be used in test results. func NewClusterOperatorReportObject(aClusterOperatorName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, ClusterOperatorType, isCompliant) out.AddField(Name, aClusterOperatorName) return out } -// NewCatalogSourceReportObject creates a new ReportObject for a catalog source. -// It takes the namespace, catalog source name, reason, and compliance status as input parameters. 
-// It returns the created ReportObject. +// NewCatalogSourceReportObject Creates a report object for a catalog source +// +// The function builds a new report object using the provided namespace, catalog +// source name, reason, and compliance flag. It delegates creation to an +// internal helper that sets the type and records whether the item is compliant. +// Finally, it adds namespace and name fields before returning the populated +// report. func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, CatalogSourceType, isCompliant) out.AddField(Namespace, aNamespace) @@ -327,9 +409,13 @@ func NewCatalogSourceReportObject(aNamespace, aCatalogSourceName, aReason string return out } -// NewDeploymentReportObject creates a new ReportObject for a deployment. -// It takes the namespace, deployment name, reason, and compliance status as input parameters. -// It returns a pointer to the created ReportObject. +// NewDeploymentReportObject Creates a deployment report object with namespace, name, reason, and compliance status +// +// This function builds a new ReportObject by first invoking the generic +// constructor with the provided reason, type identifier for deployments, and +// compliance flag. It then adds fields for the namespace and deployment name to +// the object's key/value store. The resulting pointer is returned for further +// use or inspection. func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, DeploymentType, isCompliant) out.AddField(Namespace, aNamespace) @@ -337,9 +423,12 @@ func NewDeploymentReportObject(aNamespace, aDeploymentName, aReason string, isCo return out } -// NewStatefulSetReportObject creates a new ReportObject for a StatefulSet. -// It takes the namespace, statefulSetName, reason, and compliance status as parameters. 
-// It returns the created ReportObject. +// NewStatefulSetReportObject Creates a report object for a StatefulSet +// +// It builds a ReportObject with the type set to a constant representing +// StatefulSet, attaches compliance or non‑compliance reason, then adds +// namespace and name fields. The function returns the fully populated +// ReportObject for use in tests. func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, StatefulSetType, isCompliant) out.AddField(Namespace, aNamespace) @@ -347,8 +436,12 @@ func NewStatefulSetReportObject(aNamespace, aStatefulSetName, aReason string, is return out } -// NewCrdReportObject creates a new ReportObject for a custom resource definition (CRD). -// It takes the name, version, reason, and compliance status as parameters and returns the created ReportObject. +// NewCrdReportObject Creates a report object for a custom resource definition +// +// This function takes the name, version, reason, and compliance status of a +// CRD. It builds a ReportObject by delegating to NewReportObject, then adds +// fields for the CRD's name and version before returning the constructed +// object. func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out *ReportObject) { out = NewReportObject(aReason, CustomResourceDefinitionType, isCompliant) out.AddField(CustomResourceDefinitionName, aName) @@ -356,10 +449,12 @@ func NewCrdReportObject(aName, aVersion, aReason string, isCompliant bool) (out return out } -// NewReportObject creates a new ReportObject with the specified reason, type, and compliance status. -// If isCompliant is true, the reason is added as a field with the key ReasonForCompliance. -// If isCompliant is false, the reason is added as a field with the key ReasonForNonCompliance. -// Returns a pointer to the created ReportObject. 
+// NewReportObject Creates a report object with reason and type +// +// This function initializes an empty ReportObject, sets its type field, and +// adds the provided reason as either a compliance or non‑compliance note +// depending on the boolean flag. The resulting pointer is returned for further +// augmentation by caller functions. func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject) { out = &ReportObject{} out.ObjectType = aType @@ -371,40 +466,52 @@ func NewReportObject(aReason, aType string, isCompliant bool) (out *ReportObject return out } -// AddField adds a key-value pair to the ReportObject. -// It appends the given key to the ObjectFieldsKeys slice and the given value to the ObjectFieldsValues slice. -// It returns the modified ReportObject. +// ReportObject.AddField Adds a key-value pair to the report +// +// The method appends the supplied key to an internal slice of keys and the +// corresponding value to a parallel slice of values, maintaining order. It +// returns the same ReportObject pointer so calls can be chained. This enables +// constructing structured reports by sequentially adding fields. func (obj *ReportObject) AddField(aKey, aValue string) (out *ReportObject) { obj.ObjectFieldsKeys = append(obj.ObjectFieldsKeys, aKey) obj.ObjectFieldsValues = append(obj.ObjectFieldsValues, aValue) return obj } -// NewNamespacedReportObject creates a new ReportObject with the specified reason, type, compliance status, and namespace. -// It adds the namespace field to the ReportObject. +// NewNamespacedReportObject Creates a ReportObject that includes namespace information +// +// The function constructs a new report object with the provided reason, type, +// and compliance status, then appends an additional field for the namespace. It +// returns the resulting report object. This allows callers to generate reports +// that are scoped to a specific Kubernetes namespace. 
func NewNamespacedReportObject(aReason, aType string, isCompliant bool, aNamespace string) (out *ReportObject) { return NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace) } -// NewNamespacedNamedReportObject creates a new namespaced named report object with the given parameters. -// It returns a pointer to the created ReportObject. -// The report object contains the specified reason, type, compliance status, namespace, and name. +// NewNamespacedNamedReportObject Creates a report object with namespace and name fields +// +// It builds a new ReportObject using the reason, type, and compliance flag, +// then appends the specified namespace and name as additional fields. The +// resulting pointer is returned for further use. func NewNamespacedNamedReportObject(aReason, aType string, isCompliant bool, aNamespace, aName string) (out *ReportObject) { return NewReportObject(aReason, aType, isCompliant).AddField(Namespace, aNamespace).AddField(Name, aName) } -// SetType sets the type of the ReportObject. -// It takes aType as a parameter and updates the ObjectType field of the ReportObject. -// It returns a pointer to the updated ReportObject. +// ReportObject.SetType Assigns a new type to the report object +// +// The method receives a string that represents the desired type and stores it +// in the ObjectType field of the ReportObject instance. It then returns the +// same instance, allowing callers to chain further configuration calls. func (obj *ReportObject) SetType(aType string) (out *ReportObject) { obj.ObjectType = aType return obj } -// ResultToString converts an integer result code into a corresponding string representation. -// It takes an integer result as input and returns the corresponding string representation. -// The possible result codes are SUCCESS, FAILURE, and ERROR. -// If the input result code is not recognized, an empty string is returned. 
+// ResultToString Translates a result code into its textual form +// +// The function receives an integer representing a status code and returns the +// matching string: "SUCCESS", "FAILURE" or "ERROR". If the input does not match +// any known code, it yields an empty string. func ResultToString(result int) (str string) { switch result { case SUCCESS: @@ -417,6 +524,13 @@ func ResultToString(result int) (str string) { return "" } +// GetNonOCPClusterSkipFn provides a test skip function for non‑OCP clusters +// +// This helper creates and returns a zero‑argument function that, when called, +// checks whether the current environment is an OpenShift cluster. If it is not, +// the returned function signals to skip the test by returning true along with a +// descriptive message; otherwise it indicates no skip with false and an empty +// string. func GetNonOCPClusterSkipFn() func() (bool, string) { return func() (bool, string) { if !provider.IsOCPCluster() { @@ -426,6 +540,11 @@ func GetNonOCPClusterSkipFn() func() (bool, string) { } } +// GetNoServicesUnderTestSkipFn Checks whether the test environment has any services defined +// +// The function produces a closure that inspects the provided test environment's +// service list. If the list is empty it signals to skip the test with an +// explanatory message; otherwise it indicates the test should proceed. func GetNoServicesUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Services) == 0 { @@ -436,6 +555,13 @@ func GetNoServicesUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, s } } +// GetDaemonSetFailedToSpawnSkipFn returns a closure that skips tests when the probe daemonset fails to spawn +// +// The function takes a test environment and produces a zero‑argument function +// returning a boolean and a message. 
When called, the inner function checks +// whether the environment records a failed daemonset launch; if so it signals +// the test should be skipped with an explanatory string. Otherwise it indicates +// no skip is needed. func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if env.DaemonsetFailedToSpawn { @@ -446,6 +572,12 @@ func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool } } +// GetNoCPUPinningPodsSkipFn Checks for the presence of CPU pinning pods before running a test +// +// This function receives an environment object and returns a closure that +// indicates if a test should be skipped. The inner function counts +// CPU‑pinning pods with DPDK; if none are found it signals to skip with an +// explanatory message, otherwise it allows the test to proceed. func GetNoCPUPinningPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.GetCPUPinningPodsWithDpdk()) == 0 { @@ -456,6 +588,11 @@ func GetNoCPUPinningPodsSkipFn(env *provider.TestEnvironment) func() (bool, stri } } +// GetNoSRIOVPodsSkipFn Provides a skip function for tests when no SRIOV pods are present +// +// This returns a closure that checks the test environment for SRIOV-enabled +// pods. If retrieving the list fails or the list is empty, it signals to skip +// the test with an explanatory message; otherwise the test proceeds normally. func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { pods, err := env.GetPodsUsingSRIOV() @@ -471,6 +608,12 @@ func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) { } } +// GetNoContainersUnderTestSkipFn skips tests when there are no containers to evaluate +// +// This function receives a test environment and returns another function that +// determines whether the current test should be skipped. 
It checks if the +// container list in the environment is empty; if so, it signals to skip with an +// explanatory message. Otherwise, it allows the test to proceed. func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Containers) == 0 { @@ -481,6 +624,12 @@ func GetNoContainersUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, } } +// GetNoPodsUnderTestSkipFn skips the test when there are no pods to check +// +// This function creates a closure that examines the supplied test environment's +// pod list. If the list is empty, it signals that the test should be skipped by +// returning true and an explanatory message; otherwise, it indicates the test +// should run. func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Pods) == 0 { @@ -491,6 +640,12 @@ func GetNoPodsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, strin } } +// GetNoDeploymentsUnderTestSkipFn Determines whether tests should be skipped due to absence of deployments +// +// The function returns a closure that checks the length of the Deployments +// slice in a test environment. If no deployments are present, it signals that +// the test should skip with an explanatory message. Otherwise, it indicates +// that testing can proceed. func GetNoDeploymentsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Deployments) == 0 { @@ -501,6 +656,13 @@ func GetNoDeploymentsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool } } +// GetNoStatefulSetsUnderTestSkipFn Skips tests when there are no StatefulSets in the environment +// +// This function receives a test environment and produces a callback used by +// test frameworks to decide whether to skip a particular check. 
The returned +// closure inspects the number of StatefulSet objects present; if none exist, it +// signals that the test should be skipped with an explanatory message. +// Otherwise it indicates the test can proceed. func GetNoStatefulSetsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.StatefulSets) == 0 { @@ -511,6 +673,12 @@ func GetNoStatefulSetsUnderTestSkipFn(env *provider.TestEnvironment) func() (boo } } +// GetNoCrdsUnderTestSkipFn Provides a skip function for tests when no CRDs are present +// +// It returns an anonymous function that checks the TestEnvironment's Crds +// slice. If the slice is empty, the inner function signals to skip the test +// with a message indicating there are no CRDs to check. Otherwise it allows +// the test to proceed. func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Crds) == 0 { @@ -521,6 +689,12 @@ func GetNoCrdsUnderTestSkipFn(env *provider.TestEnvironment) func() (bool, strin } } +// GetNoNamespacesSkipFn Determines whether tests should be skipped due to lack of namespaces +// +// The function returns a closure that checks the provided test environment for +// configured namespaces. If no namespaces are present, it signals that tests +// should be skipped and supplies an explanatory message. Otherwise, it +// indicates that testing can proceed normally. func GetNoNamespacesSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Namespaces) == 0 { @@ -531,6 +705,11 @@ func GetNoNamespacesSkipFn(env *provider.TestEnvironment) func() (bool, string) } } +// GetNoRolesSkipFn Determines whether tests should be skipped due to missing roles +// +// The returned function checks the Roles slice in the test environment. If no +// roles are present, it signals a skip by returning true along with an +// explanatory message. 
Otherwise, it indicates that testing can proceed. func GetNoRolesSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Roles) == 0 { @@ -541,6 +720,12 @@ func GetNoRolesSkipFn(env *provider.TestEnvironment) func() (bool, string) { } } +// GetSharedProcessNamespacePodsSkipFn Determines whether to skip tests based on shared process namespace pod presence +// +// It examines the test environment for pods that share a process namespace. If +// none are present, it signals that the condition required for the test is not +// met and returns true along with an explanatory message. Otherwise, it +// indicates the test should proceed. func GetSharedProcessNamespacePodsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.GetShareProcessNamespacePods()) == 0 { @@ -551,6 +736,12 @@ func GetSharedProcessNamespacePodsSkipFn(env *provider.TestEnvironment) func() ( } } +// GetNotIntrusiveSkipFn Provides a skip function for non‑intrusive tests +// +// The returned closure checks whether the test environment is marked as +// intrusive. If it is not, the function signals that the test should be skipped +// by returning true along with an explanatory message. Otherwise, it indicates +// the test should run normally. func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if !env.IsIntrusive() { @@ -561,6 +752,12 @@ func GetNotIntrusiveSkipFn(env *provider.TestEnvironment) func() (bool, string) } } +// GetNoPersistentVolumesSkipFn skips tests when no persistent volumes exist +// +// It produces a function that inspects the test environment’s list of +// persistent volumes. If the list is empty, it signals to skip the related +// tests and provides an explanatory message; otherwise it allows the tests to +// run. 
func GetNoPersistentVolumesSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.PersistentVolumes) == 0 { @@ -571,6 +768,12 @@ func GetNoPersistentVolumesSkipFn(env *provider.TestEnvironment) func() (bool, s } } +// GetNotEnoughWorkersSkipFn Creates a test skip function based on worker count +// +// This returns a closure that checks whether the current environment has fewer +// workers than the required minimum. If the condition is met, it signals to +// skip the test by returning true along with an explanatory message; otherwise +// it indicates the test should proceed. func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int) func() (bool, string) { return func() (bool, string) { if env.GetWorkerCount() < minWorkerNodes { @@ -581,6 +784,12 @@ func GetNotEnoughWorkersSkipFn(env *provider.TestEnvironment, minWorkerNodes int } } +// GetPodsWithoutAffinityRequiredLabelSkipFn Creates a skip function for tests that require pods with an affinity label +// +// It receives the test environment and returns a closure that checks whether +// any pods lack the required affinity label. If none are found, the closure +// signals to skip the test with an explanatory message; otherwise it allows the +// test to proceed. func GetPodsWithoutAffinityRequiredLabelSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.GetPodsWithoutAffinityRequiredLabel()) == 0 { @@ -591,6 +800,12 @@ func GetPodsWithoutAffinityRequiredLabelSkipFn(env *provider.TestEnvironment) fu } } +// GetNoGuaranteedPodsWithExclusiveCPUsSkipFn skips test when there are no pods using exclusive CPUs +// +// The returned closure examines the test environment for pods that have been +// assigned exclusive CPU resources. If none are found, it signals to skip the +// test by returning true and a descriptive message. Otherwise, it allows the +// test to proceed. 
func GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.GetGuaranteedPodsWithExclusiveCPUs()) == 0 { @@ -601,6 +816,12 @@ func GetNoGuaranteedPodsWithExclusiveCPUsSkipFn(env *provider.TestEnvironment) f } } +// GetNoAffinityRequiredPodsSkipFn Determines if a test should be skipped due to absence of affinity-required pods +// +// The function returns a closure that checks the test environment for any pods +// marked with required node affinity. If none are found, it signals that the +// test should be skipped and provides an explanatory message. Otherwise, it +// indicates the test can proceed. func GetNoAffinityRequiredPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.GetAffinityRequiredPods()) == 0 { @@ -611,6 +832,11 @@ func GetNoAffinityRequiredPodsSkipFn(env *provider.TestEnvironment) func() (bool } } +// GetNoStorageClassesSkipFn Skips tests when no storage classes are present +// +// This function returns a closure that checks the length of the environment's +// storage class list. If the list is empty, it signals to skip the test with an +// explanatory message; otherwise, it allows the test to proceed normally. func GetNoStorageClassesSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.StorageClassList) == 0 { @@ -620,6 +846,13 @@ func GetNoStorageClassesSkipFn(env *provider.TestEnvironment) func() (bool, stri } } +// GetNoPersistentVolumeClaimsSkipFn Determines if tests should be skipped due to absence of persistent volume claims +// +// The function receives a test environment and produces a closure used by the +// testing framework. When invoked, the closure checks whether the environment +// contains any persistent volume claim objects. 
If none are present, it signals +// that the test should be skipped and supplies an explanatory message; +// otherwise it allows the test to proceed. func GetNoPersistentVolumeClaimsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.PersistentVolumeClaims) == 0 { @@ -629,6 +862,12 @@ func GetNoPersistentVolumeClaimsSkipFn(env *provider.TestEnvironment) func() (bo } } +// GetNoBareMetalNodesSkipFn skips tests when no bare-metal nodes exist +// +// The returned function checks the test environment for bare-metal nodes by +// calling GetBaremetalNodes. If none are found, it signals that the current +// test should be skipped with a descriptive message. Otherwise, it allows the +// test to proceed normally. func GetNoBareMetalNodesSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.GetBaremetalNodes()) == 0 { @@ -638,6 +877,12 @@ func GetNoBareMetalNodesSkipFn(env *provider.TestEnvironment) func() (bool, stri } } +// GetNoIstioSkipFn Decides if tests should be skipped due to missing Istio +// +// The function creates and returns a closure that inspects the test environment +// for an Istio service mesh flag. If the flag indicates no Istio is present, it +// signals to skip with a descriptive message; otherwise it allows the test to +// proceed. func GetNoIstioSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if !env.IstioServiceMeshFound { @@ -647,6 +892,12 @@ func GetNoIstioSkipFn(env *provider.TestEnvironment) func() (bool, string) { } } +// GetNoHugepagesPodsSkipFn Determines if a test should be skipped due to lack of hugepage pods +// +// This function receives a testing environment and returns another function +// that, when called, checks whether any pods are requesting hugepages. If none +// exist, it signals the test framework to skip with an explanatory message. 
+// Otherwise, it allows the test to proceed normally. func GetNoHugepagesPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.GetHugepagesPods()) == 0 { @@ -656,6 +907,12 @@ func GetNoHugepagesPodsSkipFn(env *provider.TestEnvironment) func() (bool, strin } } +// GetNoCatalogSourcesSkipFn Determines whether to skip tests due to missing catalog sources +// +// The function returns a closure that checks the test environment for catalog +// source entries. If no catalog sources are present, it signals that the +// associated tests should be skipped with an explanatory message. Otherwise, it +// indicates that testing can proceed normally. func GetNoCatalogSourcesSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.AllCatalogSources) == 0 { @@ -665,6 +922,11 @@ func GetNoCatalogSourcesSkipFn(env *provider.TestEnvironment) func() (bool, stri } } +// GetNoOperatorsSkipFn Decides if a test should be skipped because no operators are present +// +// The function generates a closure that inspects the provided environment's +// operator list. If the list is empty, it signals to skip the test and supplies +// an explanatory message; otherwise it indicates the test can proceed. func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Operators) == 0 { @@ -674,6 +936,12 @@ func GetNoOperatorsSkipFn(env *provider.TestEnvironment) func() (bool, string) { } } +// GetNoOperatorPodsSkipFn Determines whether to skip tests due to missing operator pods +// +// The returned function checks the TestEnvironment's mapping of CSVs to pod +// lists. If no entries exist, it signals that tests should be skipped by +// returning true along with a message explaining that no operator pods were +// found. Otherwise, it indicates tests can proceed. 
func GetNoOperatorPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.CSVToPodListMap) == 0 { @@ -684,6 +952,12 @@ func GetNoOperatorPodsSkipFn(env *provider.TestEnvironment) func() (bool, string } } +// GetNoOperatorCrdsSkipFn Skips tests when no operator CRDs are present +// +// The function takes a test environment and returns a closure used to decide +// whether a test should be skipped. The closure checks the length of the Crds +// slice in the environment; if it is empty, it signals to skip the test with an +// explanatory message. Otherwise, it indicates that the test should proceed. func GetNoOperatorCrdsSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { if len(env.Crds) == 0 { @@ -693,7 +967,12 @@ func GetNoOperatorCrdsSkipFn(env *provider.TestEnvironment) func() (bool, string } } -// The returned func returns true (skip) if there isn't any node using realtime kernel type. +// GetNoNodesWithRealtimeKernelSkipFn Skips tests when no node uses a realtime kernel +// +// This helper returns a function that checks all nodes in the test environment +// for a realtime kernel type. If any node is found to use such a kernel, the +// returned function signals not to skip; otherwise it indicates a skip with an +// explanatory message. func GetNoNodesWithRealtimeKernelSkipFn(env *provider.TestEnvironment) func() (bool, string) { return func() (bool, string) { for i := range env.Nodes { @@ -708,6 +987,13 @@ func GetNoNodesWithRealtimeKernelSkipFn(env *provider.TestEnvironment) func() (b } } +// ResultObjectsToString Serializes compliant and non‑compliant report objects into a JSON string +// +// The function receives two slices of ReportObject values, one for compliant +// items and another for non‑compliant ones. It constructs a FailureReasonOut +// structure containing these slices, marshals the structure to JSON, and +// returns the resulting string. 
If the marshalling fails, an error is returned +// with context. func ResultObjectsToString(compliantObject, nonCompliantObject []*ReportObject) (string, error) { reason := FailureReasonOut{ CompliantObjectsOut: compliantObject, diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 15a0b725e..c248763b9 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,8 +23,13 @@ var ( ClaimFormatVersion string ) -// getGitVersion returns the git display version: the latest previously released -// build in case this build is not released. Otherwise display the build version +// GitVersion provides the current build’s git display version +// +// The function checks if a release tag is defined; if not it falls back to an +// unreleased build label combined with the previous release information. It +// then appends the short commit hash in parentheses and returns the resulting +// string, which is used throughout the application to report the running +// version. func GitVersion() string { if GitRelease == "" { GitDisplayRelease = "Unreleased build post " + GitPreviousRelease @@ -35,11 +40,22 @@ func GitVersion() string { return GitDisplayRelease + " (" + GitCommit + ")" } +// IsValidSemanticVersion Validates that a string is a proper semantic version +// +// The function attempts to parse the input using a semantic version parser. If +// parsing succeeds without error, it returns true, indicating a valid semantic +// version; otherwise, it returns false. func IsValidSemanticVersion(version string) bool { _, err := semver.NewVersion(version) return err == nil } +// IsValidK8sVersion Checks if a string matches Kubernetes version naming conventions +// +// The function compiles a regular expression that enforces the pattern for +// Kubernetes versions, allowing optional pre-release identifiers such as alpha +// or beta with numeric suffixes. It returns true when the input string conforms +// to this format and false otherwise. 
func IsValidK8sVersion(version string) bool { r := regexp.MustCompile(`^(v)([1-9]\d*)+((alpha|beta)([1-9]\d*)+){0,2}$`) return r.MatchString(version) diff --git a/tests/accesscontrol/namespace/namespace.go b/tests/accesscontrol/namespace/namespace.go index 2f9f321c6..2988141b3 100644 --- a/tests/accesscontrol/namespace/namespace.go +++ b/tests/accesscontrol/namespace/namespace.go @@ -28,10 +28,14 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// TestCrsNamespaces finds the list of the input CRDs (crds parameter) instances (CRs) and verify that they are only in namespaces provided as input. -// Returns : -// - map[string]map[string][]string : The list of CRs not belonging to the namespaces passed as input is returned as invalid. -// - error : if exist error. +// TestCrsNamespaces identifies custom resources outside allowed namespaces +// +// The function examines each provided CRD, gathers all its instances across the +// cluster, and checks whether their namespaces match a given list of permitted +// namespaces. For any instance found in an unauthorized namespace, it records +// the CRD name, namespace, and resource names in a nested map. The resulting +// map is returned along with any error that occurred during retrieval; +// otherwise nil indicates success. func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespaces []string, logger *log.Logger) (invalidCrs map[string]map[string][]string, err error) { // Initialize the top level map invalidCrs = make(map[string]map[string][]string) @@ -55,10 +59,14 @@ func TestCrsNamespaces(crds []*apiextv1.CustomResourceDefinition, configNamespac return invalidCrs, nil } -// getCrsPerNamespaces gets the list of CRs instantiated in the cluster per namespace. -// Returns : -// - map[string][]string : a map indexed by namespace and data is a list of CR names. -// - error : if exist error. 
+// getCrsPerNamespaces Retrieves custom resources per namespace +// +// This function queries the Kubernetes cluster for all instances of a given +// CustomResourceDefinition across its versions, organizing them into a map +// keyed by namespace with lists of resource names as values. It uses a dynamic +// client from a shared holder to perform list operations and logs debug +// information during the search. If any listing operation fails, an error is +// returned along with a nil or partially filled map. func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces map[string][]string, err error) { oc := clientsholder.GetClientsHolder() for _, version := range aCrd.Spec.Versions { @@ -94,9 +102,12 @@ func getCrsPerNamespaces(aCrd *apiextv1.CustomResourceDefinition) (crdNamespaces return crdNamespaces, nil } -// GetInvalidCRDsNum returns the number of invalid CRs in the map. -// Return: -// - int : number of invalid CRs in the map. +// GetInvalidCRsNum Counts the number of custom resources that are not in allowed namespaces +// +// The function walks through a nested map where each CRD maps to namespaces and +// then to lists of CR names, logging an error for every invalid entry it finds. +// It tallies these occurrences into an integer which is returned as the total +// count of invalid custom resources. func GetInvalidCRsNum(invalidCrs map[string]map[string][]string, logger *log.Logger) int { var invalidCrsNum int for crdName, namespaces := range invalidCrs { diff --git a/tests/accesscontrol/pidshelper.go b/tests/accesscontrol/pidshelper.go index 2b828b8a2..bd85de890 100644 --- a/tests/accesscontrol/pidshelper.go +++ b/tests/accesscontrol/pidshelper.go @@ -26,10 +26,12 @@ import ( const nbProcessesIndex = 2 -// getNbOfProcessesInPidNamespace retrieves the number of processes in the Pid namespace. 
-// Returns: -// - int : the number of processes in the PID namespace associated with the specified process ID -// - error : An error, if any occurred during the execution of the command or parsing of the output. +// getNbOfProcessesInPidNamespace determines the number of processes in a PID namespace +// +// The function runs a container command that lists the PID namespace for a +// given process ID, then parses the output to count how many processes belong +// to that namespace. It returns the count as an integer and propagates any +// errors from executing the command or parsing its result. func getNbOfProcessesInPidNamespace(ctx clientsholder.Context, targetPid int, ch clientsholder.Command) (int, error) { cmd := "lsns -p " + strconv.Itoa(targetPid) + " -t pid -n" diff --git a/tests/accesscontrol/resources/resources.go b/tests/accesscontrol/resources/resources.go index 4d3e2fdb6..0757e9837 100644 --- a/tests/accesscontrol/resources/resources.go +++ b/tests/accesscontrol/resources/resources.go @@ -5,9 +5,12 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" ) -// HasRequestsSet checks if a container has resource requests set. -// Returns : -// - bool : true if resource requests are set for the container, otherwise return false. +// HasRequestsSet Determines if a container has resource requests defined +// +// This function examines the request fields of a container's resource +// specification. It checks that there is at least one request entry, and that +// both CPU and memory requests are non‑zero values. If any requirement is +// missing it logs an error and returns false; otherwise it returns true. func HasRequestsSet(cut *provider.Container, logger *log.Logger) bool { passed := true @@ -29,10 +32,13 @@ func HasRequestsSet(cut *provider.Container, logger *log.Logger) bool { return passed } -// For more info on cpu management policies see https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/. 
-// HasExclusiveCPUsAssigned checks if a container has exclusive CPU's assigned. -// Returns: -// - bool : true if a container has exclusive CPU's assigned, otherwise return false. +// HasExclusiveCPUsAssigned Determines if a container runs with exclusive CPU allocation +// +// The function examines the CPU and memory limits and requests of a container +// to decide whether it belongs to an exclusive CPU pool. If either limit is +// missing, non‑integer, or mismatched with its request, the container is +// considered shared; otherwise it is marked exclusive. The result is returned +// as a boolean. func HasExclusiveCPUsAssigned(cut *provider.Container, logger *log.Logger) bool { cpuLimits := cut.Resources.Limits.Cpu() memLimits := cut.Resources.Limits.Memory() diff --git a/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go b/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go index 1336389d5..198f44571 100644 --- a/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go +++ b/tests/accesscontrol/securitycontextcontainer/securitycontextcontainer.go @@ -27,7 +27,13 @@ const ( NOKString = "false" ) -// print the strings +// OkNok.String returns a textual representation of the status +// +// When invoked, this method examines its receiver value and maps specific +// enumeration cases to predefined string constants. If the value matches the +// success case it returns the corresponding OKString; if it matches the failure +// case it returns NOKString. For any other value, it defaults to returning +// "false". func (okNok OkNok) String() string { switch okNok { case OK: @@ -38,6 +44,14 @@ func (okNok OkNok) String() string { return "false" } +// ContainerSCC Represents a container’s security context compliance state +// +// This struct holds flags indicating whether each security setting of a +// container satisfies the requirements of a given security context constraint. 
+// Each field is an OkNok value that marks the presence or absence of a feature +// such as host networking, privilege escalation, or required capabilities. The +// struct also records the lowest capability category applicable to the +// container. type ContainerSCC struct { HostDirVolumePluginPresent OkNok // 0 or 1 - 0 is false 1 - true HostIPC OkNok @@ -67,6 +81,13 @@ const ( CategoryID4 ) +// PodListCategory Represents a container’s classification within a pod +// +// This structure holds identifying information for a specific container in a +// Kubernetes pod, including the container name, pod name, namespace, and its +// security context category. It is used to record and report which security +// policy tier applies to each container during analysis. The String method +// formats these fields into a readable string for logging or output. type PodListCategory struct { Containername string Podname string @@ -148,7 +169,11 @@ var ( OK} // AllVolumeAllowed ) -// print the strings +// PodListCategory.String Formats PodListCategory fields into a readable string +// +// The method combines the container name, pod name, namespace, and category of +// a PodListCategory instance into a single line with labels. It returns this +// formatted string for display or logging purposes. func (category PodListCategory) String() string { returnString := fmt.Sprintf("Containername: %s Podname: %s NameSpace: %s Category: %s \n ", category.Containername, category.Podname, category.NameSpace, category.Category) @@ -163,9 +188,12 @@ const ( CategoryID4String = "CategoryID4(anything not matching lower category)" ) -// String converts the category to a string. -// Returns: -// - string: The string representation of the Category. +// CategoryID.String Returns the string representation of a CategoryID +// +// The method examines the receiver value and maps each predefined constant to +// its corresponding string. 
It uses a switch statement to select the +// appropriate case and returns that string, defaulting to a fallback if none +// match. func (category CategoryID) String() string { switch category { case CategoryID1: @@ -184,9 +212,13 @@ func (category CategoryID) String() string { return CategoryID4String } -// GetContainerSCC is update the containerSCC according capability of container(cut) -// Returns: -// - ContainerSCC: struct that updated according container(cut) +// GetContainerSCC updates a container's security context compliance status +// +// The function examines a container’s properties such as host ports, +// capabilities, privilege escalation settings, and SELinux options. It sets +// corresponding flags in the provided ContainerSCC structure to indicate +// whether each security requirement is satisfied. The updated ContainerSCC is +// returned for further classification or reporting. // //nolint:gocritic func GetContainerSCC(cut *provider.Container, containerSCC ContainerSCC) ContainerSCC { @@ -223,7 +255,15 @@ func GetContainerSCC(cut *provider.Container, containerSCC ContainerSCC) Contain return containerSCC } -// updateCapabilitiesFromContainer update the per container capabilities with the capabilities defined at the container level. +// updateCapabilitiesFromContainer updates container capability settings based on its security context +// +// This routine examines a container’s SecurityContext for defined +// capabilities, adjusting the SCC record to reflect required drop capabilities +// and categorizing the added capabilities into predefined groups. It checks if +// all required drops are present or if an empty add list implies Category 1, +// otherwise it matches the added capabilities against three category sets. The +// function marks the appropriate flags in the ContainerSCC structure to +// indicate compliance status. 
func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *ContainerSCC) { containerSCC.RequiredDropCapabilitiesPresent = NOK if cut.SecurityContext != nil && cut.SecurityContext.Capabilities != nil { @@ -256,10 +296,14 @@ func updateCapabilitiesFromContainer(cut *provider.Container, containerSCC *Cont } } -// AllVolumeAllowed checks if all volumes in the provided slice are allowed based on certain criteria. -// Returns : -// - r1 : whether all volumes are allowed (OK/NOK) -// - r2 : whether any volume with HostPath is found (OK/NOK) +// AllVolumeAllowed Verifies all volumes are permitted and detects host path usage +// +// The function examines each volume in the provided slice, counting only those +// of allowed types such as ConfigMap, DownwardAPI, EmptyDir, +// PersistentVolumeClaim, Projected, or Secret. If every volume is of an allowed +// type, it returns OK for the overall check; otherwise it returns NOK. It also +// flags whether any HostPath volume was encountered by setting a separate +// status value. func AllVolumeAllowed(volumes []corev1.Volume) (r1, r2 OkNok) { countVolume := 0 var value OkNok @@ -293,10 +337,12 @@ func AllVolumeAllowed(volumes []corev1.Volume) (r1, r2 OkNok) { return NOK, value } -// checkContainerCategory categorizes each container based on Security context. -// builds a list of PodListCategory structs , each representing a container along with its category information. -// Returns: -// - []PodListCategory: a slice of PodListCategory structs representing categorized containers. +// checkContainerCategory creates a list of container categories based on security context checks +// +// For each container in the pod, it builds a container-specific SCC +// representation and then determines which predefined category matches that +// SCC. The function returns a slice of structs containing the container name, +// pod name, namespace, and assigned category identifier. 
// //nolint:gocritic func checkContainerCategory(containers []corev1.Container, containerSCC ContainerSCC, podName, nameSpace string) []PodListCategory { @@ -328,9 +374,13 @@ func checkContainerCategory(containers []corev1.Container, containerSCC Containe return ContainerList } -// checkContainCategory checks whether all elements in the addCapability exist in referenceCategoryAddCapabilities -// Returns: -// - bool: true if all elements in the addCapability exist in referenceCategoryAddCapabilities, otherwise return false +// checkContainCategory verifies that every capability in a list is present in another set +// +// The function receives a slice of capabilities and a reference slice of +// strings. It iterates through each capability, checking whether its string +// representation appears in the reference slice using a helper routine. If any +// capability is missing, it immediately returns false; otherwise it returns +// true after all checks pass. func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAddCapabilities []string) bool { for _, ncc := range addCapability { if !stringhelper.StringInSlice(referenceCategoryAddCapabilities, string(ncc), true) { @@ -340,11 +390,14 @@ func checkContainCategory(addCapability []corev1.Capability, referenceCategoryAd return true } -// CheckPod updates the containerSCC objects with security context variable defined at the Pod Level. Then it updates the containerSCC object with security context values overloaded at the container level. -// It then categorizes each container based on specific conditions and constructs a list of PodListCategory structs, -// each representing a container along with its category information. -// Returns: -// - []PodListCategory: a slice of PodListCategory structs representing categorized containers for the pod. 
+// CheckPod Evaluates a pod’s security context and categorizes its containers +// +// The function inspects the pod's host networking, IPC, PID settings, SELinux +// options, volume types, run-as-user, and FSGroup fields to build a +// ContainerSCC profile. It then determines each container’s category by +// comparing that profile against predefined security categories. The result is +// a slice of PodListCategory structs, one per container, indicating the +// container name, pod details, namespace, and assigned category. func CheckPod(pod *provider.Pod) []PodListCategory { var containerSCC ContainerSCC containerSCC.HostIPC = NOK @@ -377,9 +430,14 @@ func CheckPod(pod *provider.Pod) []PodListCategory { return checkContainerCategory(pod.Spec.Containers, containerSCC, pod.Name, pod.Namespace) } -// compareCategory compare between the fields in refCategory and containerSCC -// Returns: -// - bool : true if containerSCC matches the reference category, otherwise return false. +// compareCategory determines if a container matches a reference security context category +// +// The function compares the security context properties of two containers, +// checking fields such as volume allowance, user settings, privilege flags, and +// capability lists against a predefined category definition. It logs each +// comparison step for debugging purposes and aggregates any mismatches into a +// boolean result. The returned value indicates whether the container conforms +// to all constraints specified by the reference category. // //nolint:funlen func compareCategory(refCategory, containerSCC *ContainerSCC, id CategoryID) bool { diff --git a/tests/accesscontrol/suite.go b/tests/accesscontrol/suite.go index 95ce1cd2e..69e31909e 100644 --- a/tests/accesscontrol/suite.go +++ b/tests/accesscontrol/suite.go @@ -67,7 +67,14 @@ var ( } ) -// LoadChecks loads all the checks. 
+// LoadChecks Initialises all access control checks for the test suite +// +// This function registers a group of security checks under the access‑control +// key, attaching pre‑execution logic and a series of check functions that +// validate container capabilities, pod configurations, namespace policies, and +// more. Each check is created with identifiers derived from test metadata, may +// be skipped based on environmental conditions, and logs its progress through a +// shared logger. // //nolint:funlen func LoadChecks() { @@ -274,8 +281,13 @@ func LoadChecks() { })) } -// isContainerCapabilitySet checks whether a container capability was explicitly set -// in securityContext.capabilities.add list. +// isContainerCapabilitySet checks if a capability is explicitly added to a container +// +// The function receives a pointer to a capabilities structure and a capability +// name. It returns true when the Add list contains either the specified +// capability or the special ALL value, indicating that the capability has been +// granted. If the capabilities object is nil or its Add list is empty, it +// returns false. func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capability string) bool { if containerCapabilities == nil { return false @@ -293,10 +305,13 @@ func isContainerCapabilitySet(containerCapabilities *corev1.Capabilities, capabi return false } -// checkForbiddenCapability checks if containers use a forbidden capability. -// Returns: -// - compliantObjects []*testhelper.ReportObject : Slice containing report objects for containers compliant with the capability restrictions. -// - nonCompliantObjects []*testhelper.ReportObject : Slice containing report objects for containers not compliant with the capability restrictions. +// checkForbiddenCapability determines if containers avoid a specific capability +// +// The function iterates over each container, checking whether the specified +// capability is present in its security context. 
Containers lacking the +// capability are recorded as compliant; those with it are flagged +// non‑compliant and an error logged. The results are returned as two slices +// of report objects for further processing. func checkForbiddenCapability(containers []*provider.Container, capability string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { for _, cut := range containers { logger.Info("Testing Container %q", cut) @@ -320,32 +335,75 @@ func checkForbiddenCapability(containers []*provider.Container, capability strin return compliantObjects, nonCompliantObjects } +// testSysAdminCapability Checks containers for the SYS_ADMIN capability +// +// This routine examines each container in the test environment, looking for the +// SYS_ADMIN capability in its security context. Containers lacking this +// capability are recorded as compliant; those that include it are flagged +// non‑compliant with an error log entry. The results are then stored back +// into the check object. func testSysAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) { compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "SYS_ADMIN", check.GetLogger()) check.SetResult(compliantObjects, nonCompliantObjects) } +// testNetAdminCapability Verifies that containers do not use the NET_ADMIN capability +// +// The function queries all test containers for the presence of the NET_ADMIN +// capability using a helper routine, collecting compliant and non‑compliant +// reports. It then records these results in the provided check object. This +// ensures that network administration privileges are not granted to container +// processes. 
func testNetAdminCapability(check *checksdb.Check, env *provider.TestEnvironment) { compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "NET_ADMIN", check.GetLogger()) check.SetResult(compliantObjects, nonCompliantObjects) } +// testNetRawCapability Validates that containers do not use the NET_RAW capability +// +// The function examines all container security contexts in the test environment +// and reports any instance where the NET_RAW capability is granted. It records +// compliant and non‑compliant findings, attaching relevant details to each +// report object. Finally, it stores the results in the provided check for later +// aggregation. func testNetRawCapability(check *checksdb.Check, env *provider.TestEnvironment) { compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "NET_RAW", check.GetLogger()) check.SetResult(compliantObjects, nonCompliantObjects) } +// testIpcLockCapability Verifies containers lack the IPC_LOCK capability +// +// This function inspects each container in the test environment, checking +// whether the IPC_LOCK capability is present in its security context. +// Containers without the capability are marked compliant; those with it are +// flagged non‑compliant and reported accordingly. The results are stored back +// into the check object for later aggregation. func testIpcLockCapability(check *checksdb.Check, env *provider.TestEnvironment) { compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "IPC_LOCK", check.GetLogger()) check.SetResult(compliantObjects, nonCompliantObjects) } +// testBpfCapability Verifies that containers do not use the BPF kernel capability +// +// The function iterates over all test containers, checking whether the "BPF" +// capability is present in their security context. It records compliant +// containers where the capability is absent and non‑compliant ones where it +// appears, attaching appropriate log messages. 
Finally, it stores the results +// within the provided check object for reporting. func testBpfCapability(check *checksdb.Check, env *provider.TestEnvironment) { compliantObjects, nonCompliantObjects := checkForbiddenCapability(env.Containers, "BPF", check.GetLogger()) check.SetResult(compliantObjects, nonCompliantObjects) } -// testSecConRunAsNonRoot verifies that containers are not allowed to run as root. +// testSecConRunAsNonRoot checks that pods do not run containers as root +// +// The routine iterates over all test pods, determines which containers are +// configured to run as root, and records compliance results. For each pod it +// logs a message, then calls a helper to retrieve non‑compliant containers. +// If none exist the pod is marked compliant; otherwise each offending container +// is logged with an error and added to the non‑compliant list. Finally, the +// check’s result is set with both compliant and non‑compliant report +// objects. func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -369,7 +427,13 @@ func testSecConRunAsNonRoot(check *checksdb.Check, env *provider.TestEnvironment check.SetResult(compliantObjects, nonCompliantObjects) } -// testSecConPrivilegeEscalation verifies that the container is not allowed privilege escalation +// testSecConPrivilegeEscalation Verifies that containers do not allow privilege escalation +// +// The function iterates over each container in the test environment, checking +// if the SecurityContext's AllowPrivilegeEscalation flag is explicitly set to +// true. Containers with this setting are marked non‑compliant and logged as +// errors; those without the flag or with it false are considered compliant. +// Results are collected into report objects and stored via SetResult. 
func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -393,7 +457,14 @@ func testSecConPrivilegeEscalation(check *checksdb.Check, env *provider.TestEnvi check.SetResult(compliantObjects, nonCompliantObjects) } -// testSecConReadOnlyFilesystem verifies that the container has a readonly file system access. +// testSecConReadOnlyFilesystem verifies that each container mounts a read‑only root filesystem +// +// The function iterates over every pod and its containers in the test +// environment, checking if the container’s root filesystem is set to +// read‑only using a helper method. Containers that satisfy the requirement +// are recorded as compliant; those that do not are logged as errors and marked +// non‑compliant. Finally, the results are aggregated into report objects and +// passed back via the check’s result setter. func testSecConReadOnlyFilesystem(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -414,7 +485,14 @@ func testSecConReadOnlyFilesystem(check *checksdb.Check, env *provider.TestEnvir check.SetResult(compliantObjects, nonCompliantObjects) } -// testContainerHostPort tests that containers are not configured with host port privileges +// testContainerHostPort Verifies that containers do not expose host ports +// +// The function iterates over all containers in the test environment, checking +// each declared port for a non‑zero HostPort value. If such a port is found, +// it records a non‑compliant report object with details of the offending +// container and port number; otherwise it logs compliance and records a +// compliant object. Finally, it sets the check result with the collected +// reports. 
func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -440,7 +518,13 @@ func testContainerHostPort(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodHostNetwork verifies that the pod hostNetwork parameter is not set to true +// testPodHostNetwork Checks whether pods enable host networking +// +// The routine iterates over all pod objects in the test environment, logging +// each one. If a pod’s HostNetwork flag is true it records a non‑compliant +// report object and logs an error; otherwise it records a compliant report +// object and logs success. Finally, it stores both lists of results on the +// check instance. func testPodHostNetwork(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -458,7 +542,12 @@ func testPodHostNetwork(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodHostPath verifies that the pod hostpath parameter is not set to true +// testPodHostPath Verifies that pods do not use host path volumes +// +// The function iterates over all pods in the test environment, checking each +// volume for a non-empty HostPath field. If such a path is found, the pod is +// marked non‑compliant and recorded with details; otherwise it is considered +// compliant. Results are logged and reported via the check object. 
func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -484,7 +573,13 @@ func testPodHostPath(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodHostIPC verifies that the pod hostIpc parameter is not set to true +// testPodHostIPC Ensures Pod HostIPC is disabled +// +// The function iterates over all pods in the test environment, logging each +// pod’s name. For every pod it checks whether the HostIPC flag is true; if +// so, it records a non‑compliant report object and logs an error, otherwise +// it records a compliant report object. Finally, it sets the check result with +// both lists of objects. func testPodHostIPC(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -502,7 +597,13 @@ func testPodHostIPC(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodHostPID verifies that the pod hostPid parameter is not set to true +// testPodHostPID Checks that no pod uses the host PID namespace +// +// The function iterates over all pods in the test environment, logging each +// one’s status. If a pod has HostPID enabled it records a non‑compliant +// report object and logs an error; otherwise it records a compliant object and +// logs informational text. Finally, it sets the check result with the lists of +// compliant and non‑compliant objects. 
func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -520,7 +621,15 @@ func testPodHostPID(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testNamespace Tests namespaces for invalid prefixes and CRs that are not defined in namespaces under test. +// testNamespace Checks namespace names for disallowed prefixes and validates CR placement +// +// The function iterates over all namespaces in the test environment, logging +// each one. For every namespace it verifies that none of the predefined invalid +// prefixes are present; if an invalid prefix is found a non‑compliant report +// object is created. After checking prefixes, it calls helper routines to +// examine whether custom resources exist in unauthorized namespaces and records +// any failures as non‑compliant objects. Finally, results for both compliant +// and non‑compliant cases are set on the check. func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -559,7 +668,14 @@ func testNamespace(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodServiceAccount verifies that the pod utilizes a valid service account +// testPodServiceAccount checks that pods use non‑default service accounts +// +// The function iterates over all pods in the test environment, logging each +// pod’s name. For every pod it verifies whether the ServiceAccountName equals +// the default account; if so, it records a non‑compliant report object and +// logs an error, otherwise it records a compliant report object and logs +// success. Finally, it sets the check result with the collected compliant and +// non‑compliant objects. 
func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -577,7 +693,14 @@ func testPodServiceAccount(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodRoleBindings verifies that the pod utilizes a valid role binding that does not cross non-CNF namespaces +// testPodRoleBindings Checks pod role bindings against CNF namespace rules +// +// The routine iterates over each pod in the test environment, verifying that +// its service account is not default and that any role binding it relies on +// resides within an approved CNF namespace. If a pod references a role binding +// outside these namespaces, it is marked non‑compliant and detailed +// information about the offending binding is recorded. Pods passing all checks +// are logged as compliant, and results are reported back to the test framework. func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -648,7 +771,13 @@ func testPodRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodClusterRoleBindings verifies that the pod does not use a cluster role binding +// testPodClusterRoleBindings Verifies that pods do not use cluster role bindings +// +// The function iterates over all pods in the test environment, checking each +// for usage of a cluster role binding. If a pod uses one but is owned by a +// cluster‑wide operator, it is considered compliant; otherwise any usage +// flags the pod as non‑compliant. Results are recorded in report objects and +// returned via the check result. 
// //nolint:dupl func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnvironment) { @@ -694,9 +823,12 @@ func testPodClusterRoleBindings(check *checksdb.Check, env *provider.TestEnviron check.SetResult(compliantObjects, nonCompliantObjects) } -// isCSVAndClusterWide checks if object identified by namespace and name is a CSV created by a cluster-wide operator -// Return: -// - bool : true if object identified by namespace and name is a CSV created by a cluster-wide operator, otherwise return false +// isCSVAndClusterWide determines if a CSV belongs to a cluster‑wide operator +// +// The function inspects all operators in the test environment, checking whether +// any have a CSV matching the given namespace and name. If the CSV is found and +// its operator is marked as cluster wide or supports all namespaces, it returns +// true; otherwise it returns false. func isCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) bool { for _, op := range env.Operators { if op.Csv != nil && @@ -709,9 +841,12 @@ func isCSVAndClusterWide(aNamespace, name string, env *provider.TestEnvironment) return false } -// isInstallModeMultiNamespace checks if CSV install mode contains multi namespaces or all namespaces -// Return: -// - bool : true if CSV install mode contains multi namespaces or all namespaces, otherwise return false +// isInstallModeMultiNamespace determines whether a CSV install mode includes all namespaces +// +// The function iterates over the provided slice of install modes and returns +// true if any entry indicates an all‑namespace deployment. If none match, it +// returns false. This result is used to identify cluster‑wide operators in +// subsequent logic. 
func isInstallModeMultiNamespace(installModes []v1alpha1.InstallMode) bool { for i := 0; i < len(installModes); i++ { if installModes[i].Type == v1alpha1.InstallModeTypeAllNamespaces { @@ -721,11 +856,13 @@ func isInstallModeMultiNamespace(installModes []v1alpha1.InstallMode) bool { return false } -// ownedByClusterWideOperator checks if one of the passed topOwners is a CSV that is installed by a cluster-wide operator. -// Return: -// - bool: true if one of the passed topOwners is a CSV that is installed by a cluster-wide operator, otherwise return false -// - name string : the name of the matching object, if found. -// - aNamespace string : the namespace of the matching object, if found. +// ownedByClusterWideOperator Determines if any top owner is a cluster‑wide CSV +// +// The function examines each top owner of an object, checking whether the owner +// is a ClusterServiceVersion that is installed by a cluster‑wide operator. It +// returns the namespace and name of the matching CSV along with a boolean flag +// indicating a match. If no such owner exists, empty strings and false are +// returned. func ownedByClusterWideOperator(topOwners map[string]podhelper.TopOwner, env *provider.TestEnvironment) (aNamespace, name string, found bool) { for _, owner := range topOwners { if isCSVAndClusterWide(owner.Namespace, owner.Name, env) { @@ -735,8 +872,14 @@ func ownedByClusterWideOperator(topOwners map[string]podhelper.TopOwner, env *pr return "", "", false } -// testAutomountServiceToken checks if each pod uses the default service account name and if the token is explicitly set in the Pod's spec or if it is inherited from the associated ServiceAccount. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. 
+// testAutomountServiceToken Verifies pod service account usage and automount token settings +// +// The function iterates over all pods in the test environment, checking whether +// a pod uses the default service account name and evaluating its +// automountServiceAccountToken configuration via an external helper. It records +// non-compliant objects when defaults are used or tokens are set to true, and +// compliant objects otherwise. Results are stored in the check’s report for +// later reporting. func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -763,8 +906,14 @@ func testAutomountServiceToken(check *checksdb.Check, env *provider.TestEnvironm check.SetResult(compliantObjects, nonCompliantObjects) } -// testOneProcessPerContainer is a function that checks if each container(except Istio proxy containers) has only one process running. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testOneProcessPerContainer verifies that each container runs only a single process +// +// The function iterates over all containers in the test environment, excluding +// Istio proxy sidecars. For each container it obtains the main PID via the +// probe pod and counts processes in its namespace. Containers with more than +// one process are flagged as non‑compliant; otherwise they are marked +// compliant. The results are aggregated into report objects and set on the +// check. 
func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -807,8 +956,13 @@ func testOneProcessPerContainer(check *checksdb.Check, env *provider.TestEnviron check.SetResult(compliantObjects, nonCompliantObjects) } -// testSYSNiceRealtimeCapability is a function that checks if each container running on a realtime kernel enabled node has the SYS_NICE capability. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testSYSNiceRealtimeCapability Verifies SYS_NICE capability on containers in realtime kernel nodes +// +// The function iterates over all test containers, checks if their node uses a +// realtime kernel, and then determines whether the container has the SYS_NICE +// capability set. Containers running on non‑realtime nodes are automatically +// considered compliant. Results are collected into compliant and +// non‑compliant lists that are reported back to the test framework. func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -835,8 +989,14 @@ func testSYSNiceRealtimeCapability(check *checksdb.Check, env *provider.TestEnvi check.SetResult(compliantObjects, nonCompliantObjects) } -// testSysPtraceCapability is a function that checks if each pod has process namespace sharing enabled and at least one container allowing the SYS_PTRACE capability. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testSysPtraceCapability verifies pods with shared process namespaces contain a container granting SYS_PTRACE +// +// The function iterates over all pods that enable shared process namespaces, +// examining each container’s security context for the SYS_PTRACE capability. 
+// If at least one container has this capability it records the pod as +// compliant; otherwise it logs an error and marks it non‑compliant. Finally, +// it sets the check result with lists of compliant and non‑compliant report +// objects. func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -866,8 +1026,14 @@ func testSysPtraceCapability(check *checksdb.Check, env *provider.TestEnvironmen check.SetResult(compliantObjects, nonCompliantObjects) } -// testNamespaceResourceQuota is a function that checks if each pod is running in a namespace that has a ResourceQuota applied. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testNamespaceResourceQuota evaluates pod placement against namespace resource quotas +// +// The function iterates through all pods in the test environment, checking +// whether each pod's namespace has an associated ResourceQuota object. If a +// matching quota is found, the pod is marked compliant; otherwise it is +// recorded as non‑compliant and logged with an error message. After +// processing all pods, the compliance results are stored via the check’s +// SetResult method. func testNamespaceResourceQuota(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -903,8 +1069,14 @@ const ( sshServicePortProtocol = "TCP" ) -// testNoSSHDaemonsAllowed is a function that checks if each pod is running an SSH daemon. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testNoSSHDaemonsAllowed Verifies pods do not run SSH daemons +// +// The function iterates over each pod in the test environment, attempting to +// locate an SSH listening port within its first container. 
If a port is found, +// it checks whether that port is actively listening; presence indicates a +// running SSH daemon and marks the pod non‑compliant. Pods without an SSH +// port or with no active listener are considered compliant. Results are +// aggregated into report objects and set as the check outcome. func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -955,8 +1127,14 @@ func testNoSSHDaemonsAllowed(check *checksdb.Check, env *provider.TestEnvironmen check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodRequests is a function that checks if each container has resource requests. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testPodRequests Verifies that every container has defined CPU and memory requests +// +// The function iterates over all containers in the test environment, checking +// whether each one specifies resource requests using a helper routine. +// Containers lacking any request or with zero CPU or memory values are logged +// as errors and collected into a non‑compliant list; those that pass are +// recorded as compliant. Finally, it records both lists as the result of the +// compliance check. func testPodRequests(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -976,8 +1154,12 @@ func testPodRequests(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// test1337UIDs is a function that checks if each pod is using securityContext RunAsUser 1337. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. 
+// test1337UIDs Checks whether pods run with UID 1337 +// +// The function iterates over all pods in the test environment, logging each +// pod’s status. It records a non‑compliant report object for any pod whose +// securityContext RunAsUser is set to 1337 and a compliant one otherwise. +// Finally, it sets the check result using these lists. func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment) { // Note this test is only ran as part of the 'extended' test suite. var compliantObjects []*testhelper.ReportObject @@ -997,9 +1179,14 @@ func test1337UIDs(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testContainerSCC categorizes the containers under test into several categories of increasing privileges based on their SCC. -// Containers not compliant with the least privileged category fail this test. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testContainerSCC Determines container privilege levels based on SCC analysis +// +// The function iterates over all pods in the test environment, applying a +// security context check to each pod's containers. Containers are classified +// into categories, with only those in the lowest privileged category considered +// compliant. It records both compliant and non‑compliant containers, tracks +// the highest privilege level found, and reports an overall CNF category before +// setting the test result. func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -1031,8 +1218,12 @@ func testContainerSCC(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testNodePort is a function that checks for each service type if it is nodePort. 
-// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testNodePort Checks services for disallowed nodePort usage +// +// The function iterates over all services in the test environment, logging each +// one. If a service is of type NodePort, it records a non‑compliant report +// object and logs an error; otherwise it records a compliant report object. +// Finally, it sets the check result with the two lists. func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -1056,8 +1247,14 @@ func testNodePort(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testCrdRoles is a function that checks for each role applies only to CRDs under test. -// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testCrdRoles Evaluates whether role rules target only CRDs under test +// +// The function iterates over all roles in the environment, filtering by +// namespace, then extracts each role's API rules. It compares these rules +// against the list of CRD resources under test to separate matching and +// non‑matching rules. For each rule it records a report object indicating +// compliance, and if any role contains mixed rule types it logs an error and +// marks the role as non‑compliant. 
func testCrdRoles(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject diff --git a/tests/certification/suite.go b/tests/certification/suite.go index ee3140e0c..435443eb7 100644 --- a/tests/certification/suite.go +++ b/tests/certification/suite.go @@ -73,6 +73,14 @@ var ( } ) +// LoadChecks Loads the suite of certification checks +// +// This function registers several checks for Helm, operators, Helm charts, and +// container certifications by creating a group in the checks database. It +// attaches skip functions that prevent tests from running when necessary data +// is missing and assigns each check a callback to perform its validation logic. +// The function logs its activity at debug level and relies on global +// environment and validator objects for execution. func LoadChecks() { log.Debug("Loading %s suite checks", common.AffiliatedCertTestKey) @@ -108,6 +116,12 @@ func LoadChecks() { })) } +// getContainersToQuery Creates a set of container image identifiers for querying +// +// The function iterates over the containers defined in the test environment, +// adding each container's image identifier to a map with a true value. This map +// represents the collection of images that should be queried during testing. It +// returns the constructed map. func getContainersToQuery(env *provider.TestEnvironment) map[provider.ContainerImageIdentifier]bool { containersToQuery := make(map[provider.ContainerImageIdentifier]bool) for _, cut := range env.Containers { @@ -116,10 +130,24 @@ func getContainersToQuery(env *provider.TestEnvironment) map[provider.ContainerI return containersToQuery } +// testContainerCertification Checks if a container image is certified in the database +// +// This function accepts an image identifier and a validator, delegating to the +// validator's method to determine certification status. 
It returns true when +// the image's registry, repository, tag, and digest match a certified record, +// otherwise false. The result informs test logic that verifies container +// compliance. func testContainerCertification(c provider.ContainerImageIdentifier, validator certdb.CertificationStatusValidator) bool { return validator.IsContainerCertified(c.Registry, c.Repository, c.Tag, c.Digest) } +// testAllOperatorCertified Verifies that all operators are certified for the current OpenShift version +// +// The function iterates over every operator listed in the test environment, +// determining whether each is certified for the detected OpenShift minor +// release. It logs successes or failures and builds separate lists of compliant +// and non‑compliant report objects. Finally, it records these results in the +// check’s outcome. func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) { operatorsUnderTest := env.Operators var compliantObjects []*testhelper.ReportObject @@ -151,6 +179,13 @@ func testAllOperatorCertified(check *checksdb.Check, env *provider.TestEnvironme check.SetResult(compliantObjects, nonCompliantObjects) } +// testHelmCertified Verifies each Helm chart release against certification rules +// +// The function iterates over all Helm chart releases in the test environment, +// logging status for each. It uses a validator to determine if a chart is +// certified for the current Kubernetes version and records compliant or +// non‑compliant reports accordingly. Finally, it sets the overall test result +// with lists of both compliant and non‑compliant objects. 
func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) { helmchartsReleases := env.HelmChartReleases @@ -175,6 +210,14 @@ func testHelmCertified(check *checksdb.Check, env *provider.TestEnvironment, val check.SetResult(compliantObjects, nonCompliantObjects) } +// testContainerCertificationStatusByDigest Validates container digests against certification database +// +// The function iterates over containers in the test environment, checking that +// each has a digest and that the digest exists in the certification database. +// Containers missing a digest or with an unregistered digest are logged as +// errors and added to non‑compliant results; compliant ones are recorded +// accordingly. Finally, it sets the check result with lists of compliant and +// non‑compliant containers. func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provider.TestEnvironment, validator certdb.CertificationStatusValidator) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -206,6 +249,13 @@ func testContainerCertificationStatusByDigest(check *checksdb.Check, env *provid check.SetResult(compliantObjects, nonCompliantObjects) } +// testHelmVersion Checks Helm release version compatibility +// +// This routine inspects the cluster for a Tiller pod to determine whether Helm +// v2 or v3 is in use. If no Tiller pod exists, it records all installed charts +// as compliant with Helm v3 and logs that v3 is being used. When a Tiller pod +// is found, it flags each such pod as non‑compliant because the required +// version is v3, then sets the test result accordingly. 
func testHelmVersion(check *checksdb.Check) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject diff --git a/tests/common/rbac/automount.go b/tests/common/rbac/automount.go index b3760ce1c..c54adcfc1 100644 --- a/tests/common/rbac/automount.go +++ b/tests/common/rbac/automount.go @@ -23,11 +23,14 @@ import ( corev1typed "k8s.io/client-go/kubernetes/typed/core/v1" ) -// EvaluateAutomountTokens evaluates whether the automountServiceAccountToken is correctly configured for the given Pod. -// Checks if the token is explicitly set in the Pod's spec or if it is inherited from the associated ServiceAccount. -// Returns: -// - bool: Indicates whether the Pod passed all checks. if yes- return true, otherwise return false. -// - string: Error message if the Pod is misconfigured, otherwise an empty string. +// EvaluateAutomountTokens Checks if a Pod’s automount service account token is correctly disabled +// +// The function inspects the Pod specification for an explicit +// automountServiceAccountToken setting, returning failure if it is true. If +// unset, it retrieves the associated ServiceAccount’s setting; a false value +// or absence of the field indicates compliance, while true triggers failure +// with a descriptive message. The result consists of a boolean success flag and +// an error string explaining any misconfiguration. // //nolint:gocritic func EvaluateAutomountTokens(client corev1typed.CoreV1Interface, put *provider.Pod) (bool, string) { diff --git a/tests/common/rbac/roles.go b/tests/common/rbac/roles.go index 6ffa244d5..434ac388c 100644 --- a/tests/common/rbac/roles.go +++ b/tests/common/rbac/roles.go @@ -23,22 +23,46 @@ import ( apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) +// RoleRule Represents a single permission within a role +// +// This structure pairs an API resource, identified by its group and name, with +// a verb that defines the action allowed on that resource. 
It is used +// throughout the package to flatten complex Role objects into individual rules +// for easier comparison and filtering. Each instance encapsulates one specific +// permission granted by a Kubernetes RBAC Role. type RoleRule struct { Resource RoleResource Verb string } + +// RoleResource Represents an RBAC resource with its API group and kind +// +// This structure holds the API group and the resource name used in Role or +// ClusterRole rules. It allows code to identify which Kubernetes resource a +// rule applies to, such as "apps" for deployments or "core" for pods. The +// fields are simple strings that can be populated from YAML manifests or +// constructed programmatically. type RoleResource struct { Group, Name string } +// CrdResource Represents a custom resource definition's identity within RBAC +// +// This struct holds the group and names of a CRD, including singular, plural, +// and short forms. It is used to match resources against role rules when +// determining permissions for custom resources. type CrdResource struct { Group, SingularName, PluralName string ShortNames []string } -// GetCrdResources converts a list of apiextv1.CustomResourceDefinition structs into a list of list of CrdResource structs. -// Returns: -// - []CrdResource : a slice of CrdResource objects. +// GetCrdResources Converts CRD definitions into a slice of resource descriptors +// +// This function iterates over each CustomResourceDefinition provided, +// extracting the group, singular name, plural name, and short names from its +// specification. For every CRD it creates a CrdResource struct populated with +// these fields and appends it to a list. The resulting slice is returned for +// use in permission checks or reporting. 
func GetCrdResources(crds []*apiextv1.CustomResourceDefinition) (resourceList []CrdResource) { for _, crd := range crds { var aResource CrdResource @@ -51,9 +75,12 @@ func GetCrdResources(crds []*apiextv1.CustomResourceDefinition) (resourceList [] return resourceList } -// GetAllRules retrieves a list all of rules defined by the role passed in input. -// Returns: -// - []RoleRule : a slice of RoleRule objects. +// GetAllRules Collects every rule from a role into individual entries +// +// The function iterates over each rule in the supplied role, expanding its API +// groups, resources, and verbs into separate RoleRule objects. Each combination +// of group, resource name, and verb is appended to a slice, which is returned. +// The resulting list can be used for detailed policy analysis or filtering. func GetAllRules(aRole *rbacv1.Role) (ruleList []RoleRule) { for _, aRule := range aRole.Rules { for _, aGroup := range aRule.APIGroups { @@ -71,9 +98,12 @@ func GetAllRules(aRole *rbacv1.Role) (ruleList []RoleRule) { return ruleList } -// isResourceInRoleRule Checks if a CRD resource is matched by a rule by comparing its group and plural name. -// Returns: -// - bool : if a CrdResource matches a RoleRule based on their properties return true , otherwise return false. +// isResourceInRoleRule Determines if a CRD matches a role rule by group and resource name +// +// The function receives a custom resource definition and a role rule, extracts +// the base resource name from the rule by removing any subresource part, and +// then compares the API group and plural name of the CRD to those of the rule. +// If both match exactly, it returns true; otherwise it returns false. 
func isResourceInRoleRule(crd CrdResource, roleRule RoleRule) bool { // remove subresources to keep only resource (plural) name ruleResourcePluralName := strings.Split(roleRule.Resource.Name, "/")[0] @@ -81,10 +111,13 @@ func isResourceInRoleRule(crd CrdResource, roleRule RoleRule) bool { return crd.Group == roleRule.Resource.Group && crd.PluralName == ruleResourcePluralName } -// FilterRulesNonMatchingResources filters RoleRules based on whether they match any CrdResource in the resourceList. -// Returns : -// - Matching: a slice of RoleRule that contains all rules where a CrdResource matches a RoleRule based on their properties. -// - NonMatching: a slice of RoleRule that contains all rules not matching the CRD resource. +// FilterRulesNonMatchingResources Separates role rules into those that match CRD resources +// +// This routine examines each rule against a list of CRD resources, collecting +// any rule whose resource group and plural name align with a CRD. Rules that do +// not find a match are returned separately by computing the difference from the +// original list. The output consists of two slices: one for matching rules and +// one for non‑matching ones. func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdResource) (matching, nonMatching []RoleRule) { for _, aRule := range ruleList { for _, aResource := range resourceList { @@ -97,9 +130,14 @@ func FilterRulesNonMatchingResources(ruleList []RoleRule, resourceList []CrdReso return matching, nonMatching } -// SliceDifference checks if there is a difference between s1 and s2 RoleRule slices. -// Returns : -// - []RoleRule : the elements that are exist in s1 but not in s2. +// SliceDifference identifies RoleRule entries present in one slice but absent from another +// +// The function takes two slices of RoleRule values and returns a new slice +// containing elements that exist in the first slice but not in the second. 
It +// swaps the slices if the second is longer to reduce comparisons, then iterates +// through each element of the larger slice, checking for equality against all +// elements of the other slice. Matching items are omitted; non‑matching ones +// are appended to the result, which is returned. func SliceDifference(s1, s2 []RoleRule) (diff []RoleRule) { var temp []RoleRule if len(s2) > len(s1) { diff --git a/tests/identifiers/identifiers.go b/tests/identifiers/identifiers.go index 1624f4163..45e9e843e 100644 --- a/tests/identifiers/identifiers.go +++ b/tests/identifiers/identifiers.go @@ -52,10 +52,23 @@ const ( NotApplicableSNO = ` Not applicable to SNO applications.` ) +// init initializes the test catalog +// +// When the identifiers package is imported this function runs automatically and +// calls InitCatalog to populate the global catalog of test cases. It ensures +// all test entries are registered before any tests execute. func init() { InitCatalog() } +// AddCatalogEntry Creates a test case entry in the catalog +// +// This function registers a new test by building a descriptive record from its +// ID, suite name, description, remediation, and other metadata. It applies +// defaults for missing exception or reference strings, ensures at least one tag +// is present, then calls the claim builder to generate a structured test case +// description. The resulting identifier and description are stored in global +// maps for later retrieval during test execution. 
func AddCatalogEntry(testID, suiteName, description, remediation, exception, reference string, qe bool, categoryclassification map[string]string, tags ...string) (aID claim.Identifier) { // Default Values (if missing) if strings.TrimSpace(exception) == "" { @@ -183,6 +196,15 @@ var ( // TestPodDeleteIdentifier claim.Identifier ) +// InitCatalog Initializes the test case catalog with predefined identifiers +// +// This routine registers a series of test case descriptions into the global +// Catalog map by calling AddCatalogEntry for each known identifier. Each call +// supplies metadata such as test ID, suite key, description, remediation logic, +// exception handling, reference link, query‑enabled flag, classification +// tags, and category classification. The function returns the populated catalog +// mapping identifiers to their corresponding TestCaseDescription objects. +// //nolint:funlen func InitCatalog() map[claim.Identifier]claim.TestCaseDescription { TestNetworkPolicyDenyAllIdentifier = AddCatalogEntry( @@ -1831,8 +1853,12 @@ var ( TestIDToClaimID = map[string]claim.Identifier{} ) -// GetTestIDAndLabels transform the claim.Identifier into a test Id that can be used to skip -// specific tests +// GetTestIDAndLabels Transforms a claim identifier into a test ID and associated labels +// +// The function splits the Tags field of a claim.Identifier by commas to create +// label slices, then appends the identifier's Id and Suite values to that list. +// It stores the full identifier in a global map keyed by Id for later lookup, +// and returns the Id as the test ID along with the constructed label slice. 
func GetTestIDAndLabels(identifier claim.Identifier) (testID string, tags []string) { tags = strings.Split(identifier.Tags, ",") tags = append(tags, identifier.Id, identifier.Suite) diff --git a/tests/lifecycle/ownerreference/ownerreference.go b/tests/lifecycle/ownerreference/ownerreference.go index f9d7ec78d..88de6a2fc 100644 --- a/tests/lifecycle/ownerreference/ownerreference.go +++ b/tests/lifecycle/ownerreference/ownerreference.go @@ -29,11 +29,26 @@ const ( replicaSet = "ReplicaSet" ) +// OwnerReference Tracks a pod's ownership status +// +// This structure stores a reference to a pod and an integer indicating the test +// outcome. The RunTest method examines each owner reference of the pod, logging +// information or errors based on whether the kind matches expected values such +// as StatefulSet or ReplicaSet. If any mismatches are found, it records a +// failure; otherwise, it marks success. GetResults simply returns the stored +// result value. type OwnerReference struct { put *corev1.Pod result int } +// NewOwnerReference Creates a new owner reference checker for a Pod +// +// The function accepts a pointer to a Pod object and constructs an +// OwnerReference instance configured to evaluate the pod's owner references. It +// sets the initial result status to an error state, indicating that validation +// has not yet succeeded. The constructed instance is returned as a pointer so +// it can be used for further testing or result retrieval. func NewOwnerReference(put *corev1.Pod) *OwnerReference { o := OwnerReference{ put: put, @@ -42,8 +57,12 @@ func NewOwnerReference(put *corev1.Pod) *OwnerReference { return &o } -// func (o *OwnerReference) run the tests and store results in -// o.result +// OwnerReference.RunTest verifies a pod’s owner references are either stateful set or replica set +// +// The method iterates over all owner references attached to the pod. 
For each +// reference it logs the kind and marks the test as successful if the kind +// matches one of the expected types; otherwise it logs an error, records +// failure, and stops further checks. func (o *OwnerReference) RunTest(logger *log.Logger) { for _, k := range o.put.OwnerReferences { if k.Kind == statefulSet || k.Kind == replicaSet { @@ -57,7 +76,11 @@ func (o *OwnerReference) RunTest(logger *log.Logger) { } } -// GetResults return result of the OwnerReference type +// OwnerReference.GetResults retrieves the stored result value +// +// The method returns the integer stored in the OwnerReference instance’s +// result field. It takes no arguments and simply accesses the private field to +// provide its current value. func (o *OwnerReference) GetResults() int { return o.result } diff --git a/tests/lifecycle/podrecreation/podrecreation.go b/tests/lifecycle/podrecreation/podrecreation.go index 52abacb25..05574d2b6 100644 --- a/tests/lifecycle/podrecreation/podrecreation.go +++ b/tests/lifecycle/podrecreation/podrecreation.go @@ -45,6 +45,13 @@ const ( NoDelete = "noDelete" ) +// CordonHelper Executes a cordon or uncordon operation on a node +// +// The function retrieves the Kubernetes client holder, logs the requested +// action, and attempts to update the node’s unschedulable status using a +// retry loop that handles conflicts. It accepts a node name and an operation +// string, applies the appropriate flag, and returns any error encountered +// during retrieval or update. func CordonHelper(name, operation string) error { clients := clientsholder.GetClientsHolder() @@ -73,6 +80,15 @@ func CordonHelper(name, operation string) error { return retryErr } +// CountPodsWithDelete Counts pods scheduled on a node and optionally deletes them +// +// The function iterates over all provided pods, selecting those belonging to +// deployments or statefulsets that are running on the specified node and not +// managed by a DaemonSet. 
It increments a counter for each qualifying pod and, +// if deletion is requested, initiates the delete operation in either foreground +// or background mode while synchronizing with a wait group. Errors during +// deletion are logged but do not abort the counting; the function returns the +// total count and any error encountered. func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int, err error) { count = 0 var wg sync.WaitGroup @@ -99,6 +115,11 @@ func CountPodsWithDelete(pods []*provider.Pod, nodeName, mode string) (count int return count, nil } +// skipDaemonPod identifies pods managed by a DaemonSet +// +// This function examines the owner references of a pod and returns true if any +// reference is of kind DaemonSet. Pods owned by a DaemonSet are skipped from +// deletion or recreation logic. Otherwise it returns false. func skipDaemonPod(pod *corev1.Pod) bool { for _, or := range pod.OwnerReferences { if or.Kind == DaemonSetString { @@ -108,6 +129,14 @@ func skipDaemonPod(pod *corev1.Pod) bool { return false } +// deletePod removes a pod and optionally waits for its deletion +// +// The function initiates the deletion of a specified pod using the Kubernetes +// client, applying the pod's configured termination grace period. It creates a +// watch on the pod to monitor its removal from the cluster; if the mode is not +// background, it launches a goroutine that blocks until the pod is confirmed +// deleted or a timeout occurs. Errors during watcher creation or deletion are +// returned for handling by the caller. 
func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error { clients := clientsholder.GetClientsHolder() log.Debug("deleting ns=%s pod=%s with %s mode", pod.Namespace, pod.Name, mode) @@ -140,6 +169,13 @@ func deletePod(pod *corev1.Pod, mode string, wg *sync.WaitGroup) error { return nil } +// CordonCleanup Restores a node to schedulable state after draining +// +// This routine attempts to uncordon the specified node by calling the helper +// function with an uncordon operation. If the uncordon fails, it aborts the +// current check, logging the error and providing diagnostic information. The +// function is used as a cleanup step in tests that temporarily cordon nodes +// during pod recreation scenarios. func CordonCleanup(node string, check *checksdb.Check) { err := CordonHelper(node, Uncordon) if err != nil { @@ -147,6 +183,12 @@ func CordonCleanup(node string, check *checksdb.Check) { } } +// waitPodDeleted waits for a pod to be deleted or times out +// +// The function monitors the provided watcher until it receives a deletion event +// for the specified pod, then stops the watch. If no deletion occurs within the +// timeout period, it logs a timeout message and exits. It does not return a +// value but signals completion by stopping the watcher. func waitPodDeleted(ns, podName string, timeout int64, watcher watch.Interface) { log.Debug("Entering waitPodDeleted ns=%s pod=%s", ns, podName) defer watcher.Stop() diff --git a/tests/lifecycle/podsets/podsets.go b/tests/lifecycle/podsets/podsets.go index b189574fe..033c0ad17 100644 --- a/tests/lifecycle/podsets/podsets.go +++ b/tests/lifecycle/podsets/podsets.go @@ -73,6 +73,13 @@ var WaitForScalingToComplete = func(ns, name string, timeout time.Duration, grou return false } +// WaitForStatefulSetReady waits until a StatefulSet reaches the ready state +// +// The function polls the Kubernetes API at one‑second intervals, retrieving +// the latest StatefulSet definition for the given namespace and name. 
It checks +// whether all replicas are available and the update is complete; if so it logs +// success and returns true. If the timeout expires before readiness, an error +// is logged and false is returned. func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log.Logger) bool { logger.Debug("Check if statefulset %s:%s is ready", ns, name) clients := clientsholder.GetClientsHolder() @@ -91,6 +98,12 @@ func WaitForStatefulSetReady(ns, name string, timeout time.Duration, logger *log return false } +// isDeploymentReady checks if a deployment has finished rolling out +// +// The function retrieves the current state of a deployment in a given namespace +// using Kubernetes clients, then determines readiness by examining its status +// conditions. It returns true when all replicas are updated and available, +// otherwise false, along with any error that occurred during retrieval. func isDeploymentReady(name, namespace string) (bool, error) { appsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1() @@ -102,6 +115,12 @@ func isDeploymentReady(name, namespace string) (bool, error) { return dep.IsDeploymentReady(), nil } +// isStatefulSetReady determines if a StatefulSet is fully ready +// +// The function retrieves the current state of a specified StatefulSet using +// Kubernetes client APIs, then checks whether all its replicas are available. +// It returns true when the StatefulSet meets readiness criteria or an error if +// retrieval fails. func isStatefulSetReady(name, namespace string) (bool, error) { appsV1Api := clientsholder.GetClientsHolder().K8sClient.AppsV1() @@ -113,8 +132,12 @@ func isStatefulSetReady(name, namespace string) (bool, error) { return sts.IsStatefulSetReady(), nil } -// Helper function to get a slice of namespace:name strings from a slice of *provider.Deployments. 
-// E.g: [tnf:test tnf:hazelcast-platform-controller-manager] +// getDeploymentsInfo Collects deployment identifiers as namespace:name strings +// +// The function iterates over a slice of deployment pointers, formatting each +// deployment’s namespace and name into a string separated by a colon. It +// appends these formatted strings to a new slice, which is then returned. This +// helper is used for logging or reporting purposes during test execution. func getDeploymentsInfo(deployments []*provider.Deployment) []string { deps := []string{} for _, dep := range deployments { @@ -124,7 +147,13 @@ func getDeploymentsInfo(deployments []*provider.Deployment) []string { return deps } -// Helper function to get a slice of namespace: name strings from a slice of *provider.Statefulsets. +// getStatefulSetsInfo creates a list of namespace:name strings for each StatefulSet +// +// The function iterates over the supplied slice, formatting each element’s +// namespace and name into a single string separated by a colon. These formatted +// strings are collected in a new slice which is then returned. The resulting +// slice provides a concise representation of the StatefulSets for logging or +// reporting purposes. func getStatefulSetsInfo(statefulSets []*provider.StatefulSet) []string { stsInfo := []string{} for _, sts := range statefulSets { @@ -134,8 +163,13 @@ func getStatefulSetsInfo(statefulSets []*provider.StatefulSet) []string { return stsInfo } -// Helper function that checks the status of each deployment in the slice and returns -// a slice with the not-ready ones. +// getNotReadyDeployments identifies deployments that are not yet ready +// +// This helper inspects each deployment in the supplied slice, calling a +// readiness check for its name and namespace. Deployments reported as ready are +// omitted from the result; any errors during the check also cause the +// deployment to be considered not ready. 
The function returns a new slice +// containing only those deployments that failed the readiness test. func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Deployment { notReadyDeployments := []*provider.Deployment{} for _, dep := range deployments { @@ -157,8 +191,13 @@ func getNotReadyDeployments(deployments []*provider.Deployment) []*provider.Depl return notReadyDeployments } -// Helper function that checks the status of each statefulSet in the slice and returns -// a slice with the not-ready ones. +// getNotReadyStatefulSets filters stateful sets that are not ready +// +// The function iterates over a slice of stateful set objects, checking each +// one's readiness status via an external helper. If the check fails or +// indicates the set is not ready, it records the set in a new slice. The +// resulting slice contains only those stateful sets that are considered not +// ready, and this list is returned to the caller. func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.StatefulSet { notReadyStatefulSets := []*provider.StatefulSet{} for _, sts := range statefulSets { @@ -180,6 +219,13 @@ func getNotReadyStatefulSets(statefulSets []*provider.StatefulSet) []*provider.S return notReadyStatefulSets } +// WaitForAllPodSetsReady waits until all deployments and stateful sets are ready or a timeout occurs +// +// The function polls the readiness status of every deployment and stateful set +// in the test environment at fixed intervals, logging each check. It stops +// early if all podsets become ready before the specified duration; otherwise it +// returns the remaining not‑ready objects after the timeout. The returned +// slices allow callers to report which resources failed to reach readiness. 
func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration, logger *log.Logger) ( notReadyDeployments []*provider.Deployment, notReadyStatefulSets []*provider.StatefulSet) { @@ -217,6 +263,12 @@ func WaitForAllPodSetsReady(env *provider.TestEnvironment, timeout time.Duration return deploymentsToCheck, statefulSetsToCheck } +// GetAllNodesForAllPodSets Collects unique node names for pods owned by replicasets or statefulsets +// +// The function iterates over each pod and inspects its owner references. When +// it finds an owner of kind ReplicaSet or StatefulSet, the pod’s node name is +// added to a map that tracks distinct nodes. The resulting map contains one +// entry per node that hosts at least one such pod. func GetAllNodesForAllPodSets(pods []*provider.Pod) (nodes map[string]bool) { nodes = make(map[string]bool) for _, put := range pods { diff --git a/tests/lifecycle/scaling/crd_scaling.go b/tests/lifecycle/scaling/crd_scaling.go index 01b4abcd7..b41133224 100644 --- a/tests/lifecycle/scaling/crd_scaling.go +++ b/tests/lifecycle/scaling/crd_scaling.go @@ -36,6 +36,15 @@ import ( retry "k8s.io/client-go/util/retry" ) +// TestScaleCrd Tests scaling of a custom resource by temporarily adjusting its replica count +// +// The function receives a reference to a CR with desired replicas, a +// group‑resource schema, a timeout duration, and a logger. It retrieves +// Kubernetes clients, then either increments or decrements the replica count +// depending on whether the current value is one or more, calling an internal +// helper to apply the change and wait for completion. Success of both +// scale‑up and scale‑down operations results in true; any failure logs an +// error and returns false. 
func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool { if crScale == nil { logger.Error("CR object is nill") @@ -76,6 +85,13 @@ func TestScaleCrd(crScale *provider.CrScale, groupResourceSchema schema.GroupRes return true } +// scaleCrHelper adjusts the replica count of a custom resource +// +// The function takes a scaling client, a group-resource descriptor, a CR +// object, desired replicas, direction flag, timeout, and logger. It updates the +// scaling specification for the CR, retries on conflict using exponential +// backoff, waits for scaling to finish, logs errors if any, and returns true +// when successful. func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, autoscalerpram *provider.CrScale, replicas int32, up bool, timeout time.Duration, logger *log.Logger) bool { if up { logger.Debug("Scale UP CRS to %d replicas", replicas) @@ -110,6 +126,14 @@ func scaleCrHelper(scalesGetter scale.ScalesGetter, rc schema.GroupResource, aut return true } +// TestScaleHPACrd Validates HPA scaling for a custom resource +// +// The function checks that an associated horizontal pod autoscaler can scale +// the target CR up and down within a timeout, restoring original limits +// afterward. It updates the HPA spec to match the CR’s desired replica count, +// waits for the CR to reach that state, then reverts to its original min/max +// settings. If any step fails it logs an error and returns false; otherwise +// true. 
func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscaler, groupResourceSchema schema.GroupResource, timeout time.Duration, logger *log.Logger) bool { if cr == nil { logger.Error("CR object is nill") @@ -162,6 +186,13 @@ func TestScaleHPACrd(cr *provider.CrScale, hpa *scalingv1.HorizontalPodAutoscale return scaleHpaCRDHelper(hpscaler, hpa.Name, name, namespace, min, hpa.Spec.MaxReplicas, timeout, groupResourceSchema, logger) } +// scaleHpaCRDHelper Attempts to scale an HPA by updating its replica bounds +// +// The function retrieves the specified HorizontalPodAutoscaler, sets new +// minimum and maximum replica counts, and updates it in a retry loop that +// handles conflicts. After a successful update, it waits for the associated +// custom resource to reach the desired state within a timeout period. It logs +// any errors encountered and returns true on success or false if scaling fails. func scaleHpaCRDHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, crName, namespace string, min, max int32, timeout time.Duration, groupResourceSchema schema.GroupResource, logger *log.Logger) bool { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { hpa, err := hpscaler.Get(context.TODO(), hpaName, metav1.GetOptions{}) diff --git a/tests/lifecycle/scaling/deployment_scaling.go b/tests/lifecycle/scaling/deployment_scaling.go index 92ae095be..74c9cb729 100644 --- a/tests/lifecycle/scaling/deployment_scaling.go +++ b/tests/lifecycle/scaling/deployment_scaling.go @@ -37,6 +37,14 @@ import ( hps "k8s.io/client-go/kubernetes/typed/autoscaling/v1" ) +// TestScaleDeployment Tests scaling behavior of a Deployment without HPA +// +// The function obtains Kubernetes clients, determines the current replica count +// or defaults to one, then performs a scale-up followed by a scale-down if the +// deployment has fewer than two replicas; otherwise it scales down first and +// then up. 
Each scaling operation is executed through a helper that retries on +// conflicts and waits for pods to become ready. It logs success or failure and +// returns true only when both scaling steps complete successfully. func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, logger *log.Logger) bool { clients := clientsholder.GetClientsHolder() logger.Info("Deployment not using HPA: %s:%s", deployment.Namespace, deployment.Name) @@ -76,6 +84,14 @@ func TestScaleDeployment(deployment *appsv1.Deployment, timeout time.Duration, l return true } +// scaleDeploymentHelper Adjusts a Deployment's replica count with conflict handling +// +// This routine logs the scaling action, retrieves the current Deployment +// object, updates its desired replica count, and applies the change using a +// retry loop to handle conflicts. After a successful update it waits for all +// pods in the set to become ready within a specified timeout, reporting any +// errors through logging. The function returns true if the scaling succeeds and +// false otherwise. func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv1.Deployment, replicas int32, timeout time.Duration, up bool, logger *log.Logger) bool { if up { logger.Info("Scale UP deployment to %d replicas", replicas) @@ -110,6 +126,16 @@ func scaleDeploymentHelper(client typedappsv1.AppsV1Interface, deployment *appsv return true } +// TestScaleHpaDeployment Verifies that an HPA can scale a deployment up and down correctly +// +// The function retrieves the Kubernetes client and determines the current +// replica count of the deployment, as well as the min and max values from the +// HPA specification. It then performs a sequence of scaling operations: if +// replicas are low it scales up to the minimum, restores to the original, or if +// high it scales down to one replica before restoring. 
After each adjustment it +// calls a helper that updates the HPA and waits for the deployment to become +// ready. If any step fails, false is returned; otherwise true indicates +// successful round‑trip scaling. func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool { clients := clientsholder.GetClientsHolder() hpscaler := clients.K8sClient.AutoscalingV1().HorizontalPodAutoscalers(deployment.Namespace) @@ -160,6 +186,13 @@ func TestScaleHpaDeployment(deployment *provider.Deployment, hpa *v1autoscaling. return scaleHpaDeploymentHelper(hpscaler, hpa.Name, deployment.Name, deployment.Namespace, min, max, timeout, logger) } +// scaleHpaDeploymentHelper Adjusts the minimum and maximum replica counts for a horizontal pod autoscaler and waits for the deployment to stabilize +// +// The helper updates an HPA's MinReplicas and MaxReplicas fields using retry +// logic to handle conflicts, then triggers a wait until the associated +// deployment is ready or times out. It logs any errors encountered during get, +// update, or readiness checks and returns true only when all operations +// succeed. 
func scaleHpaDeploymentHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, deploymentName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { hpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{}) diff --git a/tests/lifecycle/scaling/scaling_helper.go b/tests/lifecycle/scaling/scaling_helper.go index 5a404b04c..a8a1601b6 100644 --- a/tests/lifecycle/scaling/scaling_helper.go +++ b/tests/lifecycle/scaling/scaling_helper.go @@ -10,6 +10,12 @@ import ( scalingv1 "k8s.io/api/autoscaling/v1" ) +// GetResourceHPA Finds an HPA matching a resource name, namespace, and kind +// +// The function iterates over a list of HorizontalPodAutoscaler objects, +// checking each one's scale target reference for the specified kind, name, and +// namespace. If a match is found, that HPA is returned; otherwise the function +// returns nil to indicate no suitable HPA exists. func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespace, kind string) *scalingv1.HorizontalPodAutoscaler { for _, hpa := range hpaList { if hpa.Spec.ScaleTargetRef.Kind == kind && hpa.Spec.ScaleTargetRef.Name == name && hpa.Namespace == namespace { @@ -18,6 +24,13 @@ func GetResourceHPA(hpaList []*scalingv1.HorizontalPodAutoscaler, name, namespac } return nil } + +// IsManaged Checks if a deployment or stateful set is listed as managed +// +// The function iterates over the provided slice of managed pod sets, comparing +// each name with the supplied pod set name. If a match is found it returns +// true, indicating that the object should be considered under management for +// scaling tests. Otherwise, it returns false. 
func IsManaged(podSetName string, managedPodSet []configuration.ManagedDeploymentsStatefulsets) bool { for _, ps := range managedPodSet { if ps.Name == podSetName { @@ -27,6 +40,13 @@ func IsManaged(podSetName string, managedPodSet []configuration.ManagedDeploymen return false } +// CheckOwnerReference Determines if owner references match scalable CRD filters +// +// The function iterates over each OwnerReference of a resource, comparing its +// kind to the kinds defined in available CustomResourceDefinitions. For +// matching kinds it checks whether the CRD name ends with any configured +// suffix; if so, it returns the corresponding scalability flag from that +// filter. If no match is found, it returns false. func CheckOwnerReference(ownerReference []apiv1.OwnerReference, crdFilter []configuration.CrdFilter, crds []*apiextv1.CustomResourceDefinition) bool { for _, owner := range ownerReference { for _, aCrd := range crds { diff --git a/tests/lifecycle/scaling/statefulset_scaling.go b/tests/lifecycle/scaling/statefulset_scaling.go index 8bc2cd44e..73798216f 100644 --- a/tests/lifecycle/scaling/statefulset_scaling.go +++ b/tests/lifecycle/scaling/statefulset_scaling.go @@ -36,6 +36,13 @@ import ( hps "k8s.io/client-go/kubernetes/typed/autoscaling/v1" ) +// TestScaleStatefulSet Tests scaling of a StatefulSet by adjusting replicas +// +// The function retrieves Kubernetes clients, determines the current replica +// count, and then performs an up‑then‑down or down‑then‑up scale +// sequence using a helper that retries on conflict. It logs each action and +// returns false if any scaling step fails. A true value indicates both scale +// operations succeeded within the given timeout. 
func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration, logger *log.Logger) bool { clients := clientsholder.GetClientsHolder() name, namespace := statefulset.Name, statefulset.Namespace @@ -79,6 +86,12 @@ func TestScaleStatefulSet(statefulset *appsv1.StatefulSet, timeout time.Duration return true } +// scaleStatefulsetHelper updates a StatefulSet replica count and waits for readiness +// +// The helper retrieves the current StatefulSet, sets its desired replicas, and +// updates it using retry logic to handle conflicts. After each successful +// update it polls until the set reports ready or times out. It logs failures +// and returns a boolean indicating success. func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.StatefulSetInterface, statefulset *appsv1.StatefulSet, replicas int32, timeout time.Duration, logger *log.Logger) bool { name := statefulset.Name namespace := statefulset.Namespace @@ -110,6 +123,14 @@ func scaleStatefulsetHelper(clients *clientsholder.ClientsHolder, ssClient v1.St return true } +// TestScaleHpaStatefulSet Verifies HPA scaling of a StatefulSet +// +// The function obtains Kubernetes clients, then adjusts the HPA’s min and max +// replica counts to test both up‑scaling and down‑scaling scenarios on the +// target StatefulSet. It calls a helper that updates the HPA, waits for the +// StatefulSet to become ready, and reports success or failure. Finally it +// restores the original HPA settings and returns whether all scaling steps +// succeeded. 
func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling.HorizontalPodAutoscaler, timeout time.Duration, logger *log.Logger) bool { clients := clientsholder.GetClientsHolder() hpaName := hpa.Name @@ -161,6 +182,13 @@ func TestScaleHpaStatefulSet(statefulset *appsv1.StatefulSet, hpa *v1autoscaling return pass } +// scaleHpaStatefulSetHelper updates HPA replica limits and waits for StatefulSet readiness +// +// The function attempts to set the horizontal pod autoscaler's minimum and +// maximum replicas, retrying on conflicts until success or timeout. After each +// update it polls the target StatefulSet to confirm it reaches a ready state +// within the given duration, logging errors if not. It returns true when both +// the HPA update and readiness check succeed, otherwise false. func scaleHpaStatefulSetHelper(hpscaler hps.HorizontalPodAutoscalerInterface, hpaName, statefulsetName, namespace string, min, max int32, timeout time.Duration, logger *log.Logger) bool { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { hpa, err := hpscaler.Get(context.TODO(), hpaName, v1machinery.GetOptions{}) diff --git a/tests/lifecycle/suite.go b/tests/lifecycle/suite.go index 4f7bd3663..903049df5 100644 --- a/tests/lifecycle/suite.go +++ b/tests/lifecycle/suite.go @@ -63,6 +63,16 @@ var ( } ) +// LoadChecks Registers lifecycle test checks for the certsuite suite +// +// This routine initializes a checks group dedicated to lifecycle tests and +// attaches a series of individual checks such as container +// pre‑stop/post‑start probes, scaling tests, high availability validations, +// and storage provisioning rules. Each check is configured with skip functions +// that prevent execution when required resources are absent or the test +// environment does not meet prerequisites. The function logs its activity and +// relies on helper utilities to populate the checks database. 
+// //nolint:funlen func LoadChecks() { log.Debug("Loading %s suite checks", common.LifecycleTestKey) @@ -236,6 +246,12 @@ func LoadChecks() { })) } +// testContainersPreStop Verifies that containers declare a preStop lifecycle hook +// +// The routine iterates over all test environment containers, checking whether +// each has a defined preStop hook. Containers missing the hook are logged as +// errors and recorded in a non‑compliant list; those with the hook are noted +// as compliant. Finally, the check result aggregates both lists for reporting. func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -253,6 +269,13 @@ func testContainersPreStop(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } +// testContainersPostStart Verifies that each container has a postStart lifecycle hook defined +// +// The function iterates over all containers in the test environment, logging +// information about each one. For containers missing a postStart hook it +// records a non‑compliant report object; otherwise it records a compliant +// report with a note on a known upstream bug. Finally, it sets the check result +// with the lists of compliant and non‑compliant objects. func testContainersPostStart(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -272,6 +295,13 @@ func testContainersPostStart(check *checksdb.Check, env *provider.TestEnvironmen check.SetResult(compliantObjects, nonCompliantObjects) } +// testContainersImagePolicy Verifies that all containers use IfNotPresent image pull policy +// +// The function iterates over each container in the test environment, logging +// its name and checking whether its ImagePullPolicy equals PullIfNotPresent. 
+// Containers not meeting this requirement are recorded as non‑compliant with +// an error log, while compliant ones are logged positively. Finally, it +// aggregates the results into two report lists and sets them on the check. func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -288,6 +318,12 @@ func testContainersImagePolicy(check *checksdb.Check, env *provider.TestEnvironm check.SetResult(compliantObjects, nonCompliantObjects) } +// testContainersReadinessProbe Verifies that each container has a readiness probe +// +// The routine iterates over all containers in the test environment, logging +// whether each one defines a readiness probe. Containers lacking this +// configuration are recorded as non‑compliant, while those with a probe are +// marked compliant. Finally, the check results are set for reporting. func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -304,6 +340,13 @@ func testContainersReadinessProbe(check *checksdb.Check, env *provider.TestEnvir check.SetResult(compliantObjects, nonCompliantObjects) } +// testContainersLivenessProbe Verifies that every container has a liveness probe defined +// +// The function iterates over all containers in the test environment, logging +// whether each container includes a liveness probe. Containers lacking this +// probe are recorded as non‑compliant with an explanatory report object; +// those that have it are marked compliant. After processing all containers, the +// results are stored back into the check for reporting. 
func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -320,6 +363,13 @@ func testContainersLivenessProbe(check *checksdb.Check, env *provider.TestEnviro check.SetResult(compliantObjects, nonCompliantObjects) } +// testContainersStartupProbe verifies that each container has a StartupProbe configured +// +// The function walks through all containers in the test environment, logging +// information about each one. If a container lacks a StartupProbe it logs an +// error and records a non‑compliant report object; otherwise it logs success +// and records a compliant object. Finally, it sets the check result with the +// lists of compliant and non‑compliant objects. func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -336,6 +386,13 @@ func testContainersStartupProbe(check *checksdb.Check, env *provider.TestEnviron check.SetResult(compliantObjects, nonCompliantObjects) } +// testPodsOwnerReference Verifies that each pod’s owner reference follows best‑practice rules +// +// The function iterates over all pods in the test environment, creating an +// OwnerReference object for each one. It runs a compliance check on the +// reference; non‑compliant pods are logged and recorded as failures, while +// compliant ones are noted as successes. Finally, it records the results with +// the test framework. 
func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -354,6 +411,13 @@ func testPodsOwnerReference(check *checksdb.Check, env *provider.TestEnvironment check.SetResult(compliantObjects, nonCompliantObjects) } +// testPodNodeSelectorAndAffinityBestPractices Checks that pods do not use node selectors or affinity +// +// The routine iterates over a list of pods, logging each one. It flags any pod +// that specifies a node selector or node affinity as non‑compliant, creating +// report objects for those cases. Pods lacking both fields are marked compliant +// and reported accordingly. Finally, it records the results in the supplied +// check object. func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check *checksdb.Check) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -379,6 +443,12 @@ func testPodNodeSelectorAndAffinityBestPractices(testPods []*provider.Pod, check check.SetResult(compliantObjects, nonCompliantObjects) } +// nameInDeploymentSkipList Checks if a deployment is excluded from scaling tests +// +// The function iterates through a slice of configuration entries that specify +// deployments to skip, comparing the supplied name and namespace with each +// entry. If an exact match is found, it returns true indicating the deployment +// should be omitted from further testing. Otherwise, it returns false. 
func nameInDeploymentSkipList(name, namespace string, list []configuration.SkipScalingTestDeploymentsInfo) bool { for _, l := range list { if name == l.Name && namespace == l.Namespace { @@ -388,6 +458,12 @@ func nameInDeploymentSkipList(name, namespace string, list []configuration.SkipS return false } +// nameInStatefulSetSkipList checks if a StatefulSet should be excluded from scaling tests +// +// The function iterates over a slice of configuration entries, each containing +// a name and namespace pair. If the provided StatefulSet matches any entry in +// the list, it returns true indicating that the test should skip this object; +// otherwise it returns false. func nameInStatefulSetSkipList(name, namespace string, list []configuration.SkipScalingTestStatefulSetsInfo) bool { for _, l := range list { if name == l.Name && namespace == l.Namespace { @@ -397,6 +473,15 @@ func nameInStatefulSetSkipList(name, namespace string, list []configuration.Skip return false } +// testDeploymentScaling Verifies deployment scalability via HPA or direct scaling +// +// It iterates through all deployments in the test environment, skipping those +// managed by CRDs or listed in a configuration skip list. For each remaining +// deployment, it checks if an associated horizontal pod autoscaler exists; if +// so, it runs the HPA scaling test, otherwise it performs a direct scale test. +// Results are logged and collected into compliant or non‑compliant report +// objects for later reporting. 
+// //nolint:dupl func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) { defer env.SetNeedsRefresh() @@ -446,6 +531,13 @@ func testDeploymentScaling(env *provider.TestEnvironment, timeout time.Duration, check.SetResult(compliantObjects, nonCompliantObjects) } +// testScaleCrd Evaluates scaling behavior of custom resources +// +// This function iterates over all custom resources scheduled for testing, +// checks if an HPA exists for each, and runs the appropriate scaling test. It +// records compliant or non‑compliant results in report objects and logs +// errors when scaling fails. Finally, it stores the outcome in the check +// result. func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) { defer env.SetNeedsRefresh() var compliantObjects []*testhelper.ReportObject @@ -471,6 +563,15 @@ func testScaleCrd(env *provider.TestEnvironment, timeout time.Duration, check *c check.SetResult(compliantObjects, nonCompliantObjects) } +// testStatefulSetScaling Verifies scaling behavior of StatefulSets +// +// This routine iterates over all StatefulSet resources in the test environment, +// skipping those that are managed by CRDs or configured to be excluded from +// scaling tests. For each remaining set it checks whether an HPA controls it; +// if so, it runs a dedicated HPA scaling test, otherwise it scales the +// StatefulSet directly. Results of compliant and non‑compliant objects are +// collected and reported back to the check framework. 
+// //nolint:dupl func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration, check *checksdb.Check) { defer env.SetNeedsRefresh() @@ -520,7 +621,14 @@ func testStatefulSetScaling(env *provider.TestEnvironment, timeout time.Duration check.SetResult(compliantObjects, nonCompliantObjects) } -// testHighAvailability +// testHighAvailability Verifies high availability settings for deployments and statefulsets +// +// The function iterates over all deployments and statefulsets in the test +// environment, checking that each has more than one replica and defines pod +// anti‑affinity rules unless an "AffinityRequired" label is present. It logs +// informative messages for compliant objects and error messages for +// non‑compliant ones, creating report entries accordingly. Finally it sets +// the check result with lists of compliant and non‑compliant objects. func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -574,7 +682,14 @@ func testHighAvailability(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } -// testPodsRecreation tests that pods belonging to deployments and statefulsets are re-created and ready in case a node is lost +// testPodsRecreation Verifies pods in deployments and statefulsets are recreated after node loss +// +// The function drains each node used by pod sets, ensuring that pods belonging +// to deployments or statefulsets are rescheduled and become ready again. It +// first confirms all pod sets are initially ready, then iterates over nodes, +// cordoning them, counting affected pods, performing a safe drain, and finally +// uncordoning the node. Any failure in readiness or draining results in +// non‑compliant reports; if all succeed, compliant objects are recorded. 
func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { //nolint:funlen,gocyclo var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -687,6 +802,14 @@ func testPodsRecreation(check *checksdb.Check, env *provider.TestEnvironment) { needsPostMortemInfo = false } +// testPodPersistentVolumeReclaimPolicy Verifies that all pod volumes use persistent volumes with a DELETE reclaim policy +// +// The function iterates over each pod in the test environment, examining every +// volume attached to the pod. For volumes backed by a PersistentVolumeClaim, it +// checks whether the corresponding PersistentVolume has a reclaim policy of +// Delete; non‑compliant cases are recorded with detailed fields. Finally, +// compliant and non‑compliant results are aggregated into the check’s +// report. func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -723,6 +846,14 @@ func testPodPersistentVolumeReclaimPolicy(check *checksdb.Check, env *provider.T check.SetResult(compliantObjects, nonCompliantObjects) } +// testCPUIsolation Verifies CPU isolation compliance for guaranteed pods +// +// The function iterates over all pods that request exclusive CPUs, checking +// each pod’s resource requests, limits, runtime class name, and annotations +// to ensure they meet the criteria for CPU isolation. For every pod it logs +// whether the pod is isolated or not, creating a report object accordingly. +// After processing all pods, it sets the test result with lists of compliant +// and non‑compliant objects. 
func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) { // Individual requirements we are looking for: // - CPU Requests and Limits must be in the form of whole units @@ -749,6 +880,12 @@ func testCPUIsolation(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } +// testAffinityRequiredPods Verifies pod affinity compliance for pods requiring node selector or affinity rules +// +// The routine iterates over all pods flagged as needing affinity, checks each +// pod's affinity configuration, logs the outcome, and records compliant and +// non‑compliant cases in report objects. It aggregates results and sets them +// on the test check to summarize compliance status. func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -767,6 +904,14 @@ func testAffinityRequiredPods(check *checksdb.Check, env *provider.TestEnvironme check.SetResult(compliantObjects, nonCompliantObjects) } +// testPodTolerationBypass Verifies that pod tolerations remain default +// +// The routine iterates over each pod in the test environment, checking every +// toleration against the Kubernetes default set and whether it has been altered +// for the pod's QoS class. If a non‑default or modified toleration is found, +// it records the pod as non‑compliant and logs an error; otherwise it marks +// the pod compliant. Finally, the check aggregates all compliant and +// non‑compliant reports into the test result. 
func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -795,6 +940,16 @@ func testPodTolerationBypass(check *checksdb.Check, env *provider.TestEnvironmen check.SetResult(compliantObjects, nonCompliantObjects) } +// testStorageProvisioner Verifies pod storage provisioning compliance across cluster types +// +// The function iterates over all pods, inspecting each volume that references a +// persistent volume claim. For every matched claim it looks up the associated +// storage class to determine its provisioner type. Based on whether the +// environment is single‑node or multi‑node and whether local storage is used, it +// logs compliance or non‑compliance and records the outcome in report +// objects. Finally, it sets the check result with lists of compliant and +// non‑compliant pods. +// //nolint:funlen func testStorageProvisioner(check *checksdb.Check, env *provider.TestEnvironment) { const localStorageProvisioner = "kubernetes.io/no-provisioner" diff --git a/tests/lifecycle/tolerations/tolerations.go b/tests/lifecycle/tolerations/tolerations.go index 647b74cbd..419f46612 100644 --- a/tests/lifecycle/tolerations/tolerations.go +++ b/tests/lifecycle/tolerations/tolerations.go @@ -27,6 +27,17 @@ var ( tolerationSecondsDefault = 300 ) +// IsTolerationModified Determines if a pod toleration deviates from the Kubernetes defaults +// +// The function examines a single toleration in conjunction with the pod's QoS +// class to see whether it matches one of the three default tolerations that +// kubelet adds automatically. It first filters out any toleration whose key +// does not start with "node.kubernetes.io", then checks the effect, key, +// operator, and optional seconds value against the expected defaults for +// NoExecute and NoSchedule effects, considering the pod's QoS class for +// memory‑pressure cases. 
If a toleration fails these checks or matches a +// known non‑compliant set, the function returns true to indicate +// modification; otherwise it returns false. func IsTolerationModified(t corev1.Toleration, qosClass corev1.PodQOSClass) bool { const ( notReadyStr = "node.kubernetes.io/not-ready" @@ -87,6 +98,15 @@ func IsTolerationModified(t corev1.Toleration, qosClass corev1.PodQOSClass) bool return false } +// IsTolerationDefault Determines whether a toleration is one of the default Kubernetes tolerations +// +// This function examines the key field of a toleration and returns true if it +// includes the substring "node.kubernetes.io", indicating that the toleration +// originates from the default set added by Kubernetes. It performs this check +// using a simple string containment test, which covers all standard node taint +// keys such as not-ready, unreachable, and memory-pressure. The result is a +// boolean value signifying whether the toleration should be considered +// unmodified. func IsTolerationDefault(t corev1.Toleration) bool { return strings.Contains(t.Key, "node.kubernetes.io") } diff --git a/tests/lifecycle/volumes/volumes.go b/tests/lifecycle/volumes/volumes.go index 1026b7a22..7db72cdd6 100644 --- a/tests/lifecycle/volumes/volumes.go +++ b/tests/lifecycle/volumes/volumes.go @@ -20,6 +20,12 @@ import ( corev1 "k8s.io/api/core/v1" ) +// getPVCFromSlice retrieves a PersistentVolumeClaim by name from a list +// +// This function iterates over the provided slice of claims, comparing each +// claim's name to the target name. If a match is found, it returns a pointer to +// that claim; otherwise, it returns nil to indicate no matching claim was +// present. 
func getPVCFromSlice(pvcs []corev1.PersistentVolumeClaim, pvcName string) *corev1.PersistentVolumeClaim { for i := range pvcs { if pvcs[i].Name == pvcName { @@ -29,6 +35,12 @@ func getPVCFromSlice(pvcs []corev1.PersistentVolumeClaim, pvcName string) *corev return nil } +// IsPodVolumeReclaimPolicyDelete Verifies that a pod volume’s reclaim policy is DELETE +// +// The function receives a pod volume, the cluster's persistent volumes, and +// persistent volume claims. It first finds the claim referenced by the volume, +// then checks if the corresponding persistent volume has a delete reclaim +// policy. If both conditions are satisfied, it returns true; otherwise false. func IsPodVolumeReclaimPolicyDelete(vol *corev1.Volume, pvs []corev1.PersistentVolume, pvcs []corev1.PersistentVolumeClaim) bool { // Check if the Volume is bound to a PVC. if putPVC := getPVCFromSlice(pvcs, vol.PersistentVolumeClaim.ClaimName); putPVC != nil { diff --git a/tests/manageability/suite.go b/tests/manageability/suite.go index 457e4d9b5..8508b660c 100644 --- a/tests/manageability/suite.go +++ b/tests/manageability/suite.go @@ -44,7 +44,14 @@ var ( } ) -// LoadChecks loads all the checks. +// LoadChecks Initializes the manageability checks group and registers test functions +// +// The function creates a new checks group for manageability, logs the loading +// action, and adds two checks: one verifying container image tags and another +// validating container port naming conventions. Each check is configured with a +// skip condition that bypasses it if no containers are present and supplies the +// appropriate test logic via closures. The checks are then registered in the +// global checks database for execution during testing. func LoadChecks() { log.Debug("Loading %s suite checks", common.ManageabilityTestKey) @@ -66,8 +73,13 @@ func LoadChecks() { })) } -// testContainersImageTag is a function that checks if each container is missing image tag(s). 
-// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testContainersImageTag Verifies that each container has a non-empty image tag +// +// The function iterates over all containers in the test environment, checking +// whether their image tags are present. Containers lacking tags are logged as +// errors and added to a non‑compliant list; those with tags are logged as +// info and added to a compliant list. Finally, it records both lists as the +// result of the compliance check. func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -88,16 +100,23 @@ func testContainersImageTag(check *checksdb.Check, env *provider.TestEnvironment // and the optional can be chosen by the application. Allowed protocol names: grpc, grpc-web, http, http2, tcp, udp. var allowedProtocolNames = map[string]bool{"grpc": true, "http": true, "http2": true, "tcp": true, "udp": true} -// containerPortNameFormatCheck is a function that checks if the format of a container port name is valid. -// Return: -// - bool: true if the format of a container port name is valid, otherwise return false. +// containerPortNameFormatCheck Verifies that a container port name starts with an allowed protocol +// +// The function splits the provided name on hyphens, extracts the first segment, +// and checks whether this segment is present in the global map of permitted +// protocols. It returns true if the protocol prefix is valid; otherwise it +// returns false. func containerPortNameFormatCheck(portName string) bool { res := strings.Split(portName, "-") return allowedProtocolNames[res[0]] } -// testContainerPortNameFormat is a function that checks if each container declares ports that do not follow the partner naming conventions. 
-// It sets the result of a compliance check based on the analysis of lists of compliant and non-compliant objects. +// testContainerPortNameFormat Verifies that container port names match partner naming conventions +// +// The function iterates over all containers in the test environment, checking +// each declared port name against a list of allowed protocol prefixes. It logs +// errors for non‑compliant ports and records both compliant and +// non‑compliant objects. Finally, it sets the check result with these lists. func testContainerPortNameFormat(check *checksdb.Check, env *provider.TestEnvironment) { for _, newProtocol := range env.ValidProtocolNames { allowedProtocolNames[newProtocol] = true diff --git a/tests/networking/icmp/icmp.go b/tests/networking/icmp/icmp.go index b418136cd..285fd0385 100644 --- a/tests/networking/icmp/icmp.go +++ b/tests/networking/icmp/icmp.go @@ -36,6 +36,12 @@ const ( SuccessfulOutputRegex = `(?m)(\d+) packets transmitted, (\d+)( packets){0,1} received, (?:\+(\d+) errors)?.*$` ) +// PingResults represents the outcome of a ping test +// +// The structure holds counts for transmitted, received, and error packets along +// with an outcome code that indicates success, failure, or error. It is used by +// parsing functions to convert raw command output into structured data, and it +// provides a string representation summarizing these metrics. type PingResults struct { outcome int transmitted int @@ -43,10 +49,23 @@ type PingResults struct { errors int } +// PingResults.String Provides a formatted string representation of ping results +// +// The method formats the outcome, transmitted count, received count, and error +// count into a readable string. It converts the numeric result code to a +// human‑readable word using a helper function before embedding all values in +// the output. 
func (results PingResults) String() string { return fmt.Sprintf("outcome: %s transmitted: %d received: %d errors: %d", testhelper.ResultToString(results.outcome), results.transmitted, results.received, results.errors) } +// BuildNetTestContext Creates a map of network test contexts for pods +// +// The function iterates over provided pods, filtering out those excluded from +// tests. For each pod it collects IP addresses based on the requested interface, +// selects one container to represent the namespace, and builds a context that +// designates a tester source and destination targets. The resulting map is +// keyed by network identifiers and returned for use in connectivity checks. func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, aType netcommons.IFType, logger *log.Logger) (netsUnderTest map[string]netcommons.NetTestContext) { netsUnderTest = make(map[string]netcommons.NetTestContext) for _, put := range pods { @@ -76,8 +95,13 @@ func BuildNetTestContext(pods []*provider.Pod, aIPVersion netcommons.IPVersion, return netsUnderTest } -// processContainerIpsPerNet takes a container ip addresses for a given network attachment's and uses it as a test target. -// The first container in the loop is selected as the test initiator. the Oc context of the container is used to initiate the pings +// processContainerIpsPerNet collects container IPs for a network to set up ping tests +// +// This routine filters the supplied IP addresses by the desired IP version, +// then records them in a shared map keyed by network name. The first IP found +// is designated as the test initiator and stored as the source of pings; +// subsequent IPs become destination targets. If no suitable IPs exist, the +// container is skipped and the function exits early. 
func processContainerIpsPerNet(containerID *provider.Container, netKey string, ipAddresses []string, @@ -126,8 +150,13 @@ func processContainerIpsPerNet(containerID *provider.Container, netsUnderTest[netKey] = entry } -// runNetworkingTests takes a map netcommons.NetTestContext, e.g. one context per network attachment -// and runs pings test with it. Returns a network name to a slice of bad target IPs map. +// RunNetworkingTests Executes ICMP ping tests across multiple network attachments +// +// The function receives a map of networking contexts, a ping count, IP version, +// and logger. It iterates over each network, performing pings from a source +// container to all destination containers, recording successes and failures in +// report objects. If no networks or destinations are available, the test is +// skipped. func RunNetworkingTests( //nolint:funlen netsUnderTest map[string]netcommons.NetTestContext, count int, @@ -243,6 +272,13 @@ var TestPing = func(sourceContainerID *provider.Container, targetContainerIP net return results, err } +// parsePingResult Parses ping command output to determine success, failure, or error +// +// The function examines the standard output for patterns indicating invalid +// arguments or successful execution. It extracts transmitted, received, and +// error counts from the output using regular expressions and converts them to +// integers. Based on these metrics, it sets an outcome flag and returns the +// results along with any parsing errors. 
func parsePingResult(stdout, stderr string) (results PingResults, err error) { re := regexp.MustCompile(ConnectInvalidArgumentRegex) matched := re.FindStringSubmatch(stdout) diff --git a/tests/networking/netcommons/netcommons.go b/tests/networking/netcommons/netcommons.go index 57776c190..e30ee9273 100644 --- a/tests/networking/netcommons/netcommons.go +++ b/tests/networking/netcommons/netcommons.go @@ -48,6 +48,12 @@ const ( DEFAULT IFType = "Default" ) +// IPVersion.String Returns the textual form of an IP version +// +// The method examines the value of the receiver and maps each predefined +// constant to its corresponding string. It covers IPv4, IPv6, combined +// IPv4/IPv6, and an undefined case. If none match, it defaults to the undefined +// string. func (version IPVersion) String() string { switch version { case IPv4: @@ -62,9 +68,13 @@ func (version IPVersion) String() string { return UndefinedString } -// netTestContext this is a data structure describing a network test context for a given subnet (e.g. network attachment) -// The test context defines a tester or test initiator, that is initiating the pings. It is selected randomly (first container in the list) -// It also defines a list of destination ping targets corresponding to the other containers IPs on this subnet +// NetTestContext Describes a network test setup for a subnet +// +// This structure holds information about which container initiates ping tests +// on a given network, the node it runs on, and the list of target containers to +// be pinged. The tester source is chosen randomly from available containers. It +// provides a string representation that lists the initiating container followed +// by all destination targets. 
type NetTestContext struct { // testerContainerNodeOc session context to access the node running the container selected to initiate tests TesterContainerNodeName string @@ -74,7 +84,13 @@ type NetTestContext struct { DestTargets []ContainerIP } -// containerIP holds a container identification and its IP for networking tests. +// ContainerIP holds a container identification and its IP for networking tests +// +// This structure stores the IP address of a target container together with +// the identification of the owning container and the name of the network +// interface the address belongs to. It is used to describe ping initiators +// and destination targets when building network connectivity tests for a +// subnet. type ContainerIP struct { // ip address of the target container IP string @@ -84,7 +100,13 @@ type ContainerIP struct { InterfaceName string } -// String displays the NetTestContext data structure +// NetTestContext.String Formats the network test context for display +// +// This method builds a multi-line string describing the container that +// initiates the tests and each target container it will communicate with. It +// first writes the source container, then lists all destination containers or +// indicates when none are present. The resulting string is returned for logging +// or debugging purposes. func (testContext *NetTestContext) String() string { var sb strings.Builder sb.WriteString(fmt.Sprintf("From initiating container: %s\n", testContext.TesterSource.String())) @@ -97,7 +119,12 @@ func (testContext *NetTestContext) String() string { return sb.String() } -// String Displays the ContainerIP data structure +// ContainerIP.String Formats the container IP address with its identifier +// +// This method constructs a string that shows the IP address followed by the +// long form of the container’s identifier in parentheses. 
It uses formatting +// utilities to combine the two pieces into a single readable representation, +// which is returned as a string. func (cip *ContainerIP) String() string { return fmt.Sprintf("%s ( %s )", cip.IP, @@ -105,7 +132,14 @@ func (cip *ContainerIP) String() string { ) } -// PrintNetTestContextMap displays the NetTestContext full map +// PrintNetTestContextMap Formats a map of network test contexts into a readable string +// +// This function iterates over a mapping from network names to NetTestContext +// objects, building a multi-line string that begins with a header for each +// network and then includes the detailed output of the context’s String +// method. If no networks are provided it returns a short message indicating +// there is nothing to test. The resulting string is used by other parts of the +// test suite to log or display current test conditions. func PrintNetTestContextMap(netsUnderTest map[string]NetTestContext) string { var sb strings.Builder if len(netsUnderTest) == 0 { @@ -118,7 +152,13 @@ func PrintNetTestContextMap(netsUnderTest map[string]NetTestContext) string { return sb.String() } -// PodIPsToStringList converts a list of corev1.PodIP objects into a list of strings +// PodIPsToStringList Transforms a slice of PodIP structures into plain IP address strings +// +// The function iterates over each corev1.PodIP element, extracts the IP string +// field, and appends it to a new slice. It returns this list of string +// addresses for use elsewhere in the package. The operation is linear in the +// number of input items and requires no additional dependencies beyond standard +// Go append. 
func PodIPsToStringList(ips []corev1.PodIP) (ipList []string) { for _, ip := range ips { ipList = append(ipList, ip.IP) @@ -126,7 +166,13 @@ func PodIPsToStringList(ips []corev1.PodIP) (ipList []string) { return ipList } -// GetIPVersion parses a ip address from a string and returns its version +// GetIPVersion determines whether a string represents an IPv4 or IPv6 address +// +// The function attempts to parse the input as an IP address using the standard +// library. If parsing fails, it reports that the string is not a valid IP. It +// then distinguishes between IPv4 and IPv6 by checking if the parsed address +// can be converted to a four‑byte form; the result is returned along with any +// error. func GetIPVersion(aIP string) (IPVersion, error) { ip := net.ParseIP(aIP) if ip == nil { @@ -138,9 +184,12 @@ func GetIPVersion(aIP string) (IPVersion, error) { return IPv6, nil } -// FilterIPListByIPVersion filters a list of ip strings by the provided version -// e.g. a list of mixed ipv4 and ipv6 when filtered with ipv6 version will return a list with -// the ipv6 addresses +// FilterIPListByIPVersion Selects addresses matching a specified IP version +// +// The function receives a slice of string IPs and an IP version to filter by. +// It iterates over the list, determines each address’s version, and keeps +// only those that match the requested type. The resulting slice contains only +// IPv4 or IPv6 addresses as requested. func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string { var filteredIPList []string for _, aIP := range ipList { @@ -151,6 +200,12 @@ func FilterIPListByIPVersion(ipList []string, aIPVersion IPVersion) []string { return filteredIPList } +// findRogueContainersDeclaringPorts identifies containers that declare prohibited ports +// +// The function scans a list of containers, checking each declared port against +// a set of reserved ports. 
For every match it records a non‑compliant report; +// otherwise it logs compliance and creates a compliant report object. It +// returns slices of these report objects for further processing. func findRogueContainersDeclaringPorts(containers []*provider.Container, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { for _, cut := range containers { logger.Info("Testing Container %q", cut) @@ -191,6 +246,14 @@ var ReservedIstioPorts = map[int32]bool{ 15000: true, // Envoy admin port (commands/diagnostics) } +// findRoguePodsListeningToPorts Detects pods that are listening on or declaring reserved ports +// +// The function iterates over each pod, checking its containers for declared +// ports and actual listening sockets against a set of prohibited port numbers. +// It logs detailed information and generates report objects indicating +// compliance status for both container declarations and pod-level listening +// behavior. Non‑compliant pods are reported with the specific port and +// protocol that violates the reservation rules. func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { for _, put := range pods { logger.Info("Testing Pod %q", put) @@ -247,6 +310,13 @@ func findRoguePodsListeningToPorts(pods []*provider.Pod, portsToTest map[int32]b return compliantObjects, nonCompliantObjects } +// TestReservedPortsUsage checks pods for listening on or declaring reserved ports +// +// The function receives a test environment, a map of port numbers that are +// considered reserved, an origin label for those ports, and a logger. It scans +// all pods in the environment to find any containers listening on or declaring +// these ports, excluding known Istio proxy exceptions. 
The result is two slices +// of report objects indicating compliant and non‑compliant findings. func TestReservedPortsUsage(env *provider.TestEnvironment, reservedPorts map[int32]bool, portsOrigin string, logger *log.Logger) (compliantObjects, nonCompliantObjects []*testhelper.ReportObject) { compliantObjectsEntries, nonCompliantObjectsEntries := findRoguePodsListeningToPorts(env.Pods, reservedPorts, portsOrigin, logger) compliantObjects = append(compliantObjects, compliantObjectsEntries...) diff --git a/tests/networking/netutil/netutil.go b/tests/networking/netutil/netutil.go index 6e0ee124b..05ee42793 100644 --- a/tests/networking/netutil/netutil.go +++ b/tests/networking/netutil/netutil.go @@ -33,11 +33,25 @@ const ( indexPort = 4 ) +// PortInfo Describes a network port with number and protocol +// +// This structure holds the numeric value of a listening port and the transport +// protocol used, such as TCP or UDP. It is used to identify unique ports in +// mappings returned by functions that parse command output for listening +// sockets. type PortInfo struct { PortNumber int32 Protocol string } +// parseListeningPorts parses command output into a map of listening ports +// +// The function takes the raw string from a network command and splits it line +// by line, extracting protocol and port number when the state indicates LISTEN. +// It converts the numeric part to an integer, normalizes the protocol name to +// upper case, and stores each unique pair in a map keyed by PortInfo with a +// boolean value of true. Errors during conversion cause an immediate return +// with an error message. 
func parseListeningPorts(cmdOut string) (map[PortInfo]bool, error) { portSet := make(map[PortInfo]bool) @@ -69,6 +83,12 @@ func parseListeningPorts(cmdOut string) (map[PortInfo]bool, error) { return portSet, nil } +// GetListeningPorts Retrieves the set of ports currently listening inside a container +// +// The function runs an nsenter command inside the target container to list open +// sockets, then parses the output into a map keyed by port information. It +// returns this map along with any error that occurs during execution or +// parsing. func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) { outStr, errStr, err := crclient.ExecCommandContainerNSEnter(getListeningPortsCmd, cut) if err != nil || errStr != "" { @@ -78,6 +98,13 @@ func GetListeningPorts(cut *provider.Container) (map[PortInfo]bool, error) { return parseListeningPorts(outStr) } +// GetSSHDaemonPort Retrieves the SSH daemon listening port within a container +// +// This function runs a shell command inside the specified container to locate +// the sshd process and extract its bound TCP port. It executes the command via +// nsenter, handles any execution errors or non‑empty stderr output, and +// returns the trimmed port number as a string. If the command fails or returns +// no output, an error is returned. 
func GetSSHDaemonPort(cut *provider.Container) (string, error) { const findSSHDaemonPort = "ss -tpln | grep sshd | head -1 | awk '{ print $4 }' | awk -F : '{ print $2 }'" outStr, errStr, err := crclient.ExecCommandContainerNSEnter(findSSHDaemonPort, cut) diff --git a/tests/networking/policies/policies.go b/tests/networking/policies/policies.go index 68e134111..3ccbefc86 100644 --- a/tests/networking/policies/policies.go +++ b/tests/networking/policies/policies.go @@ -21,6 +21,13 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// IsNetworkPolicyCompliant Checks if a network policy enforces an empty rule for the specified direction +// +// The function receives a network policy and a policy type. It verifies that the +// policy includes the given type, that its rules are nil or empty, and that the +// overall PolicyTypes slice is not empty. If these conditions hold it returns +// true; otherwise false with an explanatory reason. +// //nolint:gocritic // unnamed results func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType networkingv1.PolicyType) (bool, string) { // As long as we have decided above that there is no pod selector, @@ -57,6 +64,12 @@ func IsNetworkPolicyCompliant(np *networkingv1.NetworkPolicy, policyType network return policyTypeFound, "" } +// LabelsMatch Determines whether a pod satisfies the selector's label constraints +// +// The function examines the labels specified in a pod selector against the +// actual labels of a pod. If the selector has no labels, it immediately returns +// true, meaning all pods match. Otherwise, it checks for at least one matching +// key/value pair and returns true only if such a pair is found. 
func LabelsMatch(podSelectorLabels v1.LabelSelector, podLabels map[string]string) bool { labelMatch := false diff --git a/tests/networking/services/services.go b/tests/networking/services/services.go index c261a5430..ffcbd07aa 100644 --- a/tests/networking/services/services.go +++ b/tests/networking/services/services.go @@ -24,6 +24,14 @@ import ( corev1 "k8s.io/api/core/v1" ) +// GetServiceIPVersion Determines the IP stack type of a Kubernetes Service +// +// The function examines a service's ClusterIP, IPFamilyPolicy, and any +// additional ClusterIPs to decide whether it is single‑stack IPv4, +// single‑stack IPv6, or dual‑stack. It returns an IPVersion value along +// with an error if the configuration cannot be resolved or violates +// expectations. Logging statements provide debug context for each decision +// path. func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, err error) { ipver, err := netcommons.GetIPVersion(aService.Spec.ClusterIP) if err != nil { @@ -65,6 +73,13 @@ func GetServiceIPVersion(aService *corev1.Service) (result netcommons.IPVersion, return result, err } +// ToString Formats a service's namespace, name, cluster IPs, and IP family into a readable string +// +// This function takes a pointer to a Kubernetes Service object and constructs a +// single-line description that includes the service's namespace, name, primary +// ClusterIP, and all associated ClusterIPs. It uses string formatting to +// concatenate these fields in a human‑readable format, which is useful for +// logging and error messages. The result is returned as a plain string. 
func ToString(aService *corev1.Service) (out string) { return fmt.Sprintf("Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v", aService.Namespace, aService.Name, @@ -72,6 +87,12 @@ func ToString(aService *corev1.Service) (out string) { aService.Spec.ClusterIPs) } +// ToStringSlice Lists services with namespace, name, ClusterIP and IP addresses +// +// The function iterates over a slice of service objects, appending formatted +// information for each one to a single string. For every service it records the +// namespace, name, primary ClusterIP, and any additional cluster IPs. The +// resulting multi-line string is returned. func ToStringSlice(manyServices []*corev1.Service) (out string) { for _, aService := range manyServices { out += fmt.Sprintf("Service ns: %s, name: %s ClusterIP:%s ClusterIPs: %v\n", aService.Namespace, @@ -82,6 +103,12 @@ func ToStringSlice(manyServices []*corev1.Service) (out string) { return out } +// isClusterIPsDualStack verifies that a service's ClusterIPs include both IPv4 and IPv6 addresses +// +// The function iterates over each IP string, determines its version using an +// external helper, and records whether any IPv4 or IPv6 address appears. If +// both types are present it returns true; otherwise false. Errors from the +// helper cause an early return with a descriptive message. 
func isClusterIPsDualStack(ips []string) (result bool, err error) { var hasIPv4, hasIPv6 bool for _, ip := range ips { diff --git a/tests/networking/suite.go b/tests/networking/suite.go index 6f61cd94d..beef50089 100644 --- a/tests/networking/suite.go +++ b/tests/networking/suite.go @@ -54,6 +54,17 @@ var ( } ) +// LoadChecks Registers networking test checks in the internal database +// +// This function logs that networking tests are being loaded, creates a check +// group for networking, and adds multiple specific checks such as ICMP +// connectivity for IPv4/IPv6, port usage validation, reserved port checks, +// service dual‑stack verification, network policy compliance, +// partner‑specific ports, DPDK CPU pinning probe restrictions, SRIOV restart +// labels, and MTU configuration. Each check is configured with appropriate skip +// conditions based on the current test environment and a function that performs +// the actual test logic. +// //nolint:funlen func LoadChecks() { log.Debug("Loading %s suite checks", common.NetworkingTestKey) @@ -168,6 +179,13 @@ func LoadChecks() { })) } +// testExecProbDenyAtCPUPinning verifies that pods pinned to CPUs do not use exec probes +// +// The routine iterates over each CPU‑pinned pod, inspecting all containers +// for defined exec probes. If any container contains an exec probe, it records +// the pod as non‑compliant and logs an error; otherwise it marks the pod +// compliant and logs informational output. Finally, it sets the check result +// with lists of compliant and non‑compliant report objects. 
func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -191,6 +209,15 @@ func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Po check.SetResult(compliantObjects, nonCompliantObjects) } +// testUndeclaredContainerPortsUsage Verifies that every port a pod’s containers actually listen on is declared in the container specification +// +// The function iterates over all pods, collecting the ports defined in each +// container’s spec and then retrieving the actual listening ports via a +// system call. It compares these two sets, ignoring Istio proxy reserved ports, +// and records any mismatches as non‑compliant objects. Finally, it reports +// compliant or non‑compliant results for each pod based on whether all +// listening ports were properly declared. +// //nolint:funlen func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject @@ -264,7 +291,12 @@ func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.Test check.SetResult(compliantObjects, nonCompliantObjects) } -// testDefaultNetworkConnectivity test the connectivity between the default interfaces of containers under test +// testNetworkConnectivity establishes ICMP connectivity between pods +// +// The function builds a test context for the specified IP version and interface +// type, then runs ping tests across all eligible pod pairs. It records both +// successful and failed pings into compliant or non‑compliant report objects. +// If no network has enough pods to test, it logs that the test is skipped. 
func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommons.IPVersion, aType netcommons.IFType, check *checksdb.Check) { netsUnderTest := icmp.BuildNetTestContext(env.Pods, aIPVersion, aType, check.GetLogger()) report, skip := icmp.RunNetworkingTests(netsUnderTest, defaultNumPings, aIPVersion, check.GetLogger()) @@ -274,6 +306,12 @@ func testNetworkConnectivity(env *provider.TestEnvironment, aIPVersion netcommon check.SetResult(report.CompliantObjectsOut, report.NonCompliantObjectsOut) } +// testOCPReservedPortsUsage Verifies pods do not listen on OpenShift reserved ports +// +// The function builds a map of ports that OpenShift reserves, then calls a +// shared routine to scan all running pods for listeners on those ports. It +// collects compliant and non‑compliant findings, passing them to the test +// framework through the check object’s result setter. func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) { // List of all ports reserved by OpenShift OCPReservedPorts := map[int32]bool{ @@ -283,6 +321,12 @@ func testOCPReservedPortsUsage(check *checksdb.Check, env *provider.TestEnvironm check.SetResult(compliantObjects, nonCompliantObjects) } +// testPartnerSpecificTCPPorts Verifies that pods do not listen on partner‑reserved TCP ports +// +// This routine defines a set of TCP ports reserved by the partner and checks +// all pods in the environment to ensure none are listening on those ports. It +// calls a common test helper to identify compliant and non‑compliant objects, +// then records the results in the provided check object. 
func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnvironment) { // List of all of the ports reserved by partner ReservedPorts := map[int32]bool{ @@ -300,6 +344,14 @@ func testPartnerSpecificTCPPorts(check *checksdb.Check, env *provider.TestEnviro check.SetResult(compliantObjects, nonCompliantObjects) } +// testDualStackServices Verifies that each Service supports IPv6 or dual stack +// +// The function iterates over all services in the test environment, determines +// their IP version using a helper, and logs whether they are compliant. +// Services that only support IPv4 or cannot be evaluated produce +// non‑compliant report objects; otherwise compliant ones are recorded. +// Finally, it sets the check result with lists of compliant and non‑compliant +// reports. func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -330,6 +382,13 @@ func testDualStackServices(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } +// testNetworkPolicyDenyAll Verifies that each pod has default deny-all ingress and egress policies +// +// The routine iterates over all pods in the test environment, checking for +// matching network policies within the same namespace. It confirms that a +// policy with empty rules exists for both ingress and egress, indicating a +// deny‑all configuration. Pods lacking either rule are logged as +// non‑compliant, while compliant pods are recorded accordingly. 
func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -394,6 +453,14 @@ func testNetworkPolicyDenyAll(check *checksdb.Check, env *provider.TestEnvironme check.SetResult(compliantObjects, nonCompliantObjects) } +// testRestartOnRebootLabelOnPodsUsingSriov Verifies SRIOV pods have a restart-on-reboot label set to true +// +// The function iterates over each pod that uses SRIOV, checking for the +// presence of the restart‑on‑reboot label. If the label is missing or its +// value is not "true", it records a non‑compliant report object and logs an +// error; otherwise it records a compliant object and logs success. After +// processing all pods, it sets the check result with the lists of compliant and +// non‑compliant objects. func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods []*provider.Pod) { const ( restartOnRebootLabel = "restart-on-reboot" @@ -424,6 +491,13 @@ func testRestartOnRebootLabelOnPodsUsingSriov(check *checksdb.Check, sriovPods [ check.SetResult(compliantObjects, nonCompliantObjects) } +// testNetworkAttachmentDefinitionSRIOVUsingMTU evaluates SRIOV pods for explicit MTU configuration +// +// The function iterates over a list of SRIOV-enabled pods, checking whether +// each pod’s network attachment definition includes an explicitly set MTU +// value. It logs informational messages for compliant pods and error messages +// for non‑compliant or failed checks, creating report objects accordingly. +// Finally, it aggregates the results into the check result sets for reporting. 
func testNetworkAttachmentDefinitionSRIOVUsingMTU(check *checksdb.Check, sriovPods []*provider.Pod) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject diff --git a/tests/observability/pdb/pdb.go b/tests/observability/pdb/pdb.go index 9512de7a6..198940cd9 100644 --- a/tests/observability/pdb/pdb.go +++ b/tests/observability/pdb/pdb.go @@ -12,7 +12,13 @@ const ( percentageDivisor = 100 ) -// percentageToFloat converts a percentage string to a float +// percentageToFloat Parses a percentage string into a decimal value +// +// The function reads a string that represents a percentage and extracts the +// numeric part using formatted scanning. It then converts this number to a +// float64 and divides by a divisor to express it as a proportion, such as 0.25 +// for twenty‑five percent. If the input is not in the expected format, an +// error is returned. func percentageToFloat(percentage string) (float64, error) { var percentageFloat float64 _, err := fmt.Sscanf(percentage, "%f%%", &percentageFloat) @@ -22,6 +28,14 @@ func percentageToFloat(percentage string) (float64, error) { return percentageFloat / percentageDivisor, nil } +// CheckPDBIsValid Validates a PodDisruptionBudget against replica count +// +// The function checks the .spec.minAvailable and .spec.maxUnavailable fields of +// a PodDisruptionBudget, converting them to integer values based on the +// provided replica count or a default of one. It ensures minAvailable is +// non‑zero and does not exceed replicas, and that maxUnavailable is less than +// the number of pods. If any rule fails, it returns false with an explanatory +// error; otherwise it returns true. 
func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, error) { var replicaCount int32 if replicas != nil { @@ -66,6 +80,13 @@ func CheckPDBIsValid(pdb *policyv1.PodDisruptionBudget, replicas *int32) (bool, return true, nil } +// intOrStringToValue Converts an IntOrString to a concrete integer based on replica count +// +// The function examines the type of the input value; if it is an integer, that +// value is returned directly. If it is a string representing a percentage, the +// percentage is parsed and multiplied by the number of replicas, rounding to +// the nearest even integer. Errors are produced for unsupported types or +// invalid percentage strings. func intOrStringToValue(intOrStr *intstr.IntOrString, replicas int32) (int, error) { switch intOrStr.Type { case intstr.Int: diff --git a/tests/observability/suite.go b/tests/observability/suite.go index a93df083e..2677c5dd6 100644 --- a/tests/observability/suite.go +++ b/tests/observability/suite.go @@ -48,6 +48,14 @@ var ( } ) +// LoadChecks Initializes the observability test suite +// +// The function creates a new checks group for observability and registers +// several checks related to logging, CRD status subresources, termination +// message policy, pod disruption budgets, and API compatibility with future +// OpenShift releases. Each check is configured with optional skip functions +// that determine whether the environment contains relevant objects before +// execution. Debug output records the loading of this suite. func LoadChecks() { log.Debug("Loading %s suite checks", common.ObservabilityTestKey) @@ -90,8 +98,13 @@ func LoadChecks() { })) } -// containerHasLoggingOutput helper function to get the last line of logging output from -// a container. Returns true in case some output was found, false otherwise. 
+// containerHasLoggingOutput Checks whether a container has produced any log output +// +// The function retrieves the last two lines of a pod’s logs via the +// Kubernetes API, reads them into memory, and returns true if any content was +// found. It handles errors from establishing the stream or copying data, +// returning false with an error in those cases. The result indicates whether +// the container produced at least one line to stdout or stderr. func containerHasLoggingOutput(cut *provider.Container) (bool, error) { ocpClient := clientsholder.GetClientsHolder() @@ -118,6 +131,13 @@ func containerHasLoggingOutput(cut *provider.Container) (bool, error) { return buf.String() != "", nil } +// testContainersLogging Verifies that containers emit log output to stdout or stderr +// +// The function iterates over all containers under test, attempts to fetch their +// most recent log lines, and records whether any logs were present. Containers +// lacking logs or encountering errors are marked non‑compliant, while those +// producing at least one line are marked compliant. The results are aggregated +// into report objects for later analysis. func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment) { // Iterate through all the CUTs to get their log output. The TC checks that at least // one log line is found. @@ -147,7 +167,14 @@ func testContainersLogging(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } -// testCrds testing if crds have a status sub resource set +// testCrds Verifies CRD status subresource presence +// +// The function iterates over all custom resource definitions in the test +// environment, checking each version for a "status" property in its schema. For +// every missing status field it logs an error and records a non‑compliant +// report object; otherwise it logs success and records a compliant report. 
+// Finally, it sets the check result with lists of compliant and non‑compliant +// objects. func testCrds(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -173,7 +200,13 @@ func testCrds(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testTerminationMessagePolicy tests to make sure that pods +// testTerminationMessagePolicy Verifies container termination message policies +// +// The function iterates over each container in the test environment, checking +// whether its TerminationMessagePolicy is set to FallbackToLogsOnError. +// Containers that meet this requirement are recorded as compliant; others are +// marked non-compliant with an explanatory report object. After processing all +// containers, the check results are stored for reporting. func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -194,6 +227,14 @@ func testTerminationMessagePolicy(check *checksdb.Check, env *provider.TestEnvir check.SetResult(compliantObjects, nonCompliantObjects) } +// testPodDisruptionBudgets Verifies that deployments and stateful sets have valid pod disruption budgets +// +// The function iterates through all deployments and stateful sets in the test +// environment, checking for a matching PodDisruptionBudget by label selector. +// It validates each found PDB against the replica count of its controller using +// an external checker. Results are recorded as compliant or non‑compliant +// report objects, which are then set on the check result. 
+// //nolint:funlen func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject @@ -281,11 +322,14 @@ func testPodDisruptionBudgets(check *checksdb.Check, env *provider.TestEnvironme check.SetResult(compliantObjects, nonCompliantObjects) } -// Function to build a map from workload service accounts -// to their associated to-be-deprecated APIs and the release version -// Filters: -// - status.removedInRelease is not empty -// - Verifies if the service account is inside the workload SA list from env.ServiceAccounts +// buildServiceAccountToDeprecatedAPIMap Creates a mapping of service accounts to APIs slated for removal +// +// The function receives a slice of API request count objects and a set of +// workload service account names. It iterates through the usage data, +// extracting each service account that appears in the workload list and +// recording any API whose removal release is specified. The result is a nested +// map where each key is a service account name and its value maps deprecated +// APIs to their corresponding Kubernetes release version. 
func buildServiceAccountToDeprecatedAPIMap(apiRequestCounts []apiserv1.APIRequestCount, workloadServiceAccountNames map[string]struct{}) map[string]map[string]string { // Define a map where the key is the service account name and the value is another map // The inner map key is the API name and the value is the release version in which it will be removed @@ -322,7 +366,15 @@ func buildServiceAccountToDeprecatedAPIMap(apiRequestCounts []apiserv1.APIReques return serviceAccountToDeprecatedAPIs } -// Evaluate workload API compliance with the next Kubernetes version +// evaluateAPICompliance Assesses whether service accounts use APIs that will be removed in the next Kubernetes release +// +// The function parses the current Kubernetes version, increments it to +// determine the upcoming release, and then checks each deprecated API used by a +// service account against the removal schedule. It creates report objects +// indicating compliance or non‑compliance for each API, adding relevant +// fields such as the API name, service account, and removal or active release. +// If no APIs are detected, it generates pass reports for all workload service +// accounts. func evaluateAPICompliance( serviceAccountToDeprecatedAPIs map[string]map[string]string, kubernetesVersion string, @@ -380,7 +432,12 @@ func evaluateAPICompliance( return compliantObjects, nonCompliantObjects } -// Function to extract unique workload-related service account names from the environment +// extractUniqueServiceAccountNames collects distinct service account names from the test environment +// +// It receives a test environment, iterates over its ServiceAccounts slice, and +// inserts each name into a map to ensure uniqueness. The resulting map has keys +// of type string and empty struct values, providing an efficient set +// representation for later use in compatibility checks. 
func extractUniqueServiceAccountNames(env *provider.TestEnvironment) map[string]struct{} { uniqueServiceAccountNames := make(map[string]struct{}) @@ -392,7 +449,13 @@ func extractUniqueServiceAccountNames(env *provider.TestEnvironment) map[string] return uniqueServiceAccountNames } -// Function to test API compatibility with the next OCP release +// testAPICompatibilityWithNextOCPRelease Checks whether workload APIs remain available in the upcoming OpenShift release +// +// The function first verifies that the cluster is an OpenShift distribution, +// then gathers API request usage data via the ApiserverV1 client. It maps each +// service account to any deprecated APIs it has used and compares these +// deprecation releases against the next minor Kubernetes version. Results are +// recorded as compliant or non‑compliant objects for reporting. func testAPICompatibilityWithNextOCPRelease(check *checksdb.Check, env *provider.TestEnvironment) { isOCP := provider.IsOCPCluster() check.LogInfo("Is OCP: %v", isOCP) diff --git a/tests/operator/access/access.go b/tests/operator/access/access.go index 4bf9282b7..4e85689db 100644 --- a/tests/operator/access/access.go +++ b/tests/operator/access/access.go @@ -4,6 +4,14 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" ) +// PermissionsHaveBadRule detects if any RBAC rule grants access to security context constraints +// +// The function iterates over a slice of cluster permissions, examining each +// rule for the presence of the security API group or a wildcard. When such a +// group is found, it then checks whether the rule targets the +// securitycontextconstraints resource or all resources. If any matching rule +// exists, the function returns true to indicate a problematic configuration; +// otherwise it returns false. 
func PermissionsHaveBadRule(clusterPermissions []v1alpha1.StrategyDeploymentPermissions) bool { badRuleFound := false for permissionIndex := range clusterPermissions { diff --git a/tests/operator/catalogsource/catalogsource.go b/tests/operator/catalogsource/catalogsource.go index 94526473d..ab095426e 100644 --- a/tests/operator/catalogsource/catalogsource.go +++ b/tests/operator/catalogsource/catalogsource.go @@ -5,6 +5,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" ) +// SkipPMBasedOnChannel Decides whether a package manifest should be ignored based on channel entries +// +// The function examines each channel in the provided list, checking if any +// entry name matches the given CSV name. If a match is found, it indicates that +// the package manifest belongs to the same operator and should not be skipped. +// It returns true when no matching entry exists, meaning the manifest can be +// ignored; otherwise false. func SkipPMBasedOnChannel(channels []olmpkgv1.PackageChannel, csvName string) bool { // This logic is in place because it is possible for an operator to pull from a multiple package manifests. skipPMBasedOnChannel := true diff --git a/tests/operator/helper.go b/tests/operator/helper.go index 2928e89b2..3adc741bb 100644 --- a/tests/operator/helper.go +++ b/tests/operator/helper.go @@ -31,13 +31,24 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" ) -// CsvResult holds the results of the splitCsv function. +// CsvResult contains the parsed CSV components +// +// This structure holds the two parts produced by splitting a comma-separated +// string: one part is stored as NameCsv and the other, if prefixed with "ns=", +// is stored as Namespace. It is used to return values from the SplitCsv +// function. type CsvResult struct { NameCsv string Namespace string } -// splitCsv splits the input string to extract namecsv and namespace. 
+// SplitCsv Separates a CSV string into its name and namespace components +// +// This function takes a comma‑delimited string, splits it into parts, trims +// whitespace, and assigns the portion prefixed with "ns=" to the Namespace +// field while the remaining part becomes NameCsv. It returns a CsvResult struct +// containing these two fields. If no namespace prefix is present, Namespace +// remains empty. func SplitCsv(csv string) CsvResult { // Split by comma to separate components parts := strings.Split(csv, ",") @@ -55,6 +66,13 @@ func SplitCsv(csv string) CsvResult { return result } +// OperatorInstalledMoreThanOnce Detects if the same operator appears more than once +// +// The function compares two operator instances by examining their CSV names and +// versions. It first removes the version suffix from each CSV name, then checks +// that the base names match while the versions differ. If both conditions hold, +// it reports that the operator is installed multiple times; otherwise it +// returns false. func OperatorInstalledMoreThanOnce(operator1, operator2 *provider.Operator) bool { // Safeguard against nil operators (should not happen) if operator1 == nil || operator2 == nil { @@ -88,6 +106,13 @@ func OperatorInstalledMoreThanOnce(operator1, operator2 *provider.Operator) bool return false } +// getAllPodsBy Filters pods by namespace +// +// The function iterates over a slice of pod objects, selecting only those whose +// Namespace field matches the provided namespace string. Matching pods are +// appended to a new slice that is returned to the caller. This helper +// simplifies gathering all pods within a specific namespace for further +// processing. 
func getAllPodsBy(namespace string, allPods []*provider.Pod) (podsInNamespace []*provider.Pod) { for i := range allPods { pod := allPods[i] @@ -98,6 +123,12 @@ func getAllPodsBy(namespace string, allPods []*provider.Pod) (podsInNamespace [] return podsInNamespace } +// getCsvsBy Filters CSVs to a specific namespace +// +// This function iterates over all provided ClusterServiceVersion objects, +// selecting only those whose Namespace field matches the supplied string. The +// matching CSVs are collected into a slice that is returned to the caller. If +// no CSVs match, an empty slice is returned. func getCsvsBy(namespace string, allCsvs []*v1alpha1.ClusterServiceVersion) (csvsInNamespace []*v1alpha1.ClusterServiceVersion) { for _, csv := range allCsvs { if csv.Namespace == namespace { @@ -107,14 +138,34 @@ func getCsvsBy(namespace string, allCsvs []*v1alpha1.ClusterServiceVersion) (csv return csvsInNamespace } +// isSingleNamespacedOperator Determines if an operator is single‑namespace scoped but targets a different namespace +// +// The function checks that the targetNamespaces slice contains exactly one +// entry and that this entry differs from the operatorNamespace. If both +// conditions hold, it returns true indicating the operator runs in its own +// namespace yet serves another namespace; otherwise it returns false. func isSingleNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool { return len(targetNamespaces) == 1 && operatorNamespace != targetNamespaces[0] } +// isMultiNamespacedOperator determines if an operator targets multiple namespaces excluding its own +// +// This function checks whether the list of target namespaces for an operator +// contains more than one entry and that the operator’s own namespace is not +// among them. It returns true only when the operator is intended to operate +// across several distinct namespaces, indicating a multi‑namespaced +// deployment scenario. 
func isMultiNamespacedOperator(operatorNamespace string, targetNamespaces []string) bool { return len(targetNamespaces) > 1 && !stringhelper.StringInSlice(targetNamespaces, operatorNamespace, false) } +// checkIfCsvUnderTest determines if a CSV is part of the test set +// +// The function iterates through the global list of operators defined for +// testing, checking whether any entry’s CSV name matches that of the supplied +// object. If a match is found it returns true; otherwise false. This boolean +// indicates whether the given CSV should be considered under test in subsequent +// validation logic. func checkIfCsvUnderTest(csv *v1alpha1.ClusterServiceVersion) bool { for _, testOperator := range env.Operators { if testOperator.Csv.Name == csv.Name { @@ -124,6 +175,13 @@ func checkIfCsvUnderTest(csv *v1alpha1.ClusterServiceVersion) bool { return false } +// isCsvInNamespaceClusterWide determines if a CSV is cluster‑wide based on its annotations +// +// The function scans all provided ClusterServiceVersions for the one matching +// the given name. It checks whether that CSV has a nonempty +// "olm.targetNamespaces" annotation; if so, it marks the CSV as not +// cluster‑wide. The result is returned as a boolean indicating whether the +// operator applies across the entire cluster. func isCsvInNamespaceClusterWide(csvName string, allCsvs []*v1alpha1.ClusterServiceVersion) bool { isClusterWide := true for _, eachCsv := range allCsvs { @@ -138,6 +196,13 @@ func isCsvInNamespaceClusterWide(csvName string, allCsvs []*v1alpha1.ClusterServ return isClusterWide } +// checkValidOperatorInstallation Determines if a namespace hosts only valid single or multi‑namespace operators +// +// The function inspects all ClusterServiceVersions in the specified namespace, +// categorising them as installed under test, not under test, or targeting other +// namespaces. It also checks for non‑operator pods that do not belong to any +// operator. 
The return values indicate whether the namespace is dedicated to +// valid operators and provide lists of any problematic objects. func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamespace bool, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators []string, err error) { // 1. operator installation checks @@ -184,6 +249,14 @@ func checkValidOperatorInstallation(namespace string) (isDedicatedOperatorNamesp return isValid, singleOrMultiNamespaceOperators, nonSingleOrMultiNamespaceOperators, csvsTargetingNamespace, operatorsFoundButNotUnderTest, podsNotBelongingToOperators, nil } +// findPodsNotBelongingToOperators identifies pods that are not managed by any operator in the given namespace +// +// The function retrieves all pods within a namespace, then for each pod +// determines its top-level owners using helper logic. It checks whether any +// owner is a ClusterServiceVersion belonging to the same namespace; if none +// exist, the pod name is added to the result list. The returned slice contains +// names of pods that are not controlled by an operator, along with an error if +// ownership resolution fails. func findPodsNotBelongingToOperators(namespace string) (podsBelongingToNoOperators []string, err error) { allPods := getAllPodsBy(namespace, env.AllPods) for index := range allPods { diff --git a/tests/operator/openapi/openapi.go b/tests/operator/openapi/openapi.go index 84bd54f22..c1304a781 100644 --- a/tests/operator/openapi/openapi.go +++ b/tests/operator/openapi/openapi.go @@ -7,6 +7,12 @@ import ( apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) +// IsCRDDefinedWithOpenAPI3Schema Checks if a CRD uses an OpenAPI v3 schema +// +// The function inspects each version of the provided CustomResourceDefinition, +// converting its schema definition to a string. 
It searches for the substring +// that identifies an OpenAPI v3 schema, ignoring case. If any version contains +// this substring, it returns true; otherwise, it returns false. func IsCRDDefinedWithOpenAPI3Schema(crd *apiextv1.CustomResourceDefinition) bool { for _, version := range crd.Spec.Versions { crdSchema := version.Schema.String() diff --git a/tests/operator/phasecheck/phasecheck.go b/tests/operator/phasecheck/phasecheck.go index 640424bcf..c4b527119 100644 --- a/tests/operator/phasecheck/phasecheck.go +++ b/tests/operator/phasecheck/phasecheck.go @@ -31,6 +31,12 @@ const ( timeout = 5 * time.Minute ) +// WaitOperatorReady Waits until an operator reaches the Succeeded phase +// +// The function repeatedly polls a ClusterServiceVersion object, returning true +// if it enters the Succeeded phase before a timeout or false if it fails or +// times out. It also handles transient pod restarts by refreshing the CSV on +// each iteration and logs debugging information throughout. func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool { oc := clientsholder.GetClientsHolder() start := time.Now() @@ -65,11 +71,22 @@ func WaitOperatorReady(csv *v1alpha1.ClusterServiceVersion) bool { return false } +// isOperatorPhaseSucceeded Determines if an operator is in the succeeded phase +// +// The function inspects the status of a ClusterServiceVersion object and +// returns true when its phase equals the succeeded constant. It logs the +// current phase for debugging purposes before performing the comparison. The +// return value indicates whether the operator has completed successfully. func isOperatorPhaseSucceeded(csv *v1alpha1.ClusterServiceVersion) bool { log.Debug("Checking succeeded status phase for csv %s (ns %s). 
Phase: %v", csv.Name, csv.Namespace, csv.Status.Phase) return csv.Status.Phase == v1alpha1.CSVPhaseSucceeded } +// isOperatorPhaseFailedOrUnknown determines if a CSV has failed or is unknown +// +// The function examines the status phase of a ClusterServiceVersion object. It +// returns true when the phase equals Failed or Unknown, indicating that the +// operator cannot reach a successful state. Otherwise it returns false. func isOperatorPhaseFailedOrUnknown(csv *v1alpha1.ClusterServiceVersion) bool { log.Debug("Checking failed status phase for csv %s (ns %s). Phase: %v", csv.Name, csv.Namespace, csv.Status.Phase) return csv.Status.Phase == v1alpha1.CSVPhaseFailed || diff --git a/tests/operator/suite.go b/tests/operator/suite.go index 76eb8986e..a1806479a 100644 --- a/tests/operator/suite.go +++ b/tests/operator/suite.go @@ -46,6 +46,15 @@ var ( } ) +// LoadChecks Registers operator test checks with the internal database +// +// This routine creates a new check group for operator tests, then adds a series +// of predefined checks to that group. Each check is configured with optional +// skip logic and a function that performs the actual validation against the +// current test environment. The group is logged when created and all added +// checks are registered in the shared checks database so they can be executed +// during a certification run. 
+// //nolint:funlen func LoadChecks() { log.Debug("Loading %s suite checks", common.OperatorTestKey) @@ -138,7 +147,14 @@ func LoadChecks() { })) } -// This function checks if single/multi namespaced operators should only be installed in the tenant dedicated operator namespace +// testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces Validates that only single or multi‑namespaced operators reside in tenant namespaces +// +// The routine iterates over all operator namespaces found in the test +// environment, checks each for dedicated status, and gathers any operators or +// pods that violate the single/multi‑namespace rule. It builds compliance +// reports per namespace, marking those that contain only valid operators as +// compliant and reporting detailed reasons for non‑compliance otherwise. The +// results are set on the provided check object. func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *checksdb.Check, env *provider.TestEnvironment) { check.LogInfo("Starting testOnlySingleNamespacedOperatorsAllowedInTenantNamespaces") @@ -204,7 +220,14 @@ func testOnlySingleOrMultiNamespacedOperatorsAllowedInTenantNamespaces(check *ch } } -// This function check if the Operator CRD version follows K8s versioning +// testOperatorCrdVersioning Verifies that all operator CRDs use Kubernetes-compatible versioning +// +// The routine iterates over each CRD in the test environment, checking every +// declared version against a Kubernetes semantic‑version pattern. If any +// non‑conforming version is found, it logs an error and records the CRD as +// non‑compliant; otherwise it logs success and marks it compliant. Finally, +// it reports the lists of compliant and non‑compliant objects for the test +// result. 
func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironment) { check.LogInfo("Starting testOperatorCrdVersioning") var compliantObjects []*testhelper.ReportObject @@ -238,7 +261,13 @@ func testOperatorCrdVersioning(check *checksdb.Check, env *provider.TestEnvironm check.SetResult(compliantObjects, nonCompliantObjects) } -// This function checks if the operator CRD is defined with OpenAPI 3 specification +// testOperatorCrdOpenAPISpec Verifies that operator CRDs use OpenAPI v3 schemas +// +// The function iterates over all CRDs in the test environment, checks whether +// each has an OpenAPI v3 schema defined, and records compliance status. It logs +// the result for each CRD and collects compliant and non‑compliant objects +// into separate slices. Finally it reports these results via the check’s +// SetResult method. func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnvironment) { check.LogInfo("Starting testOperatorCrdOpenAPISpec") var compliantObjects []*testhelper.ReportObject @@ -259,7 +288,12 @@ func testOperatorCrdOpenAPISpec(check *checksdb.Check, env *provider.TestEnviron check.SetResult(compliantObjects, nonCompliantObjects) } -// This function checks for semantic versioning of the installed operators +// testOperatorSemanticVersioning Verifies operators use semantic versioning +// +// The function iterates through each operator in the test environment, checks +// if its version string conforms to semantic version rules, and logs the +// outcome. It collects compliant and non‑compliant operators into separate +// lists of report objects, then sets these as the result for the check. 
func testOperatorSemanticVersioning(check *checksdb.Check, env *provider.TestEnvironment) { check.LogInfo("Starting testOperatorSemanticVersioning") var compliantObjects []*testhelper.ReportObject @@ -283,6 +317,12 @@ func testOperatorSemanticVersioning(check *checksdb.Check, env *provider.TestEnv check.SetResult(compliantObjects, nonCompliantObjects) } +// testOperatorInstallationPhaseSucceeded Verifies that each operator reaches the Succeeded phase +// +// The function iterates over all operators in the test environment, waiting for +// each ClusterServiceVersion to report a Succeeded status. It logs success or +// failure, collects compliant and non‑compliant objects into report entries, +// and finally records the results on the check object. func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -302,6 +342,14 @@ func testOperatorInstallationPhaseSucceeded(check *checksdb.Check, env *provider check.SetResult(compliantObjects, nonCompliantObjects) } +// testOperatorInstallationAccessToSCC Checks operators’ CSV cluster permissions for disallowed SCC access +// +// The function iterates over all operators in the test environment, examining +// each operator’s ClusterServiceVersion for clusterPermissions. If no +// permissions are defined it records compliance; otherwise it calls a helper to +// detect any rule granting access to securitycontextconstraints and logs +// non‑compliance. Results are collected into report objects and set on the +// check. 
func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -330,6 +378,13 @@ func testOperatorInstallationAccessToSCC(check *checksdb.Check, env *provider.Te check.SetResult(compliantObjects, nonCompliantObjects) } +// testOperatorOlmSubscription Verifies that each operator has an OLM subscription +// +// The function iterates over all operators in the test environment, logging +// status for each one. It checks whether a SubscriptionName exists; if missing, +// it records a non‑compliant report object and logs an error. If present, it +// creates a compliant report object noting the subscription was found. Finally, +// it sets the check result with the collected objects. func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -351,6 +406,13 @@ func testOperatorOlmSubscription(check *checksdb.Check, env *provider.TestEnviro check.SetResult(compliantObjects, nonCompliantObjects) } +// testOperatorSingleCrdOwner Verifies that each CustomResourceDefinition is owned by only one operator +// +// The function builds a mapping of CRD names to the operators that declare +// ownership in their CSVs, filtering duplicate versions per operator. It then +// iterates through this map, marking any CRD with multiple owners as +// non‑compliant and generating report objects accordingly. Finally, it +// records compliant and non‑compliant results on the check instance. 
func testOperatorSingleCrdOwner(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -393,6 +455,13 @@ func testOperatorSingleCrdOwner(check *checksdb.Check, env *provider.TestEnviron check.SetResult(compliantObjects, nonCompliantObjects) } +// testOperatorPodsNoHugepages Checks that operator pods do not use hugepages +// +// The function iterates over all CSV-to-pod mappings in the test environment, +// examining each pod to determine whether it requests hugepage memory. Pods +// requesting hugepages are marked non‑compliant and logged as errors; +// otherwise they are considered compliant and logged positively. After +// processing all pods, the results are set on the check object for reporting. func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -414,6 +483,13 @@ func testOperatorPodsNoHugepages(check *checksdb.Check, env *provider.TestEnviro } } +// testOperatorOlmSkipRange Verifies the presence of an OLM skipRange annotation on each operator +// +// The function iterates over all operators in the test environment, checking +// whether each has a non-empty "olm.skipRange" annotation. It logs information +// about the check and records compliant or non-compliant operators accordingly. +// Finally, it sets the result of the check with lists of compliant and +// non‑compliant report objects. 
func testOperatorOlmSkipRange(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -434,6 +510,12 @@ func testOperatorOlmSkipRange(check *checksdb.Check, env *provider.TestEnvironme check.SetResult(compliantObjects, nonCompliantObjects) } +// testMultipleSameOperators Verifies operators are not duplicated across installations +// +// The function iterates over all installed operators in the test environment, +// checking each pair for duplicate CSV names with different versions. It +// records non‑compliant operators when a duplicate is found and marks others +// as compliant. The results are then set on the provided check object. func testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -463,6 +545,14 @@ func testMultipleSameOperators(check *checksdb.Check, env *provider.TestEnvironm check.SetResult(compliantObjects, nonCompliantObjects) } +// testOperatorCatalogSourceBundleCount Verifies catalog sources contain fewer than a thousand bundle images +// +// The function iterates over operators in the test environment, matching each +// to its catalog source via package manifests. It then counts referenced +// bundles using probe containers for older OpenShift versions or package +// manifests otherwise. If a catalog source exceeds 1000 bundles it logs an +// error and records non‑compliance; otherwise it records compliance. 
+// //nolint:funlen func testOperatorCatalogSourceBundleCount(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject diff --git a/tests/performance/suite.go b/tests/performance/suite.go index 8c5e4876e..c4137eddd 100644 --- a/tests/performance/suite.go +++ b/tests/performance/suite.go @@ -79,6 +79,14 @@ var ( } ) +// LoadChecks Loads the performance test suite checks into the registry +// +// The function logs that it is loading checks for the performance suite, +// creates or retrieves a checks group identified by the performance key, and +// then registers several specific checks. Each check is configured with +// optional skip conditions and a callback that runs the actual test logic. The +// setup prepares the tests to be executed later as part of the overall testing +// framework. func LoadChecks() { log.Debug("Loading %s suite checks", common.PerformanceTestKey) @@ -128,6 +136,15 @@ func LoadChecks() { })) } +// testLimitedUseOfExecProbes Evaluates the use of exec probes across containers +// +// The routine iterates through all pods and their containers, checking +// liveness, startup, and readiness probes that execute commands. It counts each +// exec probe and records compliance if the period exceeds a defined threshold; +// otherwise it logs an error. If the total number of exec probes reaches ten or +// more, the entire CNF is marked non‑compliant, and the result is set +// accordingly. 
+// //nolint:funlen func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject @@ -208,6 +225,14 @@ func testLimitedUseOfExecProbes(check *checksdb.Check, env *provider.TestEnviron check.SetResult(compliantObjects, nonCompliantObjects) } +// testExclusiveCPUPool Verifies that all containers in a pod use the same CPU pool +// +// The function iterates over every pod in the test environment, counting how +// many of its containers are assigned to exclusive CPUs versus shared CPUs. If +// both types appear within a single pod it logs an error and records the pod as +// non‑compliant, including counts for each pool. Pods that contain only one +// type of CPU assignment are marked compliant. Finally, the results are stored +// in the check object. func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -241,6 +266,13 @@ func testExclusiveCPUPool(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } +// testSchedulingPolicyInCPUPool Evaluates CPU scheduling compliance for container processes +// +// The function iterates over a set of containers, retrieves each container's +// PID namespace, then lists all process IDs within that namespace. For every +// process, it checks whether the CPU scheduling policy and priority meet the +// specified scheduling type. Containers are reported as compliant or non‑compliant based on +// the outcomes of these checks. 
func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvironment, podContainers []*provider.Container, schedulingType string) { var compliantContainersPids []*testhelper.ReportObject @@ -276,6 +308,14 @@ func testSchedulingPolicyInCPUPool(check *checksdb.Check, env *provider.TestEnvi check.SetResult(compliantContainersPids, nonCompliantContainersPids) } +// getExecProbesCmds Collects normalized exec probe command strings +// +// The function examines a container's liveness, readiness, and startup probes +// for an Exec configuration. For each present probe it joins the command array +// into a single string, removes any extra whitespace, and stores that cleaned +// command as a key in a map with a true value. The resulting map is used to +// quickly determine whether a running process matches one of the probe +// commands. func getExecProbesCmds(c *provider.Container) map[string]bool { cmds := map[string]bool{} @@ -302,6 +342,14 @@ func getExecProbesCmds(c *provider.Container) map[string]bool { const noProcessFoundErrMsg = "No such process" +// testRtAppsNoExecProbes Verifies that non‑guaranteed containers without host PID do not use exec probes with real‑time scheduling +// +// The routine iterates over all eligible containers, checking whether they +// declare exec probes. For those that do, it gathers running processes, filters +// out probe processes, and inspects the CPU scheduling policy of each remaining +// process. If any process runs under a real‑time policy, the container is +// marked non‑compliant; otherwise it is compliant. Results are recorded as +// report objects for later aggregation. 
func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -361,6 +409,14 @@ func testRtAppsNoExecProbes(check *checksdb.Check, env *provider.TestEnvironment check.SetResult(compliantObjects, nonCompliantObjects) } +// filterProbeProcesses Separates exec probe processes from other container processes +// +// The function receives a list of all running processes in a container and the +// container definition. It identifies which processes belong to exec probes by +// comparing command lines with those defined in liveness, readiness, and +// startup probes. Processes that are part of an exec probe or their descendants +// are marked as compliant and excluded from further checks, while the remaining +// processes are returned for additional verification. func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Container) (notExecProbeProcesses []*crclient.Process, compliantObjects []*testhelper.ReportObject) { execProbeProcesses := []int{} execProbesCmds := getExecProbesCmds(cut) diff --git a/tests/platform/bootparams/bootparams.go b/tests/platform/bootparams/bootparams.go index 7063d71c5..a77206334 100644 --- a/tests/platform/bootparams/bootparams.go +++ b/tests/platform/bootparams/bootparams.go @@ -31,6 +31,15 @@ const ( kernelArgscommand = "cat /host/proc/cmdline" ) +// TestBootParamsHelper Verifies that node kernel parameters match the MachineConfig +// +// The function retrieves the expected kernel arguments from a MachineConfig, +// then obtains the current command‑line arguments from both the container’s +// process and the GRUB configuration on the same node. It compares each +// argument value against the expected one, logging warnings when mismatches +// occur and debug messages for matches. 
If any required probe pod is missing or +// an error occurs during retrieval, it returns an error; otherwise it completes +// silently. func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container, logger *log.Logger) error { probePod := env.ProbePods[cut.NodeName] if probePod == nil { @@ -66,11 +75,24 @@ func TestBootParamsHelper(env *provider.TestEnvironment, cut *provider.Container return nil } +// GetMcKernelArguments Retrieves kernel arguments from a node’s MachineConfig +// +// This function accesses the specified node in the test environment, pulls the +// KernelArguments slice from its MachineConfig, and converts it into a map of +// key‑value pairs using ArgListToMap. The resulting map is returned for +// further comparison against runtime values or other configuration sources. func GetMcKernelArguments(env *provider.TestEnvironment, nodeName string) (aMap map[string]string) { mcKernelArgumentsMap := arrayhelper.ArgListToMap(env.Nodes[nodeName].Mc.Spec.KernelArguments) return mcKernelArgumentsMap } +// getGrubKernelArgs Retrieves GRUB kernel arguments from a probe pod +// +// The function runs a command inside the node's probe container to capture the +// GRUB configuration line, filters for the options line, splits it into +// individual arguments, and converts them into a map of key-value pairs. It +// returns this map along with any error that occurs during execution or +// parsing. 
func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) { o := clientsholder.GetClientsHolder() ctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name) @@ -92,6 +114,13 @@ func getGrubKernelArgs(env *provider.TestEnvironment, nodeName string) (aMap map return arrayhelper.ArgListToMap(grubSplitKernelConfig), nil } +// getCurrentKernelCmdlineArgs retrieves the current kernel command-line arguments from a node's probe pod +// +// The function executes a predefined command inside the probe pod container to +// capture the kernel's command line, splits the output into individual +// arguments, and converts them into a map of key-value pairs. It returns this +// map along with any error that occurs during execution or parsing. The +// returned data is used to compare against expected configuration values. func getCurrentKernelCmdlineArgs(env *provider.TestEnvironment, nodeName string) (aMap map[string]string, err error) { o := clientsholder.GetClientsHolder() ctx := clientsholder.NewContext(env.ProbePods[nodeName].Namespace, env.ProbePods[nodeName].Name, env.ProbePods[nodeName].Spec.Containers[0].Name) diff --git a/tests/platform/clusteroperator/clusteroperator.go b/tests/platform/clusteroperator/clusteroperator.go index 0d0c4d22e..0262ce6c3 100644 --- a/tests/platform/clusteroperator/clusteroperator.go +++ b/tests/platform/clusteroperator/clusteroperator.go @@ -5,6 +5,12 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" ) +// IsClusterOperatorAvailable Determines if a ClusterOperator reports an 'Available' status +// +// The function inspects the conditions of a given cluster operator, checking +// for one whose type indicates availability. If such a condition is found, it +// logs that the operator is available and returns true; otherwise it logs that +// the operator is not available and returns false. 
func IsClusterOperatorAvailable(co *configv1.ClusterOperator) bool { // Loop through the conditions, looking for the 'Available' state. for _, condition := range co.Status.Conditions { diff --git a/tests/platform/cnffsdiff/fsdiff.go b/tests/platform/cnffsdiff/fsdiff.go index 79face8ad..285d9e89d 100644 --- a/tests/platform/cnffsdiff/fsdiff.go +++ b/tests/platform/cnffsdiff/fsdiff.go @@ -55,19 +55,27 @@ var ( } ) -// fsDiffJSON is a helper struct to unmarshall the "podman diff --format json" output: a slice of -// folders/filepaths (strings) for each event type changed/added/deleted: +// fsDiffJSON Parses podman diff JSON output into separate lists of changed, added, and deleted paths // -// {"changed": ["folder1, folder2"], added": ["folder5", "folder6"], "deleted": ["folder3", "folder4"]"} -// -// We'll only care about deleted and changed types, though, as in case a folder/file is created to any of them, -// there will be two entries, one for the "added" and another for the "changed". +// This struct holds three slices of strings that represent file or folder paths +// reported by the podman diff command. The "changed" slice contains paths +// modified in a container, "deleted" lists removed items, and "added" tracks +// new creations. Only the changed and deleted fields are used for comparison +// logic, while added is retained for completeness. type fsDiffJSON struct { Changed []string `json:"changed"` Deleted []string `json:"deleted"` Added []string `json:"added"` // Will not be checked, but let's keep it just in case. } +// FsDiff Tracks file system differences in a container +// +// This structure stores the results of running a podman diff against a +// container, capturing any folders that have been changed or deleted from a +// predefined target list. It also holds references to the check context, +// command client, and execution context used during the test, along with flags +// for custom podman usage and an error field for failure reporting. 
The result +// integer indicates success, failure, or error status after the test runs. type FsDiff struct { check *checksdb.Check result int @@ -80,11 +88,24 @@ type FsDiff struct { Error error } +// FsDiffFuncs provides file system diff functionality +// +// This interface defines two operations: one that initiates a diff test within +// a specified container context, and another that retrieves the result status +// of that test as an integer code. The RunTest method accepts execution context +// and container identifier parameters to perform the comparison, while +// GetResults returns an integer indicating success or failure of the last run. type FsDiffFuncs interface { RunTest(ctx clientsholder.Context, containerUID string) GetResults() int } +// NewFsDiffTester Creates a tester for filesystem differences in containers +// +// It determines whether to use a custom podman based on the OpenShift version, +// logs this decision, and initializes an FsDiff structure with the provided +// check, client holder, context, and result state. The returned object is ready +// to run tests that compare container file systems. func NewFsDiffTester(check *checksdb.Check, client clientsholder.Command, ctxt clientsholder.Context, ocpVersion string) *FsDiff { useCustomPodman := shouldUseCustomPodman(check, ocpVersion) check.LogDebug("Using custom podman: %v.", useCustomPodman) @@ -98,10 +119,13 @@ func NewFsDiffTester(check *checksdb.Check, client clientsholder.Command, ctxt c } } -// Helper function that is used to check whether we should use the podman that comes preinstalled -// on each ocp node or the one that we've (custom) precompiled inside the probe pods that can only work in -// RHEL 8.x based ocp versions (4.12.z and lower). For ocp >= 4.13.0 this workaround should not be -// necessary. 
+// shouldUseCustomPodman determines whether a custom podman binary should be used +// +// The function parses the OpenShift version string to decide if the +// preinstalled podman on each node is suitable. For versions below 4.13 it +// selects a custom, precompiled podman that works with older RHEL 8.x based +// clusters; for newer releases or parsing failures it defaults to the node’s +// built‑in podman. The result is returned as a boolean. func shouldUseCustomPodman(check *checksdb.Check, ocpVersion string) bool { const ( ocpForPreinstalledPodmanMajor = 4 @@ -128,6 +152,12 @@ func shouldUseCustomPodman(check *checksdb.Check, ocpVersion string) bool { return false } +// FsDiff.intersectTargetFolders Filters a list of folders to those that are monitored +// +// The function iterates over the supplied slice, checking each path against a +// predefined set of target directories. If a match is found, it logs a warning +// and adds the folder to the result slice. The resulting slice contains only +// paths that belong to the monitored set. func (f *FsDiff) intersectTargetFolders(src []string) []string { var dst []string for _, folder := range src { @@ -139,6 +169,13 @@ func (f *FsDiff) intersectTargetFolders(src []string) []string { return dst } +// FsDiff.runPodmanDiff Runs podman diff and returns its JSON output +// +// This method constructs the path to podman, optionally using a custom binary +// if configured. It then executes a chrooted command inside the host +// environment to obtain a diff of the container’s filesystem in JSON format. +// The function captures standard output and errors, returning the output string +// or an error if execution fails. 
func (f *FsDiff) runPodmanDiff(containerUID string) (string, error) { podmanPath := "podman" if f.useCustomPodman { @@ -155,6 +192,13 @@ func (f *FsDiff) runPodmanDiff(containerUID string) (string, error) { return output, nil } +// FsDiff.RunTest Executes podman diff to detect container file system changes +// +// The method runs the "podman diff" command on a specified container, +// optionally installing a custom podman binary if configured. It retries up to +// five times when encountering exit code 125 errors and parses the JSON output +// into deleted and changed folder lists. If any target folders are found +// altered or removed, the test fails; otherwise it succeeds. func (f *FsDiff) RunTest(containerUID string) { if f.useCustomPodman { err := f.installCustomPodman() @@ -211,13 +255,23 @@ func (f *FsDiff) RunTest(containerUID string) { } } +// FsDiff.GetResults provides the current result value +// +// The method simply retrieves and returns the integer field that holds the diff +// outcome. No parameters are required, and it does not modify any state. The +// returned value reflects the number of differences detected by the FsDiff +// instance. func (f *FsDiff) GetResults() int { return f.result } -// Generic helper function to execute a command inside the corresponding probe pod of the -// container under test. Whatever output in stdout or stderr is considered a failure, so it will -// return the concatenation of the given errorStr with those stdout, stderr and the error string. +// FsDiff.execCommandContainer Executes a shell command inside the probe pod and reports any output as an error +// +// It runs the supplied command in the container associated with FsDiff, +// capturing both stdout and stderr. If the command fails or produces any +// output, it returns an error that includes the provided error string plus the +// captured outputs and underlying execution error. Otherwise, it returns nil to +// indicate success. 
func (f *FsDiff) execCommandContainer(cmd, errorStr string) error { output, outerr, err := f.clientHolder.ExecCommandContainer(f.ctxt, cmd) if err != nil || output != "" || outerr != "" { @@ -227,26 +281,58 @@ func (f *FsDiff) execCommandContainer(cmd, errorStr string) error { return nil } +// FsDiff.createNodeFolder Creates a temporary folder on the node for mounting purposes +// +// The method runs a container command to make a directory at the path defined +// by nodeTmpMountFolder. It uses execCommandContainer to capture any output or +// errors, returning an error if the command fails or produces unexpected +// output. func (f *FsDiff) createNodeFolder() error { return f.execCommandContainer(fmt.Sprintf("mkdir %s", nodeTmpMountFolder), fmt.Sprintf("failed or unexpected output when creating folder %s.", nodeTmpMountFolder)) } +// FsDiff.deleteNodeFolder Removes the temporary mount directory on the target node +// +// This method issues a command to delete the folder designated by the constant +// nodeTmpMountFolder using the execCommandContainer helper. It expects no +// output from the command; any stdout, stderr or execution error results in an +// informative error being returned. The function is invoked during setup and +// teardown of custom Podman mounts to clean up the temporary directory. func (f *FsDiff) deleteNodeFolder() error { return f.execCommandContainer(fmt.Sprintf("rmdir %s", nodeTmpMountFolder), fmt.Sprintf("failed or unexpected output when deleting folder %s.", nodeTmpMountFolder)) } +// FsDiff.mountProbePodmanFolder Binds a partner pod's podman directory into the node's temporary mount point +// +// This method runs a bind‑mount command inside the container to expose the +// partner probe's podman folder at the node’s temporary location. It +// constructs the mount command with the source and destination paths, executes +// it via execCommandContainer, and returns any error from that execution. 
If +// the command succeeds, no value is returned. func (f *FsDiff) mountProbePodmanFolder() error { return f.execCommandContainer(fmt.Sprintf("mount --bind %s %s", partnerPodmanFolder, nodeTmpMountFolder), fmt.Sprintf("failed or unexpected output when mounting %s into %s.", partnerPodmanFolder, nodeTmpMountFolder)) } +// FsDiff.unmountProbePodmanFolder Unmounts the probe podman mount folder from within the container +// +// The method runs a command inside the container to unmount the temporary host +// folder used for probing filesystem differences. It reports any error or +// unexpected output, propagating it back to the caller. The operation is part +// of cleaning up after tests and returns an error if the unmount fails. func (f *FsDiff) unmountProbePodmanFolder() error { return f.execCommandContainer(fmt.Sprintf("umount %s", nodeTmpMountFolder), fmt.Sprintf("failed or unexpected output when unmounting %s.", nodeTmpMountFolder)) } +// FsDiff.installCustomPodman prepares a temporary mount point for custom podman +// +// This method creates a temporary directory, mounts the partner probe podman's +// podman binary into that directory, and cleans up if mounting fails. It logs +// each step and returns an error if any operation fails. The setup is used +// before running podman diff in tests. func (f *FsDiff) installCustomPodman() error { // We need to create the destination folder first. f.check.LogInfo("Creating temp folder %s", nodeTmpMountFolder) @@ -269,6 +355,12 @@ func (f *FsDiff) installCustomPodman() error { return nil } +// FsDiff.unmountCustomPodman Unmounts the temporary Podman mount directory +// +// The function logs that it is unmounting a specific folder, then attempts to +// unmount it using a helper command. If the unmount fails, it records an error +// and stops further cleanup. Finally, it deletes the now-unmounted folder, +// recording any errors encountered during deletion. 
func (f *FsDiff) unmountCustomPodman() { // Unmount podman folder from host. f.check.LogInfo("Unmounting folder %s", nodeTmpMountFolder) diff --git a/tests/platform/hugepages/hugepages.go b/tests/platform/hugepages/hugepages.go index d2dfc1b63..6baa79e36 100644 --- a/tests/platform/hugepages/hugepages.go +++ b/tests/platform/hugepages/hugepages.go @@ -32,8 +32,12 @@ type countBySize map[int]int // hugepagesByNuma maps a numa id to a hpSizeCounts map. type hugepagesByNuma map[int]countBySize -// String is the stringer implementation for the numaHpSizeCounts type so debug/info -// lines look better. +// hugepagesByNuma.String Produces a formatted string of NUMA node hugepage allocations +// +// It orders the NUMA indices, then for each index lists all page sizes with +// their counts in a human‑readable format. The resulting string contains +// entries like "Numa=0 [Size=2048kB Count=4]" and is returned for debugging or +// logging purposes. func (numaHps hugepagesByNuma) String() string { // Order numa ids/indexes numaIndexes := []int{} @@ -54,6 +58,13 @@ func (numaHps hugepagesByNuma) String() string { return sb.String() } +// Tester performs validation of node hugepage configuration against MachineConfig settings +// +// It gathers hugepage counts per NUMA from the node, parses MachineConfig +// kernel arguments or systemd units, and compares these values to ensure +// consistency. The Run method selects the appropriate comparison path based on +// whether systemd units are present. A successful run confirms that all +// configured hugepages match between the node and its MachineConfig. 
type Tester struct { node *provider.Node context clientsholder.Context @@ -63,6 +74,13 @@ type Tester struct { mcSystemdHugepagesByNuma hugepagesByNuma } +// hugepageSizeToInt Converts a hugepage size string into an integer kilobyte value +// +// This function takes a size string such as "2M" or "1G", extracts the numeric +// portion and multiplies it by 1024 for megabytes or 1024 squared for +// gigabytes. It returns the resulting value in kilobytes as an int, ignoring +// any errors from parsing. The conversion is used to translate kernel argument +// values into usable integer sizes within the program. func hugepageSizeToInt(s string) int { num, _ := strconv.Atoi(s[:len(s)-1]) unit := s[len(s)-1] @@ -76,6 +94,13 @@ func hugepageSizeToInt(s string) int { return num } +// NewTester Creates a tester for node hugepage validation +// +// This function initializes a Tester object with the provided node, probe pod, +// and command executor. It sets up the execution context inside the probe +// container and retrieves the node's NUMA hugepages information along with +// machineconfig systemd unit configurations. The resulting Tester is ready to +// run checks against the gathered data. func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholder.Command) (*Tester, error) { tester := &Tester{ node: node, @@ -99,10 +124,24 @@ func NewTester(node *provider.Node, probePod *corev1.Pod, commander clientsholde return tester, nil } +// Tester.HasMcSystemdHugepagesUnits Indicates whether MachineConfig contains Systemd hugepage unit definitions +// +// The method returns true if the internal map of Systemd hugepages per NUMA +// node has one or more entries, meaning that the machine configuration includes +// explicit hugepage units. It does this by checking the length of the map; a +// non‑zero count signals presence, otherwise it indicates no such units were +// defined. 
func (tester *Tester) HasMcSystemdHugepagesUnits() bool { return len(tester.mcSystemdHugepagesByNuma) > 0 } +// Tester.Run Runs the hugepage configuration comparison tests +// +// The method checks whether MachineConfig includes systemd unit definitions for +// hugepages. If so, it verifies that the node's hugepage counts match those +// units; otherwise it compares kernel argument values against the node's +// totals. It logs progress and returns an error if any mismatch or test failure +// occurs. func (tester *Tester) Run() error { if tester.HasMcSystemdHugepagesUnits() { log.Info("Comparing MachineConfig Systemd hugepages info against node values.") @@ -118,7 +157,14 @@ func (tester *Tester) Run() error { return nil } -// TestNodeHugepagesWithMcSystemd compares the node's hugepages values against the mc's systemd units ones. +// Tester.TestNodeHugepagesWithMcSystemd Verifies node hugepage counts match MachineConfig systemd settings +// +// The function walks through each NUMA node’s actual hugepage configuration, +// ensuring that any size or node absent from the MachineConfig has a count of +// zero. It then cross‑checks every entry in the MachineConfig against the +// node’s values, confirming matching sizes and counts for all NUMA indices. +// If any discrepancy is found, it returns false with an explanatory error; +// otherwise it reports success. func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error) { // Iterate through node's actual hugepages to make sure that each node's size that does not exist in the // MachineConfig has a value of 0. @@ -169,9 +215,14 @@ func (tester *Tester) TestNodeHugepagesWithMcSystemd() (bool, error) { return true, nil } -// TestNodeHugepagesWithKernelArgs compares node hugepages against kernelArguments config. -// The total count of hugepages of the size defined in the kernelArguments must match the kernArgs' hugepages value. -// For other sizes, the sum should be 0. 
+// Tester.TestNodeHugepagesWithKernelArgs Validates node hugepage counts against kernel argument configuration +// +// The method retrieves the hugepage sizes and counts specified in a machine's +// kernel arguments, then checks that each size present on the node appears in +// those arguments with non‑zero counts. It aggregates node counts per size +// across all NUMA nodes and compares them to the expected totals from the +// kernel arguments, returning an error if any mismatch occurs. On success it +// logs matching sizes and returns true without error. func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) { kernelArgsHpCountBySize, _ := getMcHugepagesFromMcKernelArguments(&tester.node.Mc) @@ -207,7 +258,13 @@ func (tester *Tester) TestNodeHugepagesWithKernelArgs() (bool, error) { return true, nil } -// getNodeNumaHugePages gets the actual node's hugepages config based on /sys/devices/system/node/nodeX files. +// Tester.getNodeNumaHugePages Retrieves the node's current hugepage configuration +// +// This method runs a command inside the probe pod to read +// /sys/devices/system/node files, parses each line for NUMA node number, page +// size, and count, and aggregates them into a map keyed by node. It returns the +// populated map or an error if execution fails or output cannot be parsed. The +// result is used to compare against desired hugepage settings. func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err error) { // This command must run inside the node, so we'll need the node's context to run commands inside the probe daemonset pod. stdout, stderr, err := tester.commander.ExecCommandContainer(tester.context, cmd) @@ -246,7 +303,13 @@ func (tester *Tester) getNodeNumaHugePages() (hugepages hugepagesByNuma, err err return hugepages, nil } -// getMcSystemdUnitsHugepagesConfig gets the hugepages information from machineconfig's systemd units. 
+// getMcSystemdUnitsHugepagesConfig extracts hugepage configuration from machineconfig systemd units +// +// This function scans the systemd unit files in a machine configuration for +// entries that define hugepage allocations. It parses each matching unit’s +// contents to capture the number, size, and NUMA node of the hugepages, +// organizing them into a nested map keyed by node and page size. The resulting +// structure is returned along with any parsing errors encountered. func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepages hugepagesByNuma, err error) { const UnitContentsRegexMatchLen = 4 hugepages = hugepagesByNuma{} @@ -284,6 +347,13 @@ func getMcSystemdUnitsHugepagesConfig(mc *provider.MachineConfig) (hugepages hug return hugepages, nil } +// logMcKernelArgumentsHugepages Logs the hugepage configuration extracted from machine‑config kernel arguments +// +// This function builds a human‑readable string that includes the default +// hugepage size and each configured size with its count. It then sends this +// message to the package logger at info level, providing visibility into how +// many hugepages of each size were requested by the node’s machine +// configuration. func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz int) { var sb strings.Builder sb.WriteString(fmt.Sprintf("MC KernelArguments hugepages config: default_hugepagesz=%d-kB", defhugepagesz)) @@ -293,7 +363,13 @@ func logMcKernelArgumentsHugepages(hugepagesPerSize map[int]int, defhugepagesz i log.Info("%s", sb.String()) } -// getMcHugepagesFromMcKernelArguments gets the hugepages params from machineconfig's kernelArguments +// getMcHugepagesFromMcKernelArguments extracts hugepage configuration from kernel arguments +// +// The function parses the kernelArguments field of a MachineConfig to build a +// map that associates each hugepage size with its count, using RHEL defaults +// when necessary. 
It also determines the default hugepages size specified in +// the arguments or falls back to a system default. The resulting map and +// default size are returned for use by tests validating node hugepage settings. func getMcHugepagesFromMcKernelArguments(mc *provider.MachineConfig) (hugepagesPerSize map[int]int, defhugepagesz int) { defhugepagesz = RhelDefaultHugepagesz hugepagesPerSize = map[int]int{} diff --git a/tests/platform/isredhat/isredhat.go b/tests/platform/isredhat/isredhat.go index 2f70fa4d5..31f4dbf27 100644 --- a/tests/platform/isredhat/isredhat.go +++ b/tests/platform/isredhat/isredhat.go @@ -31,11 +31,25 @@ const ( VersionRegex = `(?m)Red Hat Enterprise Linux( Server)? release (\d+\.\d+)` ) +// BaseImageInfo provides utilities for inspecting a container’s base image +// +// The struct holds a command executor and context, enabling it to run commands +// inside a container. It offers methods such as TestContainerIsRedHatRelease, +// which checks the presence of /etc/redhat-release to determine if the image is +// RHEL-based, returning a boolean and error. The helper runCommand executes +// arbitrary shell commands via the client holder, handling errors and capturing +// output. type BaseImageInfo struct { ClientHolder clientsholder.Command OCPContext clientsholder.Context } +// NewBaseImageTester Creates a new instance of the base image tester +// +// The function accepts a client holder and a contextual object representing a +// Kubernetes pod or container. It constructs and returns a pointer to a struct +// that stores these inputs for subsequent checks on the container's base image. +// No additional processing occurs during construction. 
func NewBaseImageTester(client clientsholder.Command, ctx clientsholder.Context) *BaseImageInfo { return &BaseImageInfo{ ClientHolder: client, @@ -43,6 +57,13 @@ func NewBaseImageTester(client clientsholder.Command, ctx clientsholder.Context) } } +// BaseImageInfo.TestContainerIsRedHatRelease Checks if the container image is a Red Hat release +// +// The method runs a shell command inside the container to read +// /etc/redhat-release or report an unknown base image, logs the output, and +// then uses IsRHEL to determine whether the image matches known Red Hat +// patterns. It returns true when the image is confirmed as a Red Hat release, +// otherwise false, along with any execution error that occurs. func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error) { output, err := b.runCommand(`if [ -e /etc/redhat-release ]; then cat /etc/redhat-release; else echo \"Unknown Base Image\"; fi`) log.Info("Output from /etc/redhat-release: %q", output) @@ -52,6 +73,13 @@ func (b *BaseImageInfo) TestContainerIsRedHatRelease() (bool, error) { return IsRHEL(output), nil } +// IsRHEL determines whether the provided string signifies a Red Hat based release +// +// The function examines the supplied text for patterns that indicate a +// non‑Red Hat base image and immediately returns false if such patterns are +// found. If no negative matches occur, it logs the content of +// /etc/redhat-release and checks against a regular expression describing +// official Red Hat releases, returning true when a match is detected. func IsRHEL(output string) bool { // If the 'Unknown Base Image' string appears, return false. notRedHatRegex := regexp.MustCompile(NotRedHatBasedRegex) @@ -67,6 +95,12 @@ func IsRHEL(output string) bool { return len(matchVersion) > 0 } +// BaseImageInfo.runCommand Executes a shell command inside a container +// +// The method runs the supplied command in the container using the client +// holder, capturing both standard output and error streams. 
If execution fails +// or an error string is returned, it logs the issue and propagates an error to +// the caller. On success, it returns the command's output as a string. func (b *BaseImageInfo) runCommand(cmd string) (string, error) { output, outerr, err := b.ClientHolder.ExecCommandContainer(b.OCPContext, cmd) if err != nil { diff --git a/tests/platform/nodetainted/nodetainted.go b/tests/platform/nodetainted/nodetainted.go index be2a95082..21519e1f7 100644 --- a/tests/platform/nodetainted/nodetainted.go +++ b/tests/platform/nodetainted/nodetainted.go @@ -27,7 +27,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/internal/log" ) -// NodeTainted holds information about tainted nodes. +// NodeTainted provides access to kernel taint information for a node +// +// It holds the context and node name used to query system files and run shell +// commands that expose kernel taints. The struct offers methods to retrieve the +// numeric taint mask, list modules that set taints, and parse those module +// taints from /sys/module. These functions enable inspection of tainted states +// on a target node. type NodeTainted struct { ctx *clientsholder.Context node string @@ -47,7 +53,13 @@ var runCommand = func(ctx *clientsholder.Context, cmd string) (string, error) { return output, nil } -// NewNodeTainted creates a new NodeTainted tester +// NewNodeTaintedTester Creates a tester for checking kernel taints on a node +// +// This function constructs and returns a new instance of the NodeTainted type. +// It stores the provided client context and node name so that subsequent +// methods can interact with the node’s kernel taint state via the Kubernetes +// API. The returned object is used by test logic to retrieve and analyze taints +// for compliance checks. 
func NewNodeTaintedTester(context *clientsholder.Context, node string) *NodeTainted { return &NodeTainted{ ctx: context, @@ -55,6 +67,13 @@ func NewNodeTaintedTester(context *clientsholder.Context, node string) *NodeTain } } +// NodeTainted.GetKernelTaintsMask Retrieves the kernel taints bitmask from a node +// +// This method runs a command to read /proc/sys/kernel/tainted, cleans up any +// whitespace characters, then parses the resulting string as an unsigned +// integer in base ten. If parsing fails it returns an error indicating the +// malformed value. On success it returns the taints mask as a uint64 and a nil +// error. func (nt *NodeTainted) GetKernelTaintsMask() (uint64, error) { output, err := runCommand(nt.ctx, `cat /proc/sys/kernel/tainted`) if err != nil { @@ -73,6 +92,13 @@ func (nt *NodeTainted) GetKernelTaintsMask() (uint64, error) { return taintsMask, nil } +// KernelTaint Represents an individual kernel taint +// +// This structure holds the human-readable description of a taint as well as its +// identifying letters used by the kernel to mark nodes. The Description field +// explains why the taint exists, while Letters contains the short string that +// is applied to node metadata. Instances are typically collected and examined +// when evaluating node health or scheduling constraints. type KernelTaint struct { Description string Letters string @@ -109,6 +135,12 @@ var kernelTaints = map[int]KernelTaint{ 31: {"BPF syscall has either been configured or enabled for unprivileged users/programs", "u"}, } +// GetTaintMsg Retrieves a descriptive message for a kernel taint bit +// +// This function looks up the given integer bit in a predefined map of known +// kernel taints. If found, it returns the taint's description along with the +// bit number; otherwise it indicates the bit is reserved. The output string is +// used to label taint information throughout the test suite. 
func GetTaintMsg(bit int) string { if taintMsg, exists := kernelTaints[bit]; exists { return fmt.Sprintf("%s (tainted bit %d)", taintMsg.Description, bit) @@ -117,6 +149,13 @@ func GetTaintMsg(bit int) string { return fmt.Sprintf("reserved (tainted bit %d)", bit) } +// DecodeKernelTaintsFromBitMask Converts a bitmask into human‑readable kernel taint messages +// +// The function iterates over all 64 bits of the supplied unsigned integer, +// checking each bit for a set value. For every bit that is on, it calls a +// helper to retrieve a descriptive message and appends that string to a slice. +// The resulting list of strings represents the active kernel taints +// corresponding to the original mask. func DecodeKernelTaintsFromBitMask(bitmask uint64) []string { taints := []string{} for i := 0; i < 64; i++ { @@ -128,6 +167,12 @@ func DecodeKernelTaintsFromBitMask(bitmask uint64) []string { return taints } +// RemoveAllExceptNumbers strips all non-digit characters from a string +// +// This function takes an input string, compiles a regular expression that +// matches any non‑digit sequence, and replaces those sequences with nothing. +// The result is a new string containing only the numeric characters that were +// present in the original input. func RemoveAllExceptNumbers(incomingStr string) string { // example string ", bit:10)" // return 10 @@ -137,6 +182,13 @@ func RemoveAllExceptNumbers(incomingStr string) string { return re.ReplaceAllString(incomingStr, "") } +// DecodeKernelTaintsFromLetters Converts a string of taint letters into descriptive taint strings +// +// This routine iterates over each character in the input, matching it against a +// predefined list of kernel taints. For matched letters it builds a +// human‑readable description that includes the taint’s name, the letter +// used, and its bit index. If a letter is unknown it records an "unknown taint" +// entry. The resulting slice contains one entry per letter. 
func DecodeKernelTaintsFromLetters(letters string) []string { taints := []string{} @@ -163,8 +215,13 @@ func DecodeKernelTaintsFromLetters(letters string) []string { return taints } -// getBitPosFromLetter returns the kernel taint bit position (base index 0) of the letter that -// represents a module's taint. +// getBitPosFromLetter Finds the bit index of a kernel taint letter +// +// The function accepts a single-character string representing a module taint +// and searches through a predefined list of known kernel taints to determine +// its corresponding bit position. It returns that integer index if found, +// otherwise it produces an error indicating the letter is invalid or unknown. +// Input validation ensures only one character is processed. func getBitPosFromLetter(letter string) (int, error) { if letter == "" || len(letter) > 1 { return 0, fmt.Errorf("input string must contain one letter") @@ -179,7 +236,12 @@ func getBitPosFromLetter(letter string) (int, error) { return 0, fmt.Errorf("letter %s does not belong to any known kernel taint", letter) } -// GetTaintedBitsByModules helper function to gets, for each module, the taint bits from its taint letters. +// GetTaintedBitsByModules Collects kernel taint bits from module letters +// +// This function receives a map of modules to their taint letter strings. It +// iterates over each letter, converts it to the corresponding bit position +// using a helper, and records that bit as true in a result map. Errors are +// returned if any letter cannot be mapped to a known taint. func GetTaintedBitsByModules(tainters map[string]string) (map[int]bool, error) { taintedBits := map[int]bool{} @@ -199,8 +261,13 @@ func GetTaintedBitsByModules(tainters map[string]string) (map[int]bool, error) { return taintedBits, nil } -// GetOtherTaintedBits helper function to get the tainted bits that are not related to -// any module. 
+// GetOtherTaintedBits Identifies kernel taint bits not associated with any module +// +// The function examines a 64‑bit mask of currently set kernel taints and +// compares each bit to a map that records which bits have been set by known +// modules. It iterates over all possible bit positions, collecting those that +// are active in the mask but absent from the module record. The result is a +// slice of integers representing the indices of these orphaned taint bits. func GetOtherTaintedBits(taintsMask uint64, taintedBitsByModules map[int]bool) []int { otherTaintedBits := []int{} // Lastly, check that all kernel taint bits come from modules. @@ -216,6 +283,12 @@ func GetOtherTaintedBits(taintsMask uint64, taintedBitsByModules map[int]bool) [ return otherTaintedBits } +// NodeTainted.getAllTainterModules Retrieves all kernel modules that are tainting the node +// +// The function runs a shell command to list every module in /sys/module, reads +// each module's taint file if present, and collects non‑empty taints into a +// map keyed by module name. It returns this mapping or an error if the command +// fails or parsing encounters duplicate entries or malformed lines. func (nt *NodeTainted) getAllTainterModules() (map[string]string, error) { const ( command = "modules=`ls /sys/module`; for module_name in $modules; do taint_file=/sys/module/$module_name/taint; " + @@ -262,12 +335,13 @@ func (nt *NodeTainted) getAllTainterModules() (map[string]string, error) { return tainters, nil } -// GetTainterModules runs a command in the node to get all the modules that -// have set a kernel taint bit. Returns: -// - tainters: maps a module to a string of taints letters. Each letter maps -// to a single bit in the taint mask. Tainters that appear in the allowlist will not -// be added to this map. -// - taintBits: bits (pos) of kernel taints caused by all modules (included the allowlisted ones). 
+// NodeTainted.GetTainterModules Retrieves non-allowlisted modules that set kernel taint bits
+//
+// The method runs a command on the node to list all modules with taint letters,
+// then filters out those present in an allowlist. It returns a map of module
+// names to their taint letter strings and another map indicating which taint
+// bits are set across all modules. Errors from command execution or parsing are
+// wrapped and returned.
 func (nt *NodeTainted) GetTainterModules(allowList map[string]bool) (tainters map[string]string, taintBits map[int]bool, err error) {
 	// First, get all the modules that are tainting the kernel in this node.
 	allTainters, err := nt.getAllTainterModules()
diff --git a/tests/platform/operatingsystem/operatingsystem.go b/tests/platform/operatingsystem/operatingsystem.go
index d005f089e..0e4dc0ed2 100644
--- a/tests/platform/operatingsystem/operatingsystem.go
+++ b/tests/platform/operatingsystem/operatingsystem.go
@@ -28,6 +28,13 @@ const (
 //go:embed files/rhcos_version_map
 var rhcosVersionMap string
 
+// GetRHCOSMappedVersions Parses a formatted string of RHCOS versions into a mapping
+//
+// The function receives a multiline string where each line contains a short
+// RHCOS version, a slash, and its long-form counterpart. It splits the input by
+// newline, trims whitespace, ignores empty lines, then separates each pair on
+// the slash to build a map from short to long versions. The resulting map is
+// returned along with any error encountered while parsing.
If a match is found, it returns the +// corresponding short version; otherwise, it returns a sentinel value +// indicating that the version was not located. func GetShortVersionFromLong(longVersion string) (string, error) { capturedVersions, err := GetRHCOSMappedVersions(rhcosVersionMap) if err != nil { diff --git a/tests/platform/suite.go b/tests/platform/suite.go index 260001560..ce55c58c0 100644 --- a/tests/platform/suite.go +++ b/tests/platform/suite.go @@ -50,6 +50,16 @@ var ( } ) +// LoadChecks Registers platform alteration tests into the internal checks database +// +// The function logs that it is loading the platform alteration suite and +// creates a new checks group identified by a common key. It registers a +// before‑each hook and then adds numerous checks, each with its own skip +// conditions and execution logic. Each check is built from an identifier, +// configured to run only when appropriate environment conditions are met, and +// invokes a specific test function that evaluates node or pod properties. The +// assembled group is added to the checks database for later execution. +// //nolint:funlen func LoadChecks() { log.Debug("Loading %s suite checks", common.PlatformAlterationTestKey) @@ -175,6 +185,13 @@ func LoadChecks() { })) } +// testHyperThreadingEnabled Verifies hyper‑threading status on all bare metal nodes +// +// The routine retrieves every bare metal node from the test environment and +// queries whether hyper‑threading is active for each one. It records +// compliant nodes where hyper‑threading is enabled, logs errors for disabled +// or query failures, and compiles separate lists of compliant and +// non‑compliant objects before setting the check result. 
func testHyperThreadingEnabled(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -198,6 +215,13 @@ func testHyperThreadingEnabled(check *checksdb.Check, env *provider.TestEnvironm check.SetResult(compliantObjects, nonCompliantObjects) } +// testServiceMesh Verifies that every pod contains an Istio proxy container +// +// The function iterates over all pods in the test environment, checking each +// container for a service‑mesh indicator. Pods lacking an Istio proxy are +// recorded as non‑compliant and logged with an error; those containing one +// are marked compliant and logged positively. Finally, the check result is set +// with lists of compliant and non‑compliant report objects. func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -223,7 +247,13 @@ func testServiceMesh(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } -// testContainersFsDiff test that all CUT did not install new packages are starting +// testContainersFsDiff Verifies containers have not been altered by comparing file system snapshots +// +// The routine iterates over each container under test, locating a corresponding +// probe pod to obtain the original filesystem state. It runs a diff check; if +// the container shows no changes it records compliance, otherwise it logs the +// modified or deleted directories and marks non‑compliance. Errors during the +// diff process are captured as failures and reported with error details. 
func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -268,6 +298,15 @@ func testContainersFsDiff(check *checksdb.Check, env *provider.TestEnvironment) check.SetResult(compliantObjects, nonCompliantObjects) } +// testTainted Checks nodes for kernel taints against an allowlist +// +// The function iterates over cluster nodes, verifies a workload is present, +// retrieves each node's kernel taint bitmask, and decodes the taints. It +// compares found taints to a configured list of acceptable modules, logging +// errors when unexpected taints or non‑module taints appear. Compliant and +// non‑compliant findings are collected into report objects and reported via +// SetResult. +// //nolint:funlen func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject @@ -403,6 +442,13 @@ func testTainted(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } +// testIsRedHatRelease Verifies that containers use a Red Hat Enterprise Linux base image +// +// The function iterates over all test containers, creating a tester for each +// based on its namespace, pod name, and container name. It calls the tester to +// determine if the underlying image is a RHEL release; any errors are logged as +// failures. Containers that pass or fail are recorded in separate report lists +// which are then stored in the check result. 
func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -426,6 +472,12 @@ func testIsRedHatRelease(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } +// testIsSELinuxEnforcing Checks that SELinux is enforcing on cluster nodes +// +// The function runs a command inside each probe pod to read the SELinux mode +// via chroot and verifies it matches "Enforcing\n". It records compliant or +// non‑compliant results per node, logging errors for execution failures. The +// final result aggregates all objects and updates the check status. func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment) { const ( getenforceCommand = `chroot /host getenforce` @@ -458,6 +510,13 @@ func testIsSELinuxEnforcing(check *checksdb.Check, env *provider.TestEnvironment check.SetResult(compliantObjects, nonCompliantObjects) } +// testHugepages Verifies that node hugepages configuration has not been altered +// +// The function iterates over all nodes in the test environment, skipping +// non‑worker nodes as compliant. For each worker node it looks up a probe +// pod, creates a hugepages tester and runs its check. Results are collected +// into compliant or non‑compliant report objects which are then set on the +// provided check. func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -496,6 +555,14 @@ func testHugepages(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } +// testUnalteredBootParams Validates kernel boot parameters against MachineConfig and GRUB settings on each node +// +// The routine iterates over all containers in the test environment, ensuring +// each node is checked only once. 
For every unique node it calls a helper that +// compares current kernel command‑line arguments to those defined in the +// MachineConfig and GRUB configuration, logging any mismatches. Results are +// collected into compliant or non‑compliant report objects which are then set +// as the check’s outcome. func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -523,6 +590,14 @@ func testUnalteredBootParams(check *checksdb.Check, env *provider.TestEnvironmen check.SetResult(compliantObjects, nonCompliantObjects) } +// testSysctlConfigs Verifies node sysctl values against machine config +// +// This routine iterates over containers, ensuring each node is checked only +// once. For every node it retrieves current sysctl settings and compares them +// to the expected kernel arguments defined in its machine configuration. +// Mismatches are logged and reported as non‑compliant; nodes with matching +// values are marked compliant. The results are stored in the check result for +// later reporting. func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -569,6 +644,12 @@ func testSysctlConfigs(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } +// testOCPStatus Checks OpenShift cluster version against lifecycle status +// +// The function inspects the environment’s OpenShift status, logs an +// appropriate message for EOL, maintenance, GA, or pre‑GA releases, and marks +// the check as compliant unless the version is in end of life. It constructs +// report objects indicating compliance and assigns them to the check result. 
func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) { clusterIsInEOL := false switch env.OCPStatus { @@ -597,6 +678,8 @@ func testOCPStatus(check *checksdb.Check, env *provider.TestEnvironment) { check.SetResult(compliantObjects, nonCompliantObjects) } +// testNodeOperatingSystemStatus Verifies node operating system compatibility +// //nolint:funlen func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvironment) { failedControlPlaneNodes := []string{} @@ -706,6 +789,14 @@ func testNodeOperatingSystemStatus(check *checksdb.Check, env *provider.TestEnvi check.SetResult(compliantObjects, nonCompliantObjects) } +// testPodHugePagesSize Verifies that pods use the expected hugepages size +// +// The function iterates over all pods configured with hugepages in the test +// environment, checks each pod's allocated hugepages against a specified size, +// and logs whether each check passes or fails. It collects compliant and +// non‑compliant pods into separate report objects, which are then set as the +// result of the current test. Errors are logged for any pod that does not match +// the expected size. func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, size string) { var compliantObjects []*testhelper.ReportObject var nonCompliantObjects []*testhelper.ReportObject @@ -723,6 +814,12 @@ func testPodHugePagesSize(check *checksdb.Check, env *provider.TestEnvironment, check.SetResult(compliantObjects, nonCompliantObjects) } +// testClusterOperatorHealth Verifies that all cluster operators are available +// +// The function iterates over each operator in the test environment, logging a +// check for each one. It uses a helper to determine if an operator is in the +// 'Available' state and records compliant or non‑compliant results +// accordingly. Finally, it aggregates these results into the test's outcome. 
func testClusterOperatorHealth(check *checksdb.Check, env *provider.TestEnvironment) { // Checks the various ClusterOperator(s) to see if they are all in an 'Available' state. // If they are not in an 'Available' state, the check will fail. diff --git a/tests/platform/sysctlconfig/sysctlconfig.go b/tests/platform/sysctlconfig/sysctlconfig.go index f135692e1..27f0bd4c9 100644 --- a/tests/platform/sysctlconfig/sysctlconfig.go +++ b/tests/platform/sysctlconfig/sysctlconfig.go @@ -25,7 +25,13 @@ import ( "github.com/redhat-best-practices-for-k8s/certsuite/pkg/provider" ) -// Creates a map describing the final sysctl key-value pair out of the results of "sysctl --system" +// parseSysctlSystemOutput parses sysctl output into a map of key-value pairs +// +// The function takes the raw text returned by "sysctl --system" and splits it +// line by line. It ignores comment lines that start with an asterisk, then uses +// a regular expression to extract keys and values from standard assignments +// such as "kernel.yama.ptrace_scope = 0". Each extracted key and value is +// stored in a map which the function returns. func parseSysctlSystemOutput(sysctlSystemOutput string) map[string]string { retval := make(map[string]string) splitConfig := strings.Split(sysctlSystemOutput, "\n") @@ -46,6 +52,14 @@ func parseSysctlSystemOutput(sysctlSystemOutput string) map[string]string { return retval } +// GetSysctlSettings Retrieves system configuration values from a node's probe pod +// +// This function runs the command "chroot /host sysctl --system" inside a +// designated probe container to collect kernel settings for a specified node. +// It captures standard output and parses each line into key/value pairs, +// ignoring comments or non‑matching lines. The resulting map of setting names +// to values is returned, with an error if the command fails or produces +// unexpected output. 
func GetSysctlSettings(env *provider.TestEnvironment, nodeName string) (map[string]string, error) { const ( sysctlCommand = "chroot /host sysctl --system" diff --git a/tests/preflight/suite.go b/tests/preflight/suite.go index fbbf97c5f..df5b6adfa 100644 --- a/tests/preflight/suite.go +++ b/tests/preflight/suite.go @@ -38,6 +38,12 @@ var ( } ) +// labelsAllowTestRun checks whether a test run is permitted based on labels +// +// The function receives a string of labels and a list of allowed label +// identifiers. It scans each allowed identifier to see if it appears within the +// provided string, returning true upon the first match. If none of the allowed +// labels are found, it returns false. func labelsAllowTestRun(labelFilter string, allowedLabels []string) bool { for _, label := range allowedLabels { if strings.Contains(labelFilter, label) { @@ -47,13 +53,13 @@ func labelsAllowTestRun(labelFilter string, allowedLabels []string) bool { return false } -// Returns true if the preflight checks should run. -// Conditions: (1) the labels expr should contain any of the preflight tags/labels & (2) the -// preflight dockerconfig file must exist. -// This is just a hack to avoid running the preflight.LoadChecks() if it's not necessary -// since that function is actually running all the preflight lib's checks, which can take some -// time to finish. When they're finished, a checksdb.Check is created for each preflight lib's -// check that has run. The CheckFn will simply store the result. +// ShouldRun Determines whether preflight checks should be executed +// +// The function evaluates the provided label expression to see if it includes +// any preflight-specific tags, then verifies that a Docker configuration file +// is available. If either condition fails, it returns false or logs a warning +// and marks the environment to skip preflight tests. When both conditions are +// satisfied, it signals that the preflight suite may run. 
func ShouldRun(labelsExpr string) bool { env = provider.GetTestEnvironment() preflightAllowedLabels := []string{common.PreflightTestKey, identifiers.TagPreflight} @@ -72,6 +78,12 @@ func ShouldRun(labelsExpr string) bool { return true } +// LoadChecks Initializes the test environment and runs Preflight checks for containers and operators +// +// The function sets up logging, retrieves the current test environment, and +// creates a checks group for Preflight tests. It executes container preflight +// tests and conditionally runs operator tests if the cluster is OpenShift. +// Results are recorded in the checks group for later reporting. func LoadChecks() { log.Debug("Running %s suite checks", common.PreflightTestKey) @@ -90,6 +102,13 @@ func LoadChecks() { } } +// testPreflightOperators Runs preflight checks on all operators and records their outcomes +// +// This function iterates over each operator in the test environment, executing +// its preflight tests and capturing any errors. After collecting results, it +// logs completion of operator testing. Finally, it creates catalog entries for +// every unique preflight test found across operators, adding these checks to +// the provided group so they can be reported. func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) { // Loop through all of the operators, run preflight, and set their results into their respective object for _, op := range env.Operators { @@ -111,6 +130,13 @@ func testPreflightOperators(checksGroup *checksdb.ChecksGroup, env *provider.Tes } } +// testPreflightContainers runs Preflight checks on all containers in the test environment +// +// The function iterates over each container, executing Preflight diagnostics +// while caching results per image to avoid duplicate work. It logs any errors +// encountered during execution and records completion of tests for the entire +// set. 
After processing, it aggregates unique test entries from container +// results and generates corresponding checks in the provided group. func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.TestEnvironment) { // Using a cache to prevent unnecessary processing of images if we already have the results available preflightImageCache := make(map[string]provider.PreflightResultsDB) @@ -133,7 +159,13 @@ func testPreflightContainers(checksGroup *checksdb.ChecksGroup, env *provider.Te } } -// func generatePreflightContainerCnfCertTest(testName, testID string, tags []string, containers []*provider.Container) { +// generatePreflightContainerCnfCertTest Creates a test entry for each Preflight container check +// +// The function registers a catalog entry using the supplied name, description, +// and remediation, then adds a corresponding check to the checks group. For +// every container passed in, it examines preflight results and records which +// containers passed, failed, or errored on that specific test. The outcome is +// stored as compliant or non‑compliant objects within the check's result. func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, containers []*provider.Container) { // Based on a single test "name", we will be passing/failing in our test framework. // Brute force-ish type of method. @@ -177,6 +209,13 @@ func generatePreflightContainerCnfCertTest(checksGroup *checksdb.ChecksGroup, te })) } +// generatePreflightOperatorCnfCertTest Creates a test case that aggregates preflight results across operators +// +// The function registers a new test in the catalog, then builds a check that +// iterates over all operators to collect passed, failed, or errored preflight +// outcomes for a given test name. It constructs report objects for each +// operator and sets the overall result accordingly. The check is skipped if no +// operators are present. 
func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, testName, description, remediation string, operators []*provider.Operator) { // Based on a single test "name", we will be passing/failing in our test framework. // Brute force-ish type of method. @@ -221,6 +260,13 @@ func generatePreflightOperatorCnfCertTest(checksGroup *checksdb.ChecksGroup, tes })) } +// getUniqueTestEntriesFromContainerResults Collects unique preflight test results from multiple containers +// +// This function iterates over a slice of container objects, extracting all +// passed, failed, and error preflight tests. It aggregates them into a map +// keyed by test name, ensuring that duplicate entries are overridden with the +// most recent result. The resulting map contains one entry per unique test +// across all containers. func getUniqueTestEntriesFromContainerResults(containers []*provider.Container) map[string]provider.PreflightTest { // If containers are sharing the same image, they should "presumably" have the same results returned from Preflight. testEntries := make(map[string]provider.PreflightTest) @@ -240,6 +286,14 @@ func getUniqueTestEntriesFromContainerResults(containers []*provider.Container) return testEntries } +// getUniqueTestEntriesFromOperatorResults collects unique preflight test results from all operators +// +// The function iterates over a slice of operator objects, extracting each +// passed, failed, or errored test result. For every test name it stores the +// corresponding test entry in a map, ensuring that only one instance per test +// name is kept even if multiple operators report the same test. The resulting +// map associates test names with their detailed preflight test information for +// later use. 
func getUniqueTestEntriesFromOperatorResults(operators []*provider.Operator) map[string]provider.PreflightTest { testEntries := make(map[string]provider.PreflightTest) for _, op := range operators { diff --git a/webserver/webserver.go b/webserver/webserver.go index 693b23a66..414ceda23 100644 --- a/webserver/webserver.go +++ b/webserver/webserver.go @@ -65,6 +65,13 @@ var upgrader = websocket.Upgrader{ }, } +// logStreamHandler Streams log output to a WebSocket client +// +// When called, the function upgrades an HTTP request to a WebSocket connection. +// It then continuously reads lines from a log source, converts each line to +// HTML-safe format, appends a line break, and sends it over the socket. The +// loop sleeps briefly between messages and logs any errors that occur during +// reading or transmission. func logStreamHandler(w http.ResponseWriter, r *http.Request) { conn, err := upgrader.Upgrade(w, r, nil) if err != nil { @@ -94,6 +101,13 @@ func logStreamHandler(w http.ResponseWriter, r *http.Request) { } } +// RequestedData Holds user‑supplied configuration options for updating a test framework +// +// This structure aggregates all settings that can be specified in the UI or +// command line, such as namespaces, labels, deployment names, and API +// credentials. Each field is a slice of strings to allow multiple values, with +// optional fields omitted from JSON if empty. The data is consumed by updateTnf +// to rebuild the YAML configuration for the test environment. type RequestedData struct { SelectedOptions []string `json:"selectedOptions"` TargetNameSpaces []string `json:"targetNameSpaces"` @@ -122,10 +136,23 @@ type RequestedData struct { ConnectAPIProxyURL []string `json:"proxyURL,omitempty"` ConnectAPIProxyPort []string `json:"proxyPort,omitempty"` } + +// ResponseData Holds a response message +// +// This struct contains a single field that stores a text message to be returned +// in HTTP responses. 
The JSON tag ensures the field is serialized with the key +// "message" when the struct is encoded to JSON. type ResponseData struct { Message string `json:"message"` } +// installReqHandlers Registers HTTP routes for static content and classification data +// +// This function sets up several URL handlers that serve embedded HTML, +// JavaScript, and classification information. Each handler writes the +// appropriate content type header before sending the precompiled bytes or +// generated JSON string. Errors during writing result in a 500 response to the +// client. func installReqHandlers() { http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { // Set the content type to "text/html". @@ -198,6 +225,13 @@ func installReqHandlers() { http.HandleFunc("/logstream", logStreamHandler) } +// StartServer Starts an HTTP server that serves test results and static assets +// +// The function creates a server listening on port 8084, attaches context with +// the output folder path, registers handlers for static files and runFunction, +// then begins serving requests. It logs the server address and panics if +// ListenAndServe returns an error. The server provides endpoints for HTML, +// JavaScript, and log streaming used by the web interface. func StartServer(outputFolder string) { ctx := context.TODO() server := &http.Server{ @@ -219,7 +253,13 @@ func StartServer(outputFolder string) { } } -// Define an HTTP handler that triggers CERTSUITE tests +// runHandler Triggers Cert Suite tests from an HTTP request +// +// The handler reads form data containing JSON options and a kubeconfig file, +// writes the config to a temporary file, updates the test configuration YAML, +// and then runs the Cert Suite with the supplied labels filter. It logs +// progress, handles errors by writing HTTP error responses or logging fatal +// messages, and finally returns a JSON success message. 
// //nolint:funlen func runHandler(w http.ResponseWriter, r *http.Request) { @@ -330,6 +370,15 @@ func runHandler(w http.ResponseWriter, r *http.Request) { } } +// updateTnf Updates a YAML configuration with user-provided data +// +// This function parses an existing YAML configuration into a struct, then +// overwrites numerous fields such as namespaces, labels, deployment lists, +// filters, and connection settings based on the supplied RequestedData. After +// all updates are applied, it serializes the struct back to YAML and returns +// the byte slice. Errors during unmarshalling or marshalling cause fatal log +// entries that terminate the program. +// //nolint:funlen,gocyclo func updateTnf(tnfConfig []byte, data *RequestedData) []byte { // Unmarshal the YAML data into a Config struct @@ -438,7 +487,13 @@ func updateTnf(tnfConfig []byte, data *RequestedData) []byte { return newData } -// outputTestCases outputs the Markdown representation for test cases from the catalog to stdout. +// outputTestCases Creates a Markdown-formatted classification list for test cases +// +// The function collects all identifiers from the catalog, sorts them by ID, +// groups them by suite name, and then builds a string containing each test’s +// description, remediation, best practice reference, and category +// classification in JSON-like format. The resulting string is returned for use +// as a JavaScript variable in the web UI. func outputTestCases() (outString string) { // Building a separate data structure to store the key order for the map keys := make([]claim.Identifier, 0, len(identifiers.Catalog)) @@ -478,6 +533,12 @@ func outputTestCases() (outString string) { outString += "}" return outString } + +// toJSONString Formats a map into an indented JSON string +// +// The function takes a key/value map of strings, marshals it with indentation +// to produce readable JSON, and returns the result as a string. If marshalling +// fails, it simply returns an empty string. 
func toJSONString(data map[string]string) string { // Convert the map to a JSON-like string jsonbytes, err := json.MarshalIndent(data, "", " ") @@ -487,6 +548,13 @@ func toJSONString(data map[string]string) string { return string(jsonbytes) } + +// GetSuitesFromIdentifiers Retrieves unique suite names from a list of identifiers +// +// The function iterates over each identifier, collects its suite field into a +// slice, then removes duplicates using a helper that returns only distinct +// values. It returns a string slice containing the unique suite names present +// in the input. func GetSuitesFromIdentifiers(keys []claim.Identifier) []string { var suites []string for _, i := range keys { @@ -495,11 +563,23 @@ func GetSuitesFromIdentifiers(keys []claim.Identifier) []string { return arrayhelper.Unique(suites) } +// Entry Represents a test case entry in the printable catalog +// +// Each instance holds the name of a test and its identifying information, +// including URL and version details. The struct is used to build a mapping from +// suite names to collections of tests when generating a printable catalog. type Entry struct { testName string identifier claim.Identifier // {url and version} } +// CreatePrintableCatalogFromIdentifiers Organizes identifiers into a map keyed by suite names +// +// The function receives a slice of identifier objects and constructs a mapping +// from each identifier's suite to a list of entries containing the test name +// and the full identifier. It initializes an empty map, iterates over the input +// slice, appends a new entry for each identifier, and returns the populated +// map. If no identifiers are provided, it simply returns an empty map. func CreatePrintableCatalogFromIdentifiers(keys []claim.Identifier) map[string][]Entry { catalog := make(map[string][]Entry) // we need the list of suite's names